From d9cddcb8901b19e6f29a205bb335b2365a769092 Mon Sep 17 00:00:00 2001
From: Dan Ryan
Date: Fri, 20 Dec 2019 20:54:55 -0500
Subject: [PATCH 01/49] Updated and re-patched vendored dependencies

- **click-completion**: ``0.5.1 => 0.5.2``
- **python-dotenv**: ``0.10.2 => 0.10.3``
- **jinja2**: ``2.10 => 2.10.3``
- **parse**: ``1.12.0 => 1.13.0``
- **pathlib2**: ``2.3.3 => 2.3.5``
- **pipreqs**: ``0.4.9 => 0.4.10``
- **urllib3**: ``1.25.2 => 1.25.7``
- **certifi**: ``2019.3.9 => 2019.11.28``
- **requirementslib**: ``1.5.1 => 1.5.4``
- **attrs**: ``19.1.0 => 19.3.0``
- **distlib**: ``0.2.9 => 0.3.0``
- **packaging**: ``19.0 => 19.2``
- **pyparsing**: ``2.3.1 => 2.4.5``
- **tomlkit**: ``0.5.3 => 0.5.8``
- **six**: ``1.12.0 => 1.13.0``
- **semver**: ``2.8.1 => 2.9.0``
- **vistir**: ``0.4.2 => 0.4.3``
- **pip-shims**: ``0.3.2 => 0.4.0``
- **yaspin**: ``0.14.3 => 0.15.0``
- **cerberus**: ``1.3.1 => 1.3.2``
- **pep517**: ``0.5.0 => 0.8.1``
- **orderedmultidict**: ``1.0 => 1.0.1``
- **pip**: ``19.0.3 => 19.3.1``
- **pip-tools**: ``3.5.0 => 4.3.0``

Signed-off-by: Dan Ryan
---
 pipenv/core.py | 4 +-
 pipenv/patched/notpip/LICENSE.txt | 2 +-
 pipenv/patched/notpip/__init__.py | 2 +-
 pipenv/patched/notpip/__main__.py | 2 +-
 pipenv/patched/notpip/_internal/__init__.py | 78 +-
 pipenv/patched/notpip/_internal/build_env.py | 27 +-
 pipenv/patched/notpip/_internal/cache.py | 61 +-
 .../notpip/_internal/cli/autocompletion.py | 9 +-
 .../notpip/_internal/cli/base_command.py | 246 +-
 .../notpip/_internal/cli/cmdoptions.py | 154 +-
 .../notpip/_internal/cli/command_context.py | 29 +
 .../notpip/_internal/cli/main_parser.py | 25 +-
 pipenv/patched/notpip/_internal/cli/parser.py | 4 +
 .../notpip/_internal/cli/req_command.py | 304 ++
 pipenv/patched/notpip/_internal/collector.py | 548 ++++
 .../notpip/_internal/commands/__init__.py | 147 +-
 .../notpip/_internal/commands/check.py | 16 +-
 .../notpip/_internal/commands/completion.py | 10 +-
 .../_internal/commands/configuration.py | 56 +-
 .../notpip/_internal/commands/debug.py | 115 +
 .../notpip/_internal/commands/download.py | 150 +-
 .../notpip/_internal/commands/freeze.py | 11 +-
 .../patched/notpip/_internal/commands/hash.py | 13 +-
 .../patched/notpip/_internal/commands/help.py | 12 +-
 .../notpip/_internal/commands/install.py | 451 +--
 .../patched/notpip/_internal/commands/list.py | 66 +-
 .../notpip/_internal/commands/search.py | 36 +-
 .../patched/notpip/_internal/commands/show.py | 74 +-
 .../notpip/_internal/commands/uninstall.py | 64 +-
 .../notpip/_internal/commands/wheel.py | 136 +-
 .../patched/notpip/_internal/configuration.py | 69 +-
 .../_internal/distributions/__init__.py | 24 +
 .../notpip/_internal/distributions/base.py | 36 +
 .../_internal/distributions/installed.py | 18 +
 .../distributions/source/__init__.py | 0
 .../_internal/distributions/source/legacy.py | 98 +
 .../notpip/_internal/distributions/wheel.py | 20 +
 pipenv/patched/notpip/_internal/download.py | 741 ++---
 pipenv/patched/notpip/_internal/exceptions.py | 40 +-
 pipenv/patched/notpip/_internal/index.py | 1569 +++++-----
 .../{resolve.py => legacy_resolve.py} | 172 +-
 pipenv/patched/notpip/_internal/locations.py | 117 +-
 pipenv/patched/notpip/_internal/main.py | 47 +
 .../notpip/_internal/models/candidate.py | 22 +-
 .../notpip/_internal/models/format_control.py | 11 +-
 .../patched/notpip/_internal/models/link.py | 144 +-
 .../notpip/_internal/models/search_scope.py | 116 +
 .../_internal/models/selection_prefs.py | 47 +
 .../notpip/_internal/models/target_python.py | 106 +
 .../notpip/_internal/network/__init__.py | 2 
+ .../patched/notpip/_internal/network/auth.py | 298 ++ .../patched/notpip/_internal/network/cache.py | 75 + .../notpip/_internal/network/session.py | 426 +++ .../notpip/_internal/network/xmlrpc.py | 44 + .../notpip/_internal/operations/check.py | 20 +- .../notpip/_internal/operations/freeze.py | 42 +- .../_internal/operations/generate_metadata.py | 136 + .../notpip/_internal/operations/prepare.py | 304 +- pipenv/patched/notpip/_internal/pep425tags.py | 108 +- pipenv/patched/notpip/_internal/pyproject.py | 6 +- .../patched/notpip/_internal/req/__init__.py | 15 +- .../notpip/_internal/req/constructors.py | 235 +- .../patched/notpip/_internal/req/req_file.py | 61 +- .../notpip/_internal/req/req_install.py | 490 ++- .../patched/notpip/_internal/req/req_set.py | 119 +- .../notpip/_internal/req/req_tracker.py | 20 +- .../notpip/_internal/req/req_uninstall.py | 98 +- .../notpip/_internal/self_outdated_check.py | 244 ++ .../patched/notpip/_internal/utils/appdirs.py | 18 +- .../patched/notpip/_internal/utils/compat.py | 105 +- .../notpip/_internal/utils/deprecation.py | 32 +- .../notpip/_internal/utils/encoding.py | 19 +- .../notpip/_internal/utils/filesystem.py | 85 + .../notpip/_internal/utils/filetypes.py | 16 + .../patched/notpip/_internal/utils/glibc.py | 36 +- .../patched/notpip/_internal/utils/hashes.py | 26 +- .../_internal/utils/inject_securetransport.py | 36 + .../patched/notpip/_internal/utils/logging.py | 110 +- .../notpip/_internal/utils/marker_files.py | 27 + pipenv/patched/notpip/_internal/utils/misc.py | 746 ++--- .../patched/notpip/_internal/utils/models.py | 4 +- .../notpip/_internal/utils/outdated.py | 164 - .../notpip/_internal/utils/packaging.py | 72 +- .../_internal/utils/setuptools_build.py | 45 +- .../notpip/_internal/utils/subprocess.py | 278 ++ .../notpip/_internal/utils/temp_dir.py | 90 +- .../patched/notpip/_internal/utils/typing.py | 2 +- pipenv/patched/notpip/_internal/utils/ui.py | 37 +- .../notpip/_internal/utils/unpacking.py | 272 ++ pipenv/patched/notpip/_internal/utils/urls.py | 54 + .../notpip/_internal/utils/virtualenv.py | 34 + .../patched/notpip/_internal/vcs/__init__.py | 547 +--- pipenv/patched/notpip/_internal/vcs/bazaar.py | 70 +- pipenv/patched/notpip/_internal/vcs/git.py | 187 +- .../patched/notpip/_internal/vcs/mercurial.py | 98 +- .../notpip/_internal/vcs/subversion.py | 235 +- .../notpip/_internal/vcs/versioncontrol.py | 665 +++++ pipenv/patched/notpip/_internal/wheel.py | 436 +-- pipenv/patched/notpip/_vendor/__init__.py | 34 +- .../_vendor/cachecontrol/caches/file_cache.py | 4 +- .../notpip/_vendor/certifi/__init__.py | 2 +- .../patched/notpip/_vendor/certifi/cacert.pem | 246 +- pipenv/patched/notpip/_vendor/certifi/core.py | 5 - .../notpip/_vendor/contextlib2.LICENSE.txt | 122 + pipenv/patched/notpip/_vendor/contextlib2.py | 518 ++++ .../notpip/_vendor/distlib/__init__.py | 4 +- .../notpip/_vendor/distlib/metadata.py | 8 +- .../patched/notpip/_vendor/distlib/scripts.py | 26 +- pipenv/patched/notpip/_vendor/distlib/t32.exe | Bin 92672 -> 92672 bytes pipenv/patched/notpip/_vendor/distlib/t64.exe | Bin 102400 -> 102912 bytes pipenv/patched/notpip/_vendor/distlib/util.py | 6 +- pipenv/patched/notpip/_vendor/distlib/w32.exe | Bin 89088 -> 89088 bytes pipenv/patched/notpip/_vendor/distlib/w64.exe | Bin 99328 -> 99840 bytes .../patched/notpip/_vendor/distlib/wheel.py | 30 +- pipenv/patched/notpip/_vendor/distro.py | 67 +- .../notpip/_vendor/html5lib/_trie/_base.py | 5 +- .../_vendor/html5lib/treebuilders/dom.py | 5 +- 
.../patched/notpip/_vendor/lockfile/LICENSE | 21 - .../notpip/_vendor/lockfile/__init__.py | 347 --- .../notpip/_vendor/lockfile/linklockfile.py | 73 - .../notpip/_vendor/lockfile/mkdirlockfile.py | 84 - .../notpip/_vendor/lockfile/pidlockfile.py | 190 -- .../notpip/_vendor/lockfile/sqlitelockfile.py | 156 - .../_vendor/lockfile/symlinklockfile.py | 70 - .../notpip/_vendor/msgpack/__init__.py | 11 +- .../notpip/_vendor/msgpack/_version.py | 2 +- .../notpip/_vendor/msgpack/exceptions.py | 45 +- .../notpip/_vendor/msgpack/fallback.py | 330 ++- .../notpip/_vendor/packaging/__about__.py | 2 +- .../notpip/_vendor/packaging/markers.py | 2 +- .../patched/notpip/_vendor/packaging/tags.py | 404 +++ .../patched/notpip/_vendor/pep517/__init__.py | 2 +- .../notpip/_vendor/pep517/_in_process.py | 64 +- pipenv/patched/notpip/_vendor/pep517/build.py | 80 +- pipenv/patched/notpip/_vendor/pep517/check.py | 7 +- .../patched/notpip/_vendor/pep517/compat.py | 13 +- .../patched/notpip/_vendor/pep517/dirtools.py | 44 + .../patched/notpip/_vendor/pep517/envbuild.py | 29 +- pipenv/patched/notpip/_vendor/pep517/meta.py | 92 + .../patched/notpip/_vendor/pep517/wrappers.py | 155 +- .../notpip/_vendor/pkg_resources/__init__.py | 140 +- .../notpip/_vendor/progress/__init__.py | 80 +- pipenv/patched/notpip/_vendor/progress/bar.py | 5 +- .../notpip/_vendor/progress/counter.py | 13 +- .../notpip/_vendor/progress/helpers.py | 91 - .../notpip/_vendor/progress/spinner.py | 7 +- pipenv/patched/notpip/_vendor/pyparsing.py | 2606 +++++++++------- .../patched/notpip/_vendor/pytoml/parser.py | 7 +- .../patched/notpip/_vendor/pytoml/writer.py | 8 + .../notpip/_vendor/requests/__init__.py | 4 +- .../notpip/_vendor/requests/__version__.py | 6 +- pipenv/patched/notpip/_vendor/requests/api.py | 4 +- .../notpip/_vendor/requests/packages.py | 6 +- .../notpip/_vendor/urllib3/__init__.py | 56 +- .../notpip/_vendor/urllib3/_collections.py | 37 +- .../notpip/_vendor/urllib3/connection.py | 227 +- .../notpip/_vendor/urllib3/connectionpool.py | 439 ++- .../urllib3/contrib/_appengine_environ.py | 20 +- .../contrib/_securetransport/bindings.py | 275 +- .../contrib/_securetransport/low_level.py | 52 +- .../_vendor/urllib3/contrib/appengine.py | 118 +- .../_vendor/urllib3/contrib/ntlmpool.py | 98 +- .../_vendor/urllib3/contrib/pyopenssl.py | 158 +- .../urllib3/contrib/securetransport.py | 224 +- .../notpip/_vendor/urllib3/contrib/socks.py | 136 +- .../notpip/_vendor/urllib3/exceptions.py | 29 +- .../patched/notpip/_vendor/urllib3/fields.py | 165 +- .../notpip/_vendor/urllib3/filepost.py | 14 +- .../_vendor/urllib3/packages/__init__.py | 2 +- .../urllib3/packages/backports/makefile.py | 9 +- .../notpip/_vendor/urllib3/packages/six.py | 321 +- .../packages/ssl_match_hostname/__init__.py | 2 +- .../ssl_match_hostname/_implementation.py | 58 +- .../notpip/_vendor/urllib3/poolmanager.py | 184 +- .../patched/notpip/_vendor/urllib3/request.py | 79 +- .../notpip/_vendor/urllib3/response.py | 250 +- .../notpip/_vendor/urllib3/util/__init__.py | 60 +- .../notpip/_vendor/urllib3/util/connection.py | 16 +- .../notpip/_vendor/urllib3/util/request.py | 57 +- .../notpip/_vendor/urllib3/util/response.py | 9 +- .../notpip/_vendor/urllib3/util/retry.py | 99 +- .../notpip/_vendor/urllib3/util/ssl_.py | 240 +- .../notpip/_vendor/urllib3/util/timeout.py | 82 +- .../notpip/_vendor/urllib3/util/url.py | 391 ++- .../notpip/_vendor/urllib3/util/wait.py | 3 + pipenv/patched/notpip/_vendor/vendor.txt | 22 +- pipenv/patched/notpip/contextlib2.LICENSE.txt | 122 + 
pipenv/patched/notpip/idna.LICENSE.rst | 80 + .../notpip/{COPYING => msgpack.COPYING} | 0 .../{LICENSE.BSD => packaging.LICENSE.BSD} | 0 pipenv/patched/notpip/urllib3.LICENSE | 19 - pipenv/patched/patched.txt | 4 +- pipenv/patched/piptools/__init__.py | 11 + pipenv/patched/piptools/__main__.py | 7 +- pipenv/patched/piptools/_compat/__init__.py | 52 +- pipenv/patched/piptools/_compat/contextlib.py | 131 +- pipenv/patched/piptools/_compat/pip_compat.py | 137 +- pipenv/patched/piptools/_compat/tempfile.py | 6 +- pipenv/patched/piptools/cache.py | 53 +- pipenv/patched/piptools/click.py | 4 +- pipenv/patched/piptools/exceptions.py | 39 +- pipenv/patched/piptools/io.py | 644 ---- pipenv/patched/piptools/locations.py | 16 +- pipenv/patched/piptools/logging.py | 12 +- pipenv/patched/piptools/pip.py | 30 - pipenv/patched/piptools/repositories/base.py | 6 +- pipenv/patched/piptools/repositories/local.py | 22 +- pipenv/patched/piptools/repositories/pypi.py | 319 +- pipenv/patched/piptools/resolver.py | 291 +- pipenv/patched/piptools/scripts/compile.py | 457 ++- pipenv/patched/piptools/scripts/sync.py | 135 +- pipenv/patched/piptools/sync.py | 121 +- pipenv/patched/piptools/utils.py | 187 +- pipenv/patched/piptools/writer.py | 244 +- pipenv/utils.py | 17 +- pipenv/vendor/attr/__init__.py | 5 +- pipenv/vendor/attr/__init__.pyi | 63 +- pipenv/vendor/attr/_compat.py | 119 +- pipenv/vendor/attr/_funcs.py | 8 +- pipenv/vendor/attr/_make.py | 528 ++-- pipenv/vendor/attr/_version_info.py | 85 + pipenv/vendor/attr/_version_info.pyi | 9 + pipenv/vendor/attr/converters.py | 4 +- pipenv/vendor/attr/exceptions.py | 19 +- pipenv/vendor/attr/exceptions.pyi | 8 + pipenv/vendor/attr/filters.py | 10 +- pipenv/vendor/attr/validators.py | 126 +- pipenv/vendor/attr/validators.pyi | 60 +- pipenv/vendor/backports/__init__.py | 2 +- pipenv/vendor/cerberus/errors.py | 2 +- pipenv/vendor/cerberus/schema.py | 17 +- pipenv/vendor/cerberus/tests/test_errors.py | 13 + pipenv/vendor/cerberus/tests/test_schema.py | 2 +- pipenv/vendor/certifi/__init__.py | 2 +- pipenv/vendor/certifi/cacert.pem | 144 +- pipenv/vendor/click_completion/__init__.py | 2 +- pipenv/vendor/click_completion/core.py | 8 +- pipenv/vendor/click_completion/lib.py | 4 +- pipenv/vendor/click_completion/patch.py | 25 +- pipenv/vendor/distlib/__init__.py | 4 +- pipenv/vendor/distlib/_backport/sysconfig.py | 8 +- pipenv/vendor/distlib/database.py | 2 +- pipenv/vendor/distlib/locators.py | 21 +- pipenv/vendor/distlib/scripts.py | 25 +- pipenv/vendor/distlib/t32.exe | Bin 92672 -> 96768 bytes pipenv/vendor/distlib/t64.exe | Bin 102400 -> 105984 bytes pipenv/vendor/distlib/util.py | 5 +- pipenv/vendor/distlib/w32.exe | Bin 89088 -> 90112 bytes pipenv/vendor/distlib/w64.exe | Bin 99328 -> 99840 bytes pipenv/vendor/distlib/wheel.py | 2 +- pipenv/vendor/dotenv/cli.py | 40 +- pipenv/vendor/dotenv/compat.py | 44 +- pipenv/vendor/dotenv/main.py | 156 +- pipenv/vendor/dotenv/parser.py | 163 + pipenv/vendor/dotenv/version.py | 2 +- pipenv/vendor/jinja2/LICENSE | 31 - pipenv/vendor/jinja2/LICENSE.rst | 28 + pipenv/vendor/jinja2/__init__.py | 2 +- pipenv/vendor/jinja2/_compat.py | 6 + pipenv/vendor/jinja2/bccache.py | 5 +- pipenv/vendor/jinja2/debug.py | 16 +- pipenv/vendor/jinja2/runtime.py | 9 +- pipenv/vendor/jinja2/sandbox.py | 39 +- pipenv/vendor/jinja2/tests.py | 5 +- pipenv/vendor/jinja2/utils.py | 9 +- pipenv/vendor/orderedmultidict/LICENSE.md | 2 +- pipenv/vendor/orderedmultidict/__init__.py | 14 +- pipenv/vendor/orderedmultidict/__version__.py | 19 + 
pipenv/vendor/orderedmultidict/itemlist.py | 1 + .../orderedmultidict/orderedmultidict.py | 30 +- pipenv/vendor/packaging/__about__.py | 2 +- pipenv/vendor/packaging/markers.py | 2 +- pipenv/vendor/packaging/tags.py | 404 +++ pipenv/vendor/parse.LICENSE | 8 +- pipenv/vendor/parse.py | 17 +- pipenv/vendor/passa/actions/clean.py | 4 +- pipenv/vendor/passa/cli/add.py | 3 +- pipenv/vendor/pathlib2/__init__.py | 183 +- pipenv/vendor/pep517/__init__.py | 2 +- pipenv/vendor/pep517/_in_process.py | 64 +- pipenv/vendor/pep517/build.py | 80 +- pipenv/vendor/pep517/check.py | 7 +- pipenv/vendor/pep517/compat.py | 13 +- pipenv/vendor/pep517/dirtools.py | 44 + pipenv/vendor/pep517/envbuild.py | 29 +- pipenv/vendor/pep517/meta.py | 92 + pipenv/vendor/pep517/wrappers.py | 183 +- pipenv/vendor/pip_shims/__init__.py | 34 +- pipenv/vendor/pip_shims/backports.py | 1183 ++++++++ pipenv/vendor/pip_shims/environment.py | 40 + pipenv/vendor/pip_shims/models.py | 1152 +++++++ pipenv/vendor/pip_shims/shims.py | 386 +-- pipenv/vendor/pip_shims/utils.py | 410 ++- pipenv/vendor/pipreqs/__init__.py | 2 +- pipenv/vendor/pipreqs/mapping | 27 +- pipenv/vendor/pipreqs/pipreqs.py | 242 +- pipenv/vendor/pipreqs/stdlib | 709 +---- pipenv/vendor/plette/__init__.py | 2 +- pipenv/vendor/plette/models/base.py | 27 +- pipenv/vendor/pyparsing.py | 2637 ++++++++++------- .../vendor/{pytoml => pythonfinder}/LICENSE | 13 +- .../_vendor/pep514tools/environment.py | 2 +- pipenv/vendor/pythonfinder/models/mixins.py | 4 - pipenv/vendor/pythonfinder/models/path.py | 23 +- pipenv/vendor/pythonfinder/models/python.py | 11 +- pipenv/vendor/pytoml/__init__.py | 4 - pipenv/vendor/pytoml/core.py | 13 - pipenv/vendor/pytoml/parser.py | 341 --- pipenv/vendor/pytoml/test.py | 30 - pipenv/vendor/pytoml/utils.py | 67 - pipenv/vendor/pytoml/writer.py | 106 - pipenv/vendor/requirementslib/__init__.py | 2 +- .../vendor/requirementslib/models/__init__.py | 1 - .../requirementslib/models/dependencies.py | 113 +- .../vendor/requirementslib/models/lockfile.py | 43 +- .../vendor/requirementslib/models/markers.py | 2 +- .../vendor/requirementslib/models/pipfile.py | 150 +- .../requirementslib/models/requirements.py | 231 +- .../requirementslib/models/resolvers.py | 48 +- .../requirementslib/models/setup_info.py | 99 +- pipenv/vendor/requirementslib/models/url.py | 142 +- pipenv/vendor/requirementslib/models/utils.py | 68 +- pipenv/vendor/requirementslib/models/vcs.py | 105 +- pipenv/vendor/requirementslib/utils.py | 6 +- pipenv/vendor/semver.py | 323 +- pipenv/vendor/six.LICENSE | 2 +- pipenv/vendor/six.py | 17 +- pipenv/vendor/tomlkit/__init__.py | 2 +- pipenv/vendor/tomlkit/_compat.py | 5 +- pipenv/vendor/tomlkit/api.py | 2 +- pipenv/vendor/tomlkit/container.py | 26 +- pipenv/vendor/tomlkit/items.py | 247 +- pipenv/vendor/tomlkit/parser.py | 63 +- pipenv/vendor/urllib3/__init__.py | 55 +- pipenv/vendor/urllib3/_collections.py | 37 +- pipenv/vendor/urllib3/connection.py | 201 +- pipenv/vendor/urllib3/connectionpool.py | 418 ++- .../urllib3/contrib/_appengine_environ.py | 26 +- .../contrib/_securetransport/bindings.py | 262 +- .../contrib/_securetransport/low_level.py | 52 +- pipenv/vendor/urllib3/contrib/appengine.py | 121 +- pipenv/vendor/urllib3/contrib/ntlmpool.py | 96 +- pipenv/vendor/urllib3/contrib/pyopenssl.py | 129 +- .../vendor/urllib3/contrib/securetransport.py | 160 +- pipenv/vendor/urllib3/contrib/socks.py | 101 +- pipenv/vendor/urllib3/exceptions.py | 29 +- pipenv/vendor/urllib3/fields.py | 93 +- pipenv/vendor/urllib3/filepost.py | 14 +- 
pipenv/vendor/urllib3/packages/__init__.py | 2 +- .../urllib3/packages/backports/makefile.py | 9 +- .../urllib3/packages/rfc3986/__init__.py | 56 - .../vendor/urllib3/packages/rfc3986/_mixin.py | 353 --- .../urllib3/packages/rfc3986/abnf_regexp.py | 267 -- pipenv/vendor/urllib3/packages/rfc3986/api.py | 106 - .../urllib3/packages/rfc3986/builder.py | 298 -- .../vendor/urllib3/packages/rfc3986/compat.py | 54 - .../urllib3/packages/rfc3986/exceptions.py | 118 - pipenv/vendor/urllib3/packages/rfc3986/iri.py | 147 - .../vendor/urllib3/packages/rfc3986/misc.py | 146 - .../urllib3/packages/rfc3986/normalizers.py | 167 -- .../urllib3/packages/rfc3986/parseresult.py | 385 --- pipenv/vendor/urllib3/packages/rfc3986/uri.py | 153 - .../urllib3/packages/rfc3986/validators.py | 450 --- pipenv/vendor/urllib3/packages/six.py | 321 +- .../packages/ssl_match_hostname/__init__.py | 2 +- .../ssl_match_hostname/_implementation.py | 56 +- pipenv/vendor/urllib3/poolmanager.py | 183 +- pipenv/vendor/urllib3/request.py | 79 +- pipenv/vendor/urllib3/response.py | 195 +- pipenv/vendor/urllib3/util/__init__.py | 60 +- pipenv/vendor/urllib3/util/connection.py | 18 +- pipenv/vendor/urllib3/util/request.py | 52 +- pipenv/vendor/urllib3/util/response.py | 9 +- pipenv/vendor/urllib3/util/retry.py | 100 +- pipenv/vendor/urllib3/util/ssl_.py | 175 +- pipenv/vendor/urllib3/util/timeout.py | 79 +- pipenv/vendor/urllib3/util/url.py | 369 ++- pipenv/vendor/urllib3/util/wait.py | 3 + pipenv/vendor/vendor.txt | 44 +- pipenv/vendor/vendor_pip.txt | 22 +- pipenv/vendor/vistir/__init__.py | 2 +- pipenv/vendor/vistir/misc.py | 26 +- pipenv/vendor/vistir/path.py | 4 +- tasks/vendoring/__init__.py | 20 +- .../patched/_post-pip-update-pep425tags.patch | 26 +- .../patched/_post-pip-update-pypi-uri.patch | 44 - .../_post-pip-update-requests-imports.patch | 8 +- tasks/vendoring/patches/patched/pip19.patch | 782 +++-- .../vendoring/patches/patched/piptools.patch | 623 ++-- .../vendor/dotenv-typing-imports.patch | 129 +- .../vendor/pip_shims_module_names.patch | 23 +- .../patches/vendor/tomlkit-fix.patch | 14 +- 392 files changed, 27480 insertions(+), 19663 deletions(-) create mode 100644 pipenv/patched/notpip/_internal/cli/command_context.py create mode 100644 pipenv/patched/notpip/_internal/cli/req_command.py create mode 100644 pipenv/patched/notpip/_internal/collector.py create mode 100644 pipenv/patched/notpip/_internal/commands/debug.py create mode 100644 pipenv/patched/notpip/_internal/distributions/__init__.py create mode 100644 pipenv/patched/notpip/_internal/distributions/base.py create mode 100644 pipenv/patched/notpip/_internal/distributions/installed.py create mode 100644 pipenv/patched/notpip/_internal/distributions/source/__init__.py create mode 100644 pipenv/patched/notpip/_internal/distributions/source/legacy.py create mode 100644 pipenv/patched/notpip/_internal/distributions/wheel.py rename pipenv/patched/notpip/_internal/{resolve.py => legacy_resolve.py} (77%) create mode 100644 pipenv/patched/notpip/_internal/main.py create mode 100644 pipenv/patched/notpip/_internal/models/search_scope.py create mode 100644 pipenv/patched/notpip/_internal/models/selection_prefs.py create mode 100644 pipenv/patched/notpip/_internal/models/target_python.py create mode 100644 pipenv/patched/notpip/_internal/network/__init__.py create mode 100644 pipenv/patched/notpip/_internal/network/auth.py create mode 100644 pipenv/patched/notpip/_internal/network/cache.py create mode 100644 pipenv/patched/notpip/_internal/network/session.py create mode 
100644 pipenv/patched/notpip/_internal/network/xmlrpc.py create mode 100644 pipenv/patched/notpip/_internal/operations/generate_metadata.py create mode 100644 pipenv/patched/notpip/_internal/self_outdated_check.py create mode 100644 pipenv/patched/notpip/_internal/utils/filetypes.py create mode 100644 pipenv/patched/notpip/_internal/utils/inject_securetransport.py create mode 100644 pipenv/patched/notpip/_internal/utils/marker_files.py delete mode 100644 pipenv/patched/notpip/_internal/utils/outdated.py create mode 100644 pipenv/patched/notpip/_internal/utils/subprocess.py create mode 100644 pipenv/patched/notpip/_internal/utils/unpacking.py create mode 100644 pipenv/patched/notpip/_internal/utils/urls.py create mode 100644 pipenv/patched/notpip/_internal/utils/virtualenv.py create mode 100644 pipenv/patched/notpip/_internal/vcs/versioncontrol.py create mode 100644 pipenv/patched/notpip/_vendor/contextlib2.LICENSE.txt create mode 100644 pipenv/patched/notpip/_vendor/contextlib2.py delete mode 100644 pipenv/patched/notpip/_vendor/lockfile/LICENSE delete mode 100644 pipenv/patched/notpip/_vendor/lockfile/__init__.py delete mode 100644 pipenv/patched/notpip/_vendor/lockfile/linklockfile.py delete mode 100644 pipenv/patched/notpip/_vendor/lockfile/mkdirlockfile.py delete mode 100644 pipenv/patched/notpip/_vendor/lockfile/pidlockfile.py delete mode 100644 pipenv/patched/notpip/_vendor/lockfile/sqlitelockfile.py delete mode 100644 pipenv/patched/notpip/_vendor/lockfile/symlinklockfile.py create mode 100644 pipenv/patched/notpip/_vendor/packaging/tags.py create mode 100644 pipenv/patched/notpip/_vendor/pep517/dirtools.py create mode 100644 pipenv/patched/notpip/_vendor/pep517/meta.py delete mode 100644 pipenv/patched/notpip/_vendor/progress/helpers.py create mode 100644 pipenv/patched/notpip/contextlib2.LICENSE.txt create mode 100644 pipenv/patched/notpip/idna.LICENSE.rst rename pipenv/patched/notpip/{COPYING => msgpack.COPYING} (100%) rename pipenv/patched/notpip/{LICENSE.BSD => packaging.LICENSE.BSD} (100%) delete mode 100644 pipenv/patched/notpip/urllib3.LICENSE delete mode 100644 pipenv/patched/piptools/io.py delete mode 100644 pipenv/patched/piptools/pip.py create mode 100644 pipenv/vendor/attr/_version_info.py create mode 100644 pipenv/vendor/attr/_version_info.pyi create mode 100644 pipenv/vendor/dotenv/parser.py delete mode 100644 pipenv/vendor/jinja2/LICENSE create mode 100644 pipenv/vendor/jinja2/LICENSE.rst create mode 100755 pipenv/vendor/orderedmultidict/__version__.py create mode 100644 pipenv/vendor/packaging/tags.py create mode 100644 pipenv/vendor/pep517/dirtools.py create mode 100644 pipenv/vendor/pep517/meta.py create mode 100644 pipenv/vendor/pip_shims/backports.py create mode 100644 pipenv/vendor/pip_shims/environment.py create mode 100644 pipenv/vendor/pip_shims/models.py rename pipenv/vendor/{pytoml => pythonfinder}/LICENSE (76%) delete mode 100644 pipenv/vendor/pytoml/__init__.py delete mode 100644 pipenv/vendor/pytoml/core.py delete mode 100644 pipenv/vendor/pytoml/parser.py delete mode 100644 pipenv/vendor/pytoml/test.py delete mode 100644 pipenv/vendor/pytoml/utils.py delete mode 100644 pipenv/vendor/pytoml/writer.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/__init__.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/_mixin.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/abnf_regexp.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/api.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/builder.py delete mode 
100644 pipenv/vendor/urllib3/packages/rfc3986/compat.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/exceptions.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/iri.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/misc.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/normalizers.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/parseresult.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/uri.py delete mode 100644 pipenv/vendor/urllib3/packages/rfc3986/validators.py delete mode 100644 tasks/vendoring/patches/patched/_post-pip-update-pypi-uri.patch diff --git a/pipenv/core.py b/pipenv/core.py index 74b72359df..9070dfce64 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -1317,7 +1317,7 @@ def get_pip_args( "no_use_pep517": [], "no_deps": ["--no-deps"], "selective_upgrade": [ - "--upgrade-strategy=only-if-needed", "--exists_action={0}".format(PIP_EXISTS_ACTION or "i") + "--upgrade-strategy=only-if-needed", "--exists-action={0}".format(PIP_EXISTS_ACTION or "i") ], "src_dir": src_dir, } @@ -1406,7 +1406,7 @@ def pip_install( trusted_hosts=None, use_pep517=True ): - from pipenv.patched.notpip._internal import logger as piplogger + piplogger = logging.getLogger("pipenv.patched.notpip._internal.commands.install") src_dir = None if not trusted_hosts: trusted_hosts = [] diff --git a/pipenv/patched/notpip/LICENSE.txt b/pipenv/patched/notpip/LICENSE.txt index d3379faca6..737fec5c53 100644 --- a/pipenv/patched/notpip/LICENSE.txt +++ b/pipenv/patched/notpip/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2008-2018 The pip developers (see AUTHORS.txt file) +Copyright (c) 2008-2019 The pip developers (see AUTHORS.txt file) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/pipenv/patched/notpip/__init__.py b/pipenv/patched/notpip/__init__.py index f48c1ca630..a487794a9b 100644 --- a/pipenv/patched/notpip/__init__.py +++ b/pipenv/patched/notpip/__init__.py @@ -1 +1 @@ -__version__ = "19.0.3" +__version__ = "19.3.1" diff --git a/pipenv/patched/notpip/__main__.py b/pipenv/patched/notpip/__main__.py index a4879980b4..36a4800f13 100644 --- a/pipenv/patched/notpip/__main__.py +++ b/pipenv/patched/notpip/__main__.py @@ -13,7 +13,7 @@ path = os.path.dirname(os.path.dirname(__file__)) sys.path.insert(0, path) -from pipenv.patched.notpip._internal import main as _main # isort:skip # noqa +from pipenv.patched.notpip._internal.main import main as _main # isort:skip # noqa if __name__ == '__main__': sys.exit(_main()) diff --git a/pipenv/patched/notpip/_internal/__init__.py b/pipenv/patched/notpip/_internal/__init__.py index 6d223928fe..18d727b653 100644 --- a/pipenv/patched/notpip/_internal/__init__.py +++ b/pipenv/patched/notpip/_internal/__init__.py @@ -1,78 +1,2 @@ #!/usr/bin/env python -from __future__ import absolute_import - -import locale -import logging -import os -import warnings - -import sys - -# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks, -# but if invoked (i.e. imported), it will issue a warning to stderr if socks -# isn't available. requests unconditionally imports urllib3's socks contrib -# module, triggering this warning. The warning breaks DEP-8 tests (because of -# the stderr output) and is just plain annoying in normal usage. I don't want -# to add socks as yet another dependency for pip, nor do I want to allow-stder -# in the DEP-8 tests, so just suppress the warning. 
pdb tells me this has to -# be done before the import of pip.vcs. -from pipenv.patched.notpip._vendor.urllib3.exceptions import DependencyWarning -warnings.filterwarnings("ignore", category=DependencyWarning) # noqa - -# We want to inject the use of SecureTransport as early as possible so that any -# references or sessions or what have you are ensured to have it, however we -# only want to do this in the case that we're running on macOS and the linked -# OpenSSL is too old to handle TLSv1.2 -try: - import ssl -except ImportError: - pass -else: - # Checks for OpenSSL 1.0.1 on MacOS - if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f: - try: - from pipenv.patched.notpip._vendor.urllib3.contrib import securetransport - except (ImportError, OSError): - pass - else: - securetransport.inject_into_urllib3() - -from pipenv.patched.notpip._internal.cli.autocompletion import autocomplete -from pipenv.patched.notpip._internal.cli.main_parser import parse_command -from pipenv.patched.notpip._internal.commands import commands_dict -from pipenv.patched.notpip._internal.exceptions import PipError -from pipenv.patched.notpip._internal.utils import deprecation -from pipenv.patched.notpip._internal.vcs import git, mercurial, subversion, bazaar # noqa -from pipenv.patched.notpip._vendor.urllib3.exceptions import InsecureRequestWarning - -logger = logging.getLogger(__name__) - -# Hide the InsecureRequestWarning from urllib3 -warnings.filterwarnings("ignore", category=InsecureRequestWarning) - - -def main(args=None): - if args is None: - args = sys.argv[1:] - - # Configure our deprecation warnings to be sent through loggers - deprecation.install_warning_logger() - - autocomplete() - - try: - cmd_name, cmd_args = parse_command(args) - except PipError as exc: - sys.stderr.write("ERROR: %s" % exc) - sys.stderr.write(os.linesep) - sys.exit(1) - - # Needed for locale.getpreferredencoding(False) to work - # in pip._internal.utils.encoding.auto_decode - try: - locale.setlocale(locale.LC_ALL, '') - except locale.Error as e: - # setlocale can apparently crash if locale are uninitialized - logger.debug("Ignoring error %s when setting locale", e) - command = commands_dict[cmd_name](isolated=("--isolated" in cmd_args)) - return command.main(cmd_args) +import pipenv.patched.notpip._internal.utils.inject_securetransport # noqa diff --git a/pipenv/patched/notpip/_internal/build_env.py b/pipenv/patched/notpip/_internal/build_env.py index d38adc4934..7760b5210a 100644 --- a/pipenv/patched/notpip/_internal/build_env.py +++ b/pipenv/patched/notpip/_internal/build_env.py @@ -1,6 +1,10 @@ """Build Environment used for isolation during sdist building """ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + import logging import os import sys @@ -12,14 +16,14 @@ from pipenv.patched.notpip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet from pipenv.patched.notpip import __file__ as pip_location -from pipenv.patched.notpip._internal.utils.misc import call_subprocess +from pipenv.patched.notpip._internal.utils.subprocess import call_subprocess from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING from pipenv.patched.notpip._internal.utils.ui import open_spinner if MYPY_CHECK_RUNNING: - from typing import Tuple, Set, Iterable, Optional, List # noqa: F401 - from pipenv.patched.notpip._internal.index import PackageFinder # noqa: F401 + from typing import Tuple, Set, Iterable, Optional, List + from pipenv.patched.notpip._internal.index import PackageFinder logger = logging.getLogger(__name__) @@ -51,7 +55,6 @@ class BuildEnvironment(object): def __init__(self): # type: () -> None self._temp_dir = TempDirectory(kind="build-env") - self._temp_dir.create() self._prefixes = OrderedDict(( (name, _Prefix(os.path.join(self._temp_dir.path, name))) @@ -166,8 +169,9 @@ def install_requirements( prefix.setup = True if not requirements: return + sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) args = [ - sys.executable, os.path.dirname(pip_location), 'install', + sys_executable, os.path.dirname(pip_location), 'install', '--ignore-installed', '--no-user', '--prefix', prefix.path, '--no-warn-script-location', ] # type: List[str] @@ -177,22 +181,25 @@ def install_requirements( formats = getattr(finder.format_control, format_control) args.extend(('--' + format_control.replace('_', '-'), ','.join(sorted(formats or {':none:'})))) - if finder.index_urls: - args.extend(['-i', finder.index_urls[0]]) - for extra_index in finder.index_urls[1:]: + + index_urls = finder.index_urls + if index_urls: + args.extend(['-i', index_urls[0]]) + for extra_index in index_urls[1:]: args.extend(['--extra-index-url', extra_index]) else: args.append('--no-index') for link in finder.find_links: args.extend(['--find-links', link]) - for _, host, _ in finder.secure_origins: + + for host in finder.trusted_hosts: args.extend(['--trusted-host', host]) if finder.allow_all_prereleases: args.append('--pre') args.append('--') args.extend(requirements) with open_spinner(message) as spinner: - call_subprocess(args, show_stdout=False, spinner=spinner) + call_subprocess(args, spinner=spinner) class NoOpBuildEnvironment(BuildEnvironment): diff --git a/pipenv/patched/notpip/_internal/cache.py b/pipenv/patched/notpip/_internal/cache.py index 9f35e83db3..9d241eca8f 100644 --- a/pipenv/patched/notpip/_internal/cache.py +++ b/pipenv/patched/notpip/_internal/cache.py @@ -1,6 +1,9 @@ """Cache Management """ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False + import errno import hashlib import logging @@ -8,16 +11,17 @@ from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name -from pipenv.patched.notpip._internal.download import path_to_url from pipenv.patched.notpip._internal.models.link import Link from pipenv.patched.notpip._internal.utils.compat import expanduser from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.urls import path_to_url from pipenv.patched.notpip._internal.wheel import InvalidWheelFilename, Wheel if MYPY_CHECK_RUNNING: - from typing import Optional, Set, List, Any # noqa: F401 - from pipenv.patched.notpip._internal.index import FormatControl # noqa: F401 + from typing import Optional, Set, List, Any + from pipenv.patched.notpip._internal.index import FormatControl + from pipenv.patched.notpip._internal.pep425tags import Pep425Tag logger = logging.getLogger(__name__) @@ -100,8 +104,13 @@ def get_path_for_link(self, link): """ raise NotImplementedError() - def get(self, link, package_name): - # type: (Link, Optional[str]) -> Link + def get( + self, + link, # type: Link + package_name, # type: Optional[str] + supported_tags, # type: List[Pep425Tag] + ): + # type: (...) -> Link """Returns a link to a cached item if it exists, otherwise returns the passed link. """ @@ -150,8 +159,13 @@ def get_path_for_link(self, link): # Store wheels within the root cache_dir return os.path.join(self.cache_dir, "wheels", *parts) - def get(self, link, package_name): - # type: (Link, Optional[str]) -> Link + def get( + self, + link, # type: Link + package_name, # type: Optional[str] + supported_tags, # type: List[Pep425Tag] + ): + # type: (...) -> Link candidates = [] for wheel_name in self._get_candidates(link, package_name): @@ -159,10 +173,12 @@ def get(self, link, package_name): wheel = Wheel(wheel_name) except InvalidWheelFilename: continue - if not wheel.supported(): + if not wheel.supported(supported_tags): # Built for a different python/arch/etc continue - candidates.append((wheel.support_index_min(), wheel_name)) + candidates.append( + (wheel.support_index_min(supported_tags), wheel_name) + ) if not candidates: return link @@ -177,7 +193,6 @@ class EphemWheelCache(SimpleWheelCache): def __init__(self, format_control): # type: (FormatControl) -> None self._temp_dir = TempDirectory(kind="ephem-wheel-cache") - self._temp_dir.create() super(EphemWheelCache, self).__init__( self._temp_dir.path, format_control @@ -211,12 +226,26 @@ def get_ephem_path_for_link(self, link): # type: (Link) -> str return self._ephem_cache.get_path_for_link(link) - def get(self, link, package_name): - # type: (Link, Optional[str]) -> Link - retval = self._wheel_cache.get(link, package_name) - if retval is link: - retval = self._ephem_cache.get(link, package_name) - return retval + def get( + self, + link, # type: Link + package_name, # type: Optional[str] + supported_tags, # type: List[Pep425Tag] + ): + # type: (...) 
-> Link + retval = self._wheel_cache.get( + link=link, + package_name=package_name, + supported_tags=supported_tags, + ) + if retval is not link: + return retval + + return self._ephem_cache.get( + link=link, + package_name=package_name, + supported_tags=supported_tags, + ) def cleanup(self): # type: () -> None diff --git a/pipenv/patched/notpip/_internal/cli/autocompletion.py b/pipenv/patched/notpip/_internal/cli/autocompletion.py index 15b560a1dc..d8e657096c 100644 --- a/pipenv/patched/notpip/_internal/cli/autocompletion.py +++ b/pipenv/patched/notpip/_internal/cli/autocompletion.py @@ -1,12 +1,15 @@ """Logic that powers autocompletion installed by ``pip completion``. """ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + import optparse import os import sys from pipenv.patched.notpip._internal.cli.main_parser import create_main_parser -from pipenv.patched.notpip._internal.commands import commands_dict, get_summaries +from pipenv.patched.notpip._internal.commands import commands_dict, create_command from pipenv.patched.notpip._internal.utils.misc import get_installed_distributions @@ -23,7 +26,7 @@ def autocomplete(): except IndexError: current = '' - subcommands = [cmd for cmd, summary in get_summaries()] + subcommands = list(commands_dict) options = [] # subcommand try: @@ -54,7 +57,7 @@ def autocomplete(): print(dist) sys.exit(1) - subcommand = commands_dict[subcommand_name]() + subcommand = create_command(subcommand_name) for opt in subcommand.parser.option_list_all: if opt.help != optparse.SUPPRESS_HELP: diff --git a/pipenv/patched/notpip/_internal/cli/base_command.py b/pipenv/patched/notpip/_internal/cli/base_command.py index 4aa16da613..dd818fe0b6 100644 --- a/pipenv/patched/notpip/_internal/cli/base_command.py +++ b/pipenv/patched/notpip/_internal/cli/base_command.py @@ -1,4 +1,5 @@ """Base Command class, and related routines""" + from __future__ import absolute_import, print_function import logging @@ -10,61 +11,59 @@ import traceback from pipenv.patched.notpip._internal.cli import cmdoptions +from pipenv.patched.notpip._internal.cli.command_context import CommandContextMixIn from pipenv.patched.notpip._internal.cli.parser import ( - ConfigOptionParser, UpdatingDefaultsHelpFormatter, + ConfigOptionParser, + UpdatingDefaultsHelpFormatter, ) from pipenv.patched.notpip._internal.cli.status_codes import ( - ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, UNKNOWN_ERROR, + ERROR, + PREVIOUS_BUILD_DIR_ERROR, + SUCCESS, + UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND, ) -from pipenv.patched.notpip._internal.download import PipSession from pipenv.patched.notpip._internal.exceptions import ( - BadCommand, CommandError, InstallationError, PreviousBuildDirError, + BadCommand, + CommandError, + InstallationError, + PreviousBuildDirError, UninstallationError, ) -from pipenv.patched.notpip._internal.index import PackageFinder -from pipenv.patched.notpip._internal.locations import running_under_virtualenv -from pipenv.patched.notpip._internal.req.constructors import ( - install_req_from_editable, install_req_from_line, -) -from pipenv.patched.notpip._internal.req.req_file import parse_requirements from pipenv.patched.notpip._internal.utils.deprecation import deprecated from pipenv.patched.notpip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging -from pipenv.patched.notpip._internal.utils.misc import ( - get_prog, normalize_path, redact_password_from_url, -) -from pipenv.patched.notpip._internal.utils.outdated import 
pip_version_check +from pipenv.patched.notpip._internal.utils.misc import get_prog from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.virtualenv import running_under_virtualenv if MYPY_CHECK_RUNNING: - from typing import Optional, List, Tuple, Any # noqa: F401 - from optparse import Values # noqa: F401 - from pipenv.patched.notpip._internal.cache import WheelCache # noqa: F401 - from pipenv.patched.notpip._internal.req.req_set import RequirementSet # noqa: F401 + from typing import List, Tuple, Any + from optparse import Values __all__ = ['Command'] logger = logging.getLogger(__name__) -class Command(object): - name = None # type: Optional[str] - usage = None # type: Optional[str] - hidden = False # type: bool +class Command(CommandContextMixIn): + usage = None # type: str ignore_require_venv = False # type: bool - def __init__(self, isolated=False): - # type: (bool) -> None + def __init__(self, name, summary, isolated=False): + # type: (str, str, bool) -> None + super(Command, self).__init__() parser_kw = { 'usage': self.usage, - 'prog': '%s %s' % (get_prog(), self.name), + 'prog': '%s %s' % (get_prog(), name), 'formatter': UpdatingDefaultsHelpFormatter(), 'add_help_option': False, - 'name': self.name, + 'name': name, 'description': self.__doc__, 'isolated': isolated, } + self.name = name + self.summary = summary self.parser = ConfigOptionParser(**parser_kw) # Commands should add options to this option group @@ -78,53 +77,34 @@ def __init__(self, isolated=False): ) self.parser.add_option_group(gen_opts) + def handle_pip_version_check(self, options): + # type: (Values) -> None + """ + This is a no-op so that commands by default do not do the pip version + check. + """ + # Make sure we do the pip version check if the index_group options + # are present. + assert not hasattr(options, 'no_index') + def run(self, options, args): # type: (Values, List[Any]) -> Any raise NotImplementedError - def _build_session(self, options, retries=None, timeout=None): - # type: (Values, Optional[int], Optional[int]) -> PipSession - session = PipSession( - cache=( - normalize_path(os.path.join(options.cache_dir, "http")) - if options.cache_dir else None - ), - retries=retries if retries is not None else options.retries, - insecure_hosts=options.trusted_hosts, - ) - - # Handle custom ca-bundles from the user - if options.cert: - session.verify = options.cert - - # Handle SSL client certificate - if options.client_cert: - session.cert = options.client_cert - - # Handle timeouts - if options.timeout or timeout: - session.timeout = ( - timeout if timeout is not None else options.timeout - ) - - # Handle configured proxies - if options.proxy: - session.proxies = { - "http": options.proxy, - "https": options.proxy, - } - - # Determine if we can prompt the user for authentication or not - session.auth.prompting = not options.no_input - - return session - def parse_args(self, args): # type: (List[str]) -> Tuple # factored out for testability return self.parser.parse_args(args) def main(self, args): + # type: (List[str]) -> int + try: + with self.main_context(): + return self._main(args) + finally: + logging.shutdown() + + def _main(self, args): # type: (List[str]) -> int options, args = self.parse_args(args) @@ -137,17 +117,11 @@ def main(self, args): user_log_file=options.log, ) - if sys.version_info[:2] == (3, 4): - deprecated( - "Python 3.4 support has been deprecated. pip 19.1 will be the " - "last one supporting it. 
Please upgrade your Python as Python " - "3.4 won't be maintained after March 2019 (cf PEP 429).", - replacement=None, - gone_in='19.2', - ) - elif sys.version_info[:2] == (2, 7): + if sys.version_info[:2] == (2, 7): message = ( - "A future version of pip will drop support for Python 2.7." + "A future version of pip will drop support for Python 2.7. " + "More details about Python 2 support in pip, can be found at " + "https://pip.pypa.io/en/latest/development/release-process/#python-2-support" # noqa ) if platform.python_implementation() == "CPython": message = ( @@ -192,7 +166,7 @@ def main(self, args): return ERROR except CommandError as exc: - logger.critical('ERROR: %s', exc) + logger.critical('%s', exc) logger.debug('Exception information:', exc_info=True) return ERROR @@ -214,128 +188,6 @@ def main(self, args): return UNKNOWN_ERROR finally: - allow_version_check = ( - # Does this command have the index_group options? - hasattr(options, "no_index") and - # Is this command allowed to perform this check? - not (options.disable_pip_version_check or options.no_index) - ) - # Check if we're using the latest version of pip available - if allow_version_check: - session = self._build_session( - options, - retries=0, - timeout=min(5, options.timeout) - ) - with session: - pip_version_check(session, options) - - # Shutdown the logging module - logging.shutdown() + self.handle_pip_version_check(options) return SUCCESS - - -class RequirementCommand(Command): - - @staticmethod - def populate_requirement_set(requirement_set, # type: RequirementSet - args, # type: List[str] - options, # type: Values - finder, # type: PackageFinder - session, # type: PipSession - name, # type: str - wheel_cache # type: Optional[WheelCache] - ): - # type: (...) -> None - """ - Marshal cmd line args into a requirement set. 
- """ - # NOTE: As a side-effect, options.require_hashes and - # requirement_set.require_hashes may be updated - - for filename in options.constraints: - for req_to_add in parse_requirements( - filename, - constraint=True, finder=finder, options=options, - session=session, wheel_cache=wheel_cache): - req_to_add.is_direct = True - requirement_set.add_requirement(req_to_add) - - for req in args: - req_to_add = install_req_from_line( - req, None, isolated=options.isolated_mode, - use_pep517=options.use_pep517, - wheel_cache=wheel_cache - ) - req_to_add.is_direct = True - requirement_set.add_requirement(req_to_add) - - for req in options.editables: - req_to_add = install_req_from_editable( - req, - isolated=options.isolated_mode, - use_pep517=options.use_pep517, - wheel_cache=wheel_cache - ) - req_to_add.is_direct = True - requirement_set.add_requirement(req_to_add) - - for filename in options.requirements: - for req_to_add in parse_requirements( - filename, - finder=finder, options=options, session=session, - wheel_cache=wheel_cache, - use_pep517=options.use_pep517): - req_to_add.is_direct = True - requirement_set.add_requirement(req_to_add) - # If --require-hashes was a line in a requirements file, tell - # RequirementSet about it: - requirement_set.require_hashes = options.require_hashes - - if not (args or options.editables or options.requirements): - opts = {'name': name} - if options.find_links: - raise CommandError( - 'You must give at least one requirement to %(name)s ' - '(maybe you meant "pip %(name)s %(links)s"?)' % - dict(opts, links=' '.join(options.find_links))) - else: - raise CommandError( - 'You must give at least one requirement to %(name)s ' - '(see "pip help %(name)s")' % opts) - - def _build_package_finder( - self, - options, # type: Values - session, # type: PipSession - platform=None, # type: Optional[str] - python_versions=None, # type: Optional[List[str]] - abi=None, # type: Optional[str] - implementation=None # type: Optional[str] - ): - # type: (...) -> PackageFinder - """ - Create a package finder appropriate to this requirement command. - """ - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.debug( - 'Ignoring indexes: %s', - ','.join(redact_password_from_url(url) for url in index_urls), - ) - index_urls = [] - - return PackageFinder( - find_links=options.find_links, - format_control=options.format_control, - index_urls=index_urls, - trusted_hosts=options.trusted_hosts, - allow_all_prereleases=options.pre, - session=session, - platform=platform, - versions=python_versions, - abi=abi, - implementation=implementation, - prefer_binary=options.prefer_binary, - ) diff --git a/pipenv/patched/notpip/_internal/cli/cmdoptions.py b/pipenv/patched/notpip/_internal/cli/cmdoptions.py index 3c5a70848c..ba6166d93e 100644 --- a/pipenv/patched/notpip/_internal/cli/cmdoptions.py +++ b/pipenv/patched/notpip/_internal/cli/cmdoptions.py @@ -5,28 +5,37 @@ globally. One reason being that options with action='append' can carry state between parses. pip parses general options twice internally, and shouldn't pass on state. To be consistent, all options will follow this design. - """ + +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import +import logging import textwrap import warnings from distutils.util import strtobool from functools import partial from optparse import SUPPRESS_HELP, Option, OptionGroup +from textwrap import dedent from pipenv.patched.notpip._internal.exceptions import CommandError -from pipenv.patched.notpip._internal.locations import USER_CACHE_DIR, src_prefix +from pipenv.patched.notpip._internal.locations import USER_CACHE_DIR, get_src_prefix from pipenv.patched.notpip._internal.models.format_control import FormatControl from pipenv.patched.notpip._internal.models.index import PyPI +from pipenv.patched.notpip._internal.models.target_python import TargetPython from pipenv.patched.notpip._internal.utils.hashes import STRONG_HASHES from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING from pipenv.patched.notpip._internal.utils.ui import BAR_TYPES if MYPY_CHECK_RUNNING: - from typing import Any, Callable, Dict, List, Optional, Union # noqa: F401 - from optparse import OptionParser, Values # noqa: F401 - from pipenv.patched.notpip._internal.cli.parser import ConfigOptionParser # noqa: F401 + from typing import Any, Callable, Dict, Optional, Tuple + from optparse import OptionParser, Values + from pipenv.patched.notpip._internal.cli.parser import ConfigOptionParser + +logger = logging.getLogger(__name__) def raise_option_error(parser, option, msg): @@ -101,7 +110,7 @@ def check_dist_restriction(options, check_target=False): # Installations or downloads using dist restrictions must not combine # source distributions and dist-specific wheels, as they are not - # gauranteed to be locally compatible. + # guaranteed to be locally compatible. if dist_restriction_set and sdist_dependencies_allowed: raise CommandError( "When restricting platform and interpreter constraints using " @@ -275,7 +284,7 @@ def exists_action(): action='append', metavar='action', help="Default action when a path already exists: " - "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort).", + "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.", ) @@ -305,7 +314,7 @@ def exists_action(): dest='index_url', metavar='URL', default=PyPI.simple_url, - help="Base URL of Python Package Index (default %default). " + help="Base URL of the Python Package Index (default %default). " "This should point to a repository compliant with PEP 503 " "(the simple repository API) or a local directory laid out " "in the same format.", @@ -357,8 +366,8 @@ def trusted_host(): action="append", metavar="HOSTNAME", default=[], - help="Mark this host as trusted, even though it does not have valid " - "or any HTTPS.", + help="Mark this host or host:port pair as trusted, even though it " + "does not have valid or any HTTPS.", ) @@ -406,7 +415,7 @@ def editable(): '--src', '--source', '--source-dir', '--source-directory', dest='src_dir', metavar='dir', - default=src_prefix, + default=get_src_prefix(), help='Directory to check out editable projects into. ' 'The default in a virtualenv is "/src". ' 'The default for global installs is "/src".' @@ -445,9 +454,9 @@ def no_binary(): help="Do not use binary packages. Can be supplied multiple times, and " "each time adds to the existing value. Accepts either :all: to " "disable all binary packages, :none: to empty the set, or one or " - "more package names with commas between them. 
Note that some " - "packages are tricky to compile and may fail to install when " - "this option is used on them.", + "more package names with commas between them (no colons). Note " + "that some packages are tricky to compile and may fail to " + "install when this option is used on them.", ) @@ -478,18 +487,69 @@ def only_binary(): ) # type: Callable[..., Option] +# This was made a separate function for unit-testing purposes. +def _convert_python_version(value): + # type: (str) -> Tuple[Tuple[int, ...], Optional[str]] + """ + Convert a version string like "3", "37", or "3.7.3" into a tuple of ints. + + :return: A 2-tuple (version_info, error_msg), where `error_msg` is + non-None if and only if there was a parsing error. + """ + if not value: + # The empty string is the same as not providing a value. + return (None, None) + + parts = value.split('.') + if len(parts) > 3: + return ((), 'at most three version parts are allowed') + + if len(parts) == 1: + # Then we are in the case of "3" or "37". + value = parts[0] + if len(value) > 1: + parts = [value[0], value[1:]] + + try: + version_info = tuple(int(part) for part in parts) + except ValueError: + return ((), 'each version part must be an integer') + + return (version_info, None) + + +def _handle_python_version(option, opt_str, value, parser): + # type: (Option, str, str, OptionParser) -> None + """ + Handle a provided --python-version value. + """ + version_info, error_msg = _convert_python_version(value) + if error_msg is not None: + msg = ( + 'invalid --python-version value: {!r}: {}'.format( + value, error_msg, + ) + ) + raise_option_error(parser, option=option, msg=msg) + + parser.values.python_version = version_info + + python_version = partial( Option, '--python-version', dest='python_version', metavar='python_version', + action='callback', + callback=_handle_python_version, type='str', default=None, - help=("Only use wheels compatible with Python " - "interpreter version . If not specified, then the " - "current system interpreter minor version is used. A major " - "version (e.g. '2') can be specified to match all " - "minor revs of that major version. A minor version " - "(e.g. '34') can also be specified."), + help=dedent("""\ + The Python interpreter version to use for wheel and "Requires-Python" + compatibility checks. Defaults to a version derived from the running + interpreter. The version can be specified using up to three dot-separated + integers (e.g. "3" for 3.0.0, "3.7" for 3.7.0, or "3.7.3"). A major-minor + version can also be given as a string without dots (e.g. "37" for 3.7.0). + """), ) # type: Callable[..., Option] @@ -522,6 +582,26 @@ def only_binary(): ) # type: Callable[..., Option] +def add_target_python_options(cmd_opts): + # type: (OptionGroup) -> None + cmd_opts.add_option(platform()) + cmd_opts.add_option(python_version()) + cmd_opts.add_option(implementation()) + cmd_opts.add_option(abi()) + + +def make_target_python(options): + # type: (Values) -> TargetPython + target_python = TargetPython( + platform=options.platform, + py_version_info=options.python_version, + abi=options.abi, + implementation=options.implementation, + ) + + return target_python + + def prefer_binary(): # type: () -> Option return Option( @@ -543,7 +623,8 @@ def prefer_binary(): ) # type: Callable[..., Option] -def no_cache_dir_callback(option, opt, value, parser): +def _handle_no_cache_dir(option, opt, value, parser): + # type: (Option, str, str, OptionParser) -> None """ Process a value provided for the --no-cache-dir option. 
@@ -575,7 +656,7 @@ def no_cache_dir_callback(option, opt, value, parser): "--no-cache-dir", dest="cache_dir", action="callback", - callback=no_cache_dir_callback, + callback=_handle_no_cache_dir, help="Disable the cache.", ) # type: Callable[..., Option] @@ -620,7 +701,8 @@ def no_cache_dir_callback(option, opt, value, parser): ) # type: Callable[..., Option] -def no_use_pep517_callback(option, opt, value, parser): +def _handle_no_use_pep517(option, opt, value, parser): + # type: (Option, str, str, OptionParser) -> None """ Process a value provided for the --no-use-pep517 option. @@ -658,7 +740,7 @@ def no_use_pep517_callback(option, opt, value, parser): '--no-use-pep517', dest='use_pep517', action='callback', - callback=no_use_pep517_callback, + callback=_handle_no_use_pep517, default=None, help=SUPPRESS_HELP ) # type: Any @@ -724,12 +806,12 @@ def no_use_pep517_callback(option, opt, value, parser): ) # type: Callable[..., Option] -def _merge_hash(option, opt_str, value, parser): +def _handle_merge_hash(option, opt_str, value, parser): # type: (Option, str, str, OptionParser) -> None """Given a value spelled "algo:digest", append the digest to a list pointed to in a dict by the algo name.""" if not parser.values.hashes: - parser.values.hashes = {} # type: ignore + parser.values.hashes = {} try: algo, digest = value.split(':', 1) except ValueError: @@ -749,7 +831,7 @@ def _merge_hash(option, opt_str, value, parser): # __dict__ copying in process_line(). dest='hashes', action='callback', - callback=_merge_hash, + callback=_handle_merge_hash, type='string', help="Verify that the package's archive matches this " 'hash before installing. Example: --hash=sha256:abcdef...', @@ -768,6 +850,24 @@ def _merge_hash(option, opt_str, value, parser): ) # type: Callable[..., Option] +list_path = partial( + Option, + '--path', + dest='path', + action='append', + help='Restrict to the specified installation path for listing ' + 'packages (can be used multiple times).' +) # type: Callable[..., Option] + + +def check_list_path_option(options): + # type: (Values) -> None + if options.path and (options.user or options.local): + raise CommandError( + "Cannot combine '--path' with '--user' or '--local'" + ) + + ########## # groups # ########## diff --git a/pipenv/patched/notpip/_internal/cli/command_context.py b/pipenv/patched/notpip/_internal/cli/command_context.py new file mode 100644 index 0000000000..d529fb6712 --- /dev/null +++ b/pipenv/patched/notpip/_internal/cli/command_context.py @@ -0,0 +1,29 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + +from contextlib import contextmanager + +from pipenv.patched.notpip._vendor.contextlib2 import ExitStack + + +class CommandContextMixIn(object): + def __init__(self): + super(CommandContextMixIn, self).__init__() + self._in_main_context = False + self._main_context = ExitStack() + + @contextmanager + def main_context(self): + assert not self._in_main_context + + self._in_main_context = True + try: + with self._main_context: + yield + finally: + self._in_main_context = False + + def enter_context(self, context_provider): + assert self._in_main_context + + return self._main_context.enter_context(context_provider) diff --git a/pipenv/patched/notpip/_internal/cli/main_parser.py b/pipenv/patched/notpip/_internal/cli/main_parser.py index 704bf40411..769a72812a 100644 --- a/pipenv/patched/notpip/_internal/cli/main_parser.py +++ b/pipenv/patched/notpip/_internal/cli/main_parser.py @@ -4,20 +4,18 @@ import os import sys -from pipenv.patched.notpip import __version__ from pipenv.patched.notpip._internal.cli import cmdoptions from pipenv.patched.notpip._internal.cli.parser import ( - ConfigOptionParser, UpdatingDefaultsHelpFormatter, -) -from pipenv.patched.notpip._internal.commands import ( - commands_dict, get_similar_commands, get_summaries, + ConfigOptionParser, + UpdatingDefaultsHelpFormatter, ) +from pipenv.patched.notpip._internal.commands import commands_dict, get_similar_commands from pipenv.patched.notpip._internal.exceptions import CommandError -from pipenv.patched.notpip._internal.utils.misc import get_prog +from pipenv.patched.notpip._internal.utils.misc import get_pip_version, get_prog from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import Tuple, List # noqa: F401 + from typing import Tuple, List __all__ = ["create_main_parser", "parse_command"] @@ -39,12 +37,7 @@ def create_main_parser(): parser = ConfigOptionParser(**parser_kw) parser.disable_interspersed_args() - pip_pkg_dir = os.path.abspath(os.path.join( - os.path.dirname(__file__), "..", "..", - )) - parser.version = 'pip %s from %s (python %s)' % ( - __version__, pip_pkg_dir, sys.version[:3], - ) + parser.version = get_pip_version() # add the general options gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) @@ -54,8 +47,10 @@ def create_main_parser(): parser.main = True # type: ignore # create command listing for description - command_summaries = get_summaries() - description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] + description = [''] + [ + '%-27s %s' % (name, command_info.summary) + for name, command_info in commands_dict.items() + ] parser.description = '\n'.join(description) return parser diff --git a/pipenv/patched/notpip/_internal/cli/parser.py b/pipenv/patched/notpip/_internal/cli/parser.py index 2d2c8f4d7b..3d5abd4666 100644 --- a/pipenv/patched/notpip/_internal/cli/parser.py +++ b/pipenv/patched/notpip/_internal/cli/parser.py @@ -1,4 +1,8 @@ """Base option parser setup""" + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging diff --git a/pipenv/patched/notpip/_internal/cli/req_command.py b/pipenv/patched/notpip/_internal/cli/req_command.py new file mode 100644 index 0000000000..ff76aeb876 --- /dev/null +++ b/pipenv/patched/notpip/_internal/cli/req_command.py @@ -0,0 +1,304 @@ +"""Contains the Command base classes that depend on PipSession. 
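+
+For example, InstallCommand and DownloadCommand derive from
+RequirementCommand here, while lighter commands such as FreezeCommand
+extend base_command.Command directly.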
+ +The classes in this module are in a separate module so the commands not +needing download / PackageFinder capability don't unnecessarily import the +PackageFinder machinery and all its vendored dependencies, etc. +""" + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + +import os +from functools import partial + +from pipenv.patched.notpip._internal.cli.base_command import Command +from pipenv.patched.notpip._internal.cli.command_context import CommandContextMixIn +from pipenv.patched.notpip._internal.exceptions import CommandError +from pipenv.patched.notpip._internal.index import PackageFinder +from pipenv.patched.notpip._internal.legacy_resolve import Resolver +from pipenv.patched.notpip._internal.models.selection_prefs import SelectionPreferences +from pipenv.patched.notpip._internal.network.session import PipSession +from pipenv.patched.notpip._internal.operations.prepare import RequirementPreparer +from pipenv.patched.notpip._internal.req.constructors import ( + install_req_from_editable, + install_req_from_line, + install_req_from_req_string, +) +from pipenv.patched.notpip._internal.req.req_file import parse_requirements +from pipenv.patched.notpip._internal.self_outdated_check import ( + make_link_collector, + pip_self_version_check, +) +from pipenv.patched.notpip._internal.utils.misc import normalize_path +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from optparse import Values + from typing import List, Optional, Tuple + from pipenv.patched.notpip._internal.cache import WheelCache + from pipenv.patched.notpip._internal.models.target_python import TargetPython + from pipenv.patched.notpip._internal.req.req_set import RequirementSet + from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker + from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory + + +class SessionCommandMixin(CommandContextMixIn): + + """ + A class mixin for command classes needing _build_session(). 
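+
+    A session obtained via get_default_session() is registered with the
+    command's main_context() exit stack, so it is closed automatically
+    when the command finishes.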
+ """ + def __init__(self): + super(SessionCommandMixin, self).__init__() + self._session = None # Optional[PipSession] + + @classmethod + def _get_index_urls(cls, options): + """Return a list of index urls from user-provided options.""" + index_urls = [] + if not getattr(options, "no_index", False): + url = getattr(options, "index_url", None) + if url: + index_urls.append(url) + urls = getattr(options, "extra_index_urls", None) + if urls: + index_urls.extend(urls) + # Return None rather than an empty list + return index_urls or None + + def get_default_session(self, options): + # type: (Values) -> PipSession + """Get a default-managed session.""" + if self._session is None: + self._session = self.enter_context(self._build_session(options)) + return self._session + + def _build_session(self, options, retries=None, timeout=None): + # type: (Values, Optional[int], Optional[int]) -> PipSession + session = PipSession( + cache=( + normalize_path(os.path.join(options.cache_dir, "http")) + if options.cache_dir else None + ), + retries=retries if retries is not None else options.retries, + trusted_hosts=options.trusted_hosts, + index_urls=self._get_index_urls(options), + ) + + # Handle custom ca-bundles from the user + if options.cert: + session.verify = options.cert + + # Handle SSL client certificate + if options.client_cert: + session.cert = options.client_cert + + # Handle timeouts + if options.timeout or timeout: + session.timeout = ( + timeout if timeout is not None else options.timeout + ) + + # Handle configured proxies + if options.proxy: + session.proxies = { + "http": options.proxy, + "https": options.proxy, + } + + # Determine if we can prompt the user for authentication or not + session.auth.prompting = not options.no_input + + return session + + +class IndexGroupCommand(Command, SessionCommandMixin): + + """ + Abstract base class for commands with the index_group options. + + This also corresponds to the commands that permit the pip version check. + """ + + def handle_pip_version_check(self, options): + # type: (Values) -> None + """ + Do the pip version check if not disabled. + + This overrides the default behavior of not doing the check. + """ + # Make sure the index_group options are present. + assert hasattr(options, 'no_index') + + if options.disable_pip_version_check or options.no_index: + return + + # Otherwise, check if we're using the latest version of pip available. + session = self._build_session( + options, + retries=0, + timeout=min(5, options.timeout) + ) + with session: + pip_self_version_check(session, options) + + +class RequirementCommand(IndexGroupCommand): + + @staticmethod + def make_requirement_preparer( + temp_build_dir, # type: TempDirectory + options, # type: Values + req_tracker, # type: RequirementTracker + download_dir=None, # type: str + wheel_download_dir=None, # type: str + ): + # type: (...) -> RequirementPreparer + """ + Create a RequirementPreparer instance for the given parameters. 
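+
+        The given temp_build_dir must already have a concrete path; it is
+        asserted non-None below. download_dir and wheel_download_dir are
+        only supplied by commands that divert fetched artifacts, e.g. the
+        download command.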
+ """ + temp_build_dir_path = temp_build_dir.path + assert temp_build_dir_path is not None + return RequirementPreparer( + build_dir=temp_build_dir_path, + src_dir=options.src_dir, + download_dir=download_dir, + wheel_download_dir=wheel_download_dir, + progress_bar=options.progress_bar, + build_isolation=options.build_isolation, + req_tracker=req_tracker, + ) + + @staticmethod + def make_resolver( + preparer, # type: RequirementPreparer + session, # type: PipSession + finder, # type: PackageFinder + options, # type: Values + wheel_cache=None, # type: Optional[WheelCache] + use_user_site=False, # type: bool + ignore_installed=True, # type: bool + ignore_requires_python=False, # type: bool + force_reinstall=False, # type: bool + upgrade_strategy="to-satisfy-only", # type: str + use_pep517=None, # type: Optional[bool] + py_version_info=None # type: Optional[Tuple[int, ...]] + ): + # type: (...) -> Resolver + """ + Create a Resolver instance for the given parameters. + """ + make_install_req = partial( + install_req_from_req_string, + isolated=options.isolated_mode, + wheel_cache=wheel_cache, + use_pep517=use_pep517, + ) + return Resolver( + preparer=preparer, + session=session, + finder=finder, + make_install_req=make_install_req, + use_user_site=use_user_site, + ignore_dependencies=options.ignore_dependencies, + ignore_installed=ignore_installed, + ignore_requires_python=ignore_requires_python, + force_reinstall=force_reinstall, + upgrade_strategy=upgrade_strategy, + py_version_info=py_version_info + ) + + def populate_requirement_set( + self, + requirement_set, # type: RequirementSet + args, # type: List[str] + options, # type: Values + finder, # type: PackageFinder + session, # type: PipSession + wheel_cache, # type: Optional[WheelCache] + ): + # type: (...) -> None + """ + Marshal cmd line args into a requirement set. 
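+
+        Requirements are gathered from four sources, in order: constraint
+        files, positional arguments, editables, and requirements files.
+        Each requirement is marked is_direct before being added.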
+ """ + # NOTE: As a side-effect, options.require_hashes and + # requirement_set.require_hashes may be updated + + for filename in options.constraints: + for req_to_add in parse_requirements( + filename, + constraint=True, finder=finder, options=options, + session=session, wheel_cache=wheel_cache): + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) + + for req in args: + req_to_add = install_req_from_line( + req, None, isolated=options.isolated_mode, + use_pep517=options.use_pep517, + wheel_cache=wheel_cache + ) + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) + + for req in options.editables: + req_to_add = install_req_from_editable( + req, + isolated=options.isolated_mode, + use_pep517=options.use_pep517, + wheel_cache=wheel_cache + ) + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) + + for filename in options.requirements: + for req_to_add in parse_requirements( + filename, + finder=finder, options=options, session=session, + wheel_cache=wheel_cache, + use_pep517=options.use_pep517): + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) + # If --require-hashes was a line in a requirements file, tell + # RequirementSet about it: + requirement_set.require_hashes = options.require_hashes + + if not (args or options.editables or options.requirements): + opts = {'name': self.name} + if options.find_links: + raise CommandError( + 'You must give at least one requirement to %(name)s ' + '(maybe you meant "pip %(name)s %(links)s"?)' % + dict(opts, links=' '.join(options.find_links))) + else: + raise CommandError( + 'You must give at least one requirement to %(name)s ' + '(see "pip help %(name)s")' % opts) + + def _build_package_finder( + self, + options, # type: Values + session, # type: PipSession + target_python=None, # type: Optional[TargetPython] + ignore_requires_python=None, # type: Optional[bool] + ): + # type: (...) -> PackageFinder + """ + Create a package finder appropriate to this requirement command. + + :param ignore_requires_python: Whether to ignore incompatible + "Requires-Python" values in links. Defaults to False. + """ + link_collector = make_link_collector(session, options=options) + selection_prefs = SelectionPreferences( + allow_yanked=True, + format_control=options.format_control, + allow_all_prereleases=options.pre, + prefer_binary=options.prefer_binary, + ignore_requires_python=ignore_requires_python, + ) + + return PackageFinder.create( + link_collector=link_collector, + selection_prefs=selection_prefs, + target_python=target_python, + ) diff --git a/pipenv/patched/notpip/_internal/collector.py b/pipenv/patched/notpip/_internal/collector.py new file mode 100644 index 0000000000..1469cb7ce8 --- /dev/null +++ b/pipenv/patched/notpip/_internal/collector.py @@ -0,0 +1,548 @@ +""" +The main purpose of this module is to expose LinkCollector.collect_links(). +""" + +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + +import cgi +import itertools +import logging +import mimetypes +import os +from collections import OrderedDict + +from pipenv.patched.notpip._vendor import html5lib, requests +from pipenv.patched.notpip._vendor.distlib.compat import unescape +from pipenv.patched.notpip._vendor.requests.exceptions import HTTPError, RetryError, SSLError +from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse +from pipenv.patched.notpip._vendor.six.moves.urllib import request as urllib_request + +from pipenv.patched.notpip._internal.models.link import Link +from pipenv.patched.notpip._internal.utils.filetypes import ARCHIVE_EXTENSIONS +from pipenv.patched.notpip._internal.utils.misc import redact_auth_from_url +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.urls import path_to_url, url_to_path +from pipenv.patched.notpip._internal.vcs import is_url, vcs + +if MYPY_CHECK_RUNNING: + from typing import ( + Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, + Tuple, Union, + ) + import xml.etree.ElementTree + + from pipenv.patched.notpip._vendor.requests import Response + + from pipenv.patched.notpip._internal.models.search_scope import SearchScope + from pipenv.patched.notpip._internal.network.session import PipSession + + HTMLElement = xml.etree.ElementTree.Element + ResponseHeaders = MutableMapping[str, str] + + +logger = logging.getLogger(__name__) + + +def _match_vcs_scheme(url): + # type: (str) -> Optional[str] + """Look for VCS schemes in the URL. + + Returns the matched VCS scheme, or None if there's no match. + """ + for scheme in vcs.schemes: + if url.lower().startswith(scheme) and url[len(scheme)] in '+:': + return scheme + return None + + +def _is_url_like_archive(url): + # type: (str) -> bool + """Return whether the URL looks like an archive. + """ + filename = Link(url).filename + for bad_ext in ARCHIVE_EXTENSIONS: + if filename.endswith(bad_ext): + return True + return False + + +class _NotHTML(Exception): + def __init__(self, content_type, request_desc): + # type: (str, str) -> None + super(_NotHTML, self).__init__(content_type, request_desc) + self.content_type = content_type + self.request_desc = request_desc + + +def _ensure_html_header(response): + # type: (Response) -> None + """Check the Content-Type header to ensure the response contains HTML. + + Raises `_NotHTML` if the content type is not text/html. + """ + content_type = response.headers.get("Content-Type", "") + if not content_type.lower().startswith("text/html"): + raise _NotHTML(content_type, response.request.method) + + +class _NotHTTP(Exception): + pass + + +def _ensure_html_response(url, session): + # type: (str, PipSession) -> None + """Send a HEAD request to the URL, and ensure the response contains HTML. + + Raises `_NotHTTP` if the URL is not available for a HEAD request, or + `_NotHTML` if the content type is not text/html. + """ + scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) + if scheme not in {'http', 'https'}: + raise _NotHTTP() + + resp = session.head(url, allow_redirects=True) + resp.raise_for_status() + + _ensure_html_header(resp) + + +def _get_html_response(url, session): + # type: (str, PipSession) -> Response + """Access an HTML page with GET, and return the response. + + This consists of three parts: + + 1. If the URL looks suspiciously like an archive, send a HEAD first to + check the Content-Type is HTML, to avoid downloading a large file. 
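+       ("Suspicious" means the path component ends in a known archive
+       extension; see _is_url_like_archive() above.)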
+       Raise `_NotHTTP` if the content type cannot be determined, or
+       `_NotHTML` if it is not HTML.
+    2. Actually perform the request. Raise HTTP exceptions on network failures.
+    3. Check the Content-Type header to make sure we got HTML, and raise
+       `_NotHTML` otherwise.
+    """
+    if _is_url_like_archive(url):
+        _ensure_html_response(url, session=session)
+
+    logger.debug('Getting page %s', redact_auth_from_url(url))
+
+    resp = session.get(
+        url,
+        headers={
+            "Accept": "text/html",
+            # We don't want to blindly return cached data for
+            # /simple/, because authors generally expect that
+            # twine upload && pip install will function, but if
+            # they've done a pip install in the last ~10 minutes
+            # it won't. Thus by setting this to zero we will not
+            # blindly use any cached data, however the benefit of
+            # using max-age=0 instead of no-cache, is that we will
+            # still support conditional requests, so we will still
+            # minimize traffic sent in cases where the page hasn't
+            # changed at all, we will just always incur the round
+            # trip for the conditional GET now instead of only
+            # once per 10 minutes.
+            # For more information, please see pypa/pip#5670.
+            "Cache-Control": "max-age=0",
+        },
+    )
+    resp.raise_for_status()
+
+    # The check for archives above only works if the url ends with
+    # something that looks like an archive. However that is not a
+    # requirement of an url. Unless we issue a HEAD request on every
+    # url we cannot know ahead of time for sure if something is HTML
+    # or not. However we can check after we've downloaded it.
+    _ensure_html_header(resp)
+
+    return resp
+
+
+def _get_encoding_from_headers(headers):
+    # type: (ResponseHeaders) -> Optional[str]
+    """Determine if we have any encoding information in our headers.
+    """
+    if headers and "Content-Type" in headers:
+        content_type, params = cgi.parse_header(headers["Content-Type"])
+        if "charset" in params:
+            return params['charset']
+    return None
+
+
+def _determine_base_url(document, page_url):
+    # type: (HTMLElement, str) -> str
+    """Determine the HTML document's base URL.
+
+    This looks for a ``<base>`` tag in the HTML document. If present, its href
+    attribute denotes the base URL of anchor tags in the document. If there is
+    no such tag (or if it does not have a valid href attribute), the HTML
+    file's URL is used as the base URL.
+
+    :param document: An HTML document representation. The current
+        implementation expects the result of ``html5lib.parse()``.
+    :param page_url: The URL of the HTML document.
+    """
+    for base in document.findall(".//base"):
+        href = base.get("href")
+        if href is not None:
+            return href
+    return page_url
+
+
+def _clean_link(url):
+    # type: (str) -> str
+    """Makes sure a link is fully encoded. That is, if a ' ' shows up in
+    the link, it will be rewritten to %20 (while not over-quoting
+    % or other characters)."""
+    # Split the URL into parts according to the general structure
+    # `scheme://netloc/path;parameters?query#fragment`. Note that the
+    # `netloc` can be empty and the URI will then refer to a local
+    # filesystem path.
+    result = urllib_parse.urlparse(url)
+    # In both cases below we unquote prior to quoting to make sure
+    # nothing is double quoted.
+    if result.netloc == "":
+        # On Windows the path part might contain a drive letter which
+        # should not be quoted. On Linux where drive letters do not
+        # exist, the colon should be quoted. We rely on urllib.request
+        # to do the right thing here.
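+        # E.g. on Windows a "file:///C:/dir with spaces" URL keeps the
+        # drive-letter colon intact while the spaces become %20.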
+ path = urllib_request.pathname2url( + urllib_request.url2pathname(result.path)) + else: + # In addition to the `/` character we protect `@` so that + # revision strings in VCS URLs are properly parsed. + path = urllib_parse.quote(urllib_parse.unquote(result.path), safe="/@") + return urllib_parse.urlunparse(result._replace(path=path)) + + +def _create_link_from_element( + anchor, # type: HTMLElement + page_url, # type: str + base_url, # type: str +): + # type: (...) -> Optional[Link] + """ + Convert an anchor element in a simple repository page to a Link. + """ + href = anchor.get("href") + if not href: + return None + + url = _clean_link(urllib_parse.urljoin(base_url, href)) + pyrequire = anchor.get('data-requires-python') + pyrequire = unescape(pyrequire) if pyrequire else None + + yanked_reason = anchor.get('data-yanked') + if yanked_reason: + # This is a unicode string in Python 2 (and 3). + yanked_reason = unescape(yanked_reason) + + link = Link( + url, + comes_from=page_url, + requires_python=pyrequire, + yanked_reason=yanked_reason, + ) + + return link + + +def parse_links(page): + # type: (HTMLPage) -> Iterable[Link] + """ + Parse an HTML document, and yield its anchor elements as Link objects. + """ + document = html5lib.parse( + page.content, + transport_encoding=page.encoding, + namespaceHTMLElements=False, + ) + + url = page.url + base_url = _determine_base_url(document, url) + for anchor in document.findall(".//a"): + link = _create_link_from_element( + anchor, + page_url=url, + base_url=base_url, + ) + if link is None: + continue + yield link + + +class HTMLPage(object): + """Represents one page, along with its URL""" + + def __init__( + self, + content, # type: bytes + encoding, # type: Optional[str] + url, # type: str + ): + # type: (...) -> None + """ + :param encoding: the encoding to decode the given content. + :param url: the URL from which the HTML was downloaded. + """ + self.content = content + self.encoding = encoding + self.url = url + + def __str__(self): + return redact_auth_from_url(self.url) + + +def _handle_get_page_fail( + link, # type: Link + reason, # type: Union[str, Exception] + meth=None # type: Optional[Callable[..., None]] +): + # type: (...) -> None + if meth is None: + meth = logger.debug + meth("Could not fetch URL %s: %s - skipping", link, reason) + + +def _make_html_page(response): + # type: (Response) -> HTMLPage + encoding = _get_encoding_from_headers(response.headers) + return HTMLPage(response.content, encoding=encoding, url=response.url) + + +def _get_html_page(link, session=None): + # type: (Link, Optional[PipSession]) -> Optional[HTMLPage] + if session is None: + raise TypeError( + "_get_html_page() missing 1 required keyword argument: 'session'" + ) + + url = link.url.split('#', 1)[0] + + # Check for VCS schemes that do not support lookup as web pages. 
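+    # A URL like "git+https://host/repo.git" matches here and is skipped.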
+ vcs_scheme = _match_vcs_scheme(url) + if vcs_scheme: + logger.debug('Cannot look at %s URL %s', vcs_scheme, link) + return None + + # Tack index.html onto file:// URLs that point to directories + scheme, _, path, _, _, _ = urllib_parse.urlparse(url) + if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): + # add trailing slash if not present so urljoin doesn't trim + # final segment + if not url.endswith('/'): + url += '/' + url = urllib_parse.urljoin(url, 'index.html') + logger.debug(' file: URL is directory, getting %s', url) + + try: + resp = _get_html_response(url, session=session) + except _NotHTTP: + logger.debug( + 'Skipping page %s because it looks like an archive, and cannot ' + 'be checked by HEAD.', link, + ) + except _NotHTML as exc: + logger.debug( + 'Skipping page %s because the %s request got Content-Type: %s', + link, exc.request_desc, exc.content_type, + ) + except HTTPError as exc: + _handle_get_page_fail(link, exc) + except RetryError as exc: + _handle_get_page_fail(link, exc) + except SSLError as exc: + reason = "There was a problem confirming the ssl certificate: " + reason += str(exc) + _handle_get_page_fail(link, reason, meth=logger.info) + except requests.ConnectionError as exc: + _handle_get_page_fail(link, "connection error: %s" % exc) + except requests.Timeout: + _handle_get_page_fail(link, "timed out") + else: + return _make_html_page(resp) + return None + + +def _remove_duplicate_links(links): + # type: (Iterable[Link]) -> List[Link] + """ + Return a list of links, with duplicates removed and ordering preserved. + """ + # We preserve the ordering when removing duplicates because we can. + return list(OrderedDict.fromkeys(links)) + + +def group_locations(locations, expand_dir=False): + # type: (Sequence[str], bool) -> Tuple[List[str], List[str]] + """ + Divide a list of locations into two groups: "files" (archives) and "urls." + + :return: A pair of lists (files, urls). + """ + files = [] + urls = [] + + # puts the url for the given file path into the appropriate list + def sort_path(path): + url = path_to_url(path) + if mimetypes.guess_type(url, strict=False)[0] == 'text/html': + urls.append(url) + else: + files.append(url) + + for url in locations: + + is_local_path = os.path.exists(url) + is_file_url = url.startswith('file:') + + if is_local_path or is_file_url: + if is_local_path: + path = url + else: + path = url_to_path(url) + if os.path.isdir(path): + if expand_dir: + path = os.path.realpath(path) + for item in os.listdir(path): + sort_path(os.path.join(path, item)) + elif is_file_url: + urls.append(url) + else: + logger.warning( + "Path '{0}' is ignored: " + "it is a directory.".format(path), + ) + elif os.path.isfile(path): + sort_path(path) + else: + logger.warning( + "Url '%s' is ignored: it is neither a file " + "nor a directory.", url, + ) + elif is_url(url): + # Only add url with clear scheme + urls.append(url) + else: + logger.warning( + "Url '%s' is ignored. It is either a non-existing " + "path or lacks a specific scheme.", url, + ) + + return files, urls + + +class CollectedLinks(object): + + """ + Encapsulates all the Link objects collected by a call to + LinkCollector.collect_links(), stored separately as-- + + (1) links from the configured file locations, + (2) links from the configured find_links, and + (3) a dict mapping HTML page url to links from that page. + """ + + def __init__( + self, + files, # type: List[Link] + find_links, # type: List[Link] + pages, # type: Dict[str, List[Link]] + ): + # type: (...) 
-> None + """ + :param files: Links from file locations. + :param find_links: Links from find_links. + :param pages: A dict mapping HTML page url to links from that page. + """ + self.files = files + self.find_links = find_links + self.pages = pages + + +class LinkCollector(object): + + """ + Responsible for collecting Link objects from all configured locations, + making network requests as needed. + + The class's main method is its collect_links() method. + """ + + def __init__( + self, + session, # type: PipSession + search_scope, # type: SearchScope + ): + # type: (...) -> None + self.search_scope = search_scope + self.session = session + + @property + def find_links(self): + # type: () -> List[str] + return self.search_scope.find_links + + def _get_pages(self, locations): + # type: (Iterable[Link]) -> Iterable[HTMLPage] + """ + Yields (page, page_url) from the given locations, skipping + locations that have errors. + """ + for location in locations: + page = _get_html_page(location, session=self.session) + if page is None: + continue + + yield page + + def collect_links(self, project_name): + # type: (str) -> CollectedLinks + """Find all available links for the given project name. + + :return: All the Link objects (unfiltered), as a CollectedLinks object. + """ + search_scope = self.search_scope + index_locations = search_scope.get_index_urls_locations(project_name) + index_file_loc, index_url_loc = group_locations(index_locations) + fl_file_loc, fl_url_loc = group_locations( + self.find_links, expand_dir=True, + ) + + file_links = [ + Link(url) for url in itertools.chain(index_file_loc, fl_file_loc) + ] + + # We trust every directly linked archive in find_links + find_link_links = [Link(url, '-f') for url in self.find_links] + + # We trust every url that the user has given us whether it was given + # via --index-url or --find-links. + # We want to filter out anything that does not have a secure origin. + url_locations = [ + link for link in itertools.chain( + (Link(url) for url in index_url_loc), + (Link(url) for url in fl_url_loc), + ) + if self.session.is_secure_origin(link) + ] + + url_locations = _remove_duplicate_links(url_locations) + lines = [ + '{} location(s) to search for versions of {}:'.format( + len(url_locations), project_name, + ), + ] + for link in url_locations: + lines.append('* {}'.format(link)) + logger.debug('\n'.join(lines)) + + pages_links = {} + for page in self._get_pages(url_locations): + pages_links[page.url] = list(parse_links(page)) + + return CollectedLinks( + files=file_links, + find_links=find_link_links, + pages=pages_links, + ) diff --git a/pipenv/patched/notpip/_internal/commands/__init__.py b/pipenv/patched/notpip/_internal/commands/__init__.py index a403c6f9a7..abcafa5502 100644 --- a/pipenv/patched/notpip/_internal/commands/__init__.py +++ b/pipenv/patched/notpip/_internal/commands/__init__.py @@ -1,57 +1,103 @@ """ Package containing all pip commands """ + +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + from __future__ import absolute_import -from pipenv.patched.notpip._internal.commands.completion import CompletionCommand -from pipenv.patched.notpip._internal.commands.configuration import ConfigurationCommand -from pipenv.patched.notpip._internal.commands.download import DownloadCommand -from pipenv.patched.notpip._internal.commands.freeze import FreezeCommand -from pipenv.patched.notpip._internal.commands.hash import HashCommand -from pipenv.patched.notpip._internal.commands.help import HelpCommand -from pipenv.patched.notpip._internal.commands.list import ListCommand -from pipenv.patched.notpip._internal.commands.check import CheckCommand -from pipenv.patched.notpip._internal.commands.search import SearchCommand -from pipenv.patched.notpip._internal.commands.show import ShowCommand -from pipenv.patched.notpip._internal.commands.install import InstallCommand -from pipenv.patched.notpip._internal.commands.uninstall import UninstallCommand -from pipenv.patched.notpip._internal.commands.wheel import WheelCommand +import importlib +from collections import OrderedDict, namedtuple from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import List, Type # noqa: F401 - from pipenv.patched.notpip._internal.cli.base_command import Command # noqa: F401 - -commands_order = [ - InstallCommand, - DownloadCommand, - UninstallCommand, - FreezeCommand, - ListCommand, - ShowCommand, - CheckCommand, - ConfigurationCommand, - SearchCommand, - WheelCommand, - HashCommand, - CompletionCommand, - HelpCommand, -] # type: List[Type[Command]] - -commands_dict = {c.name: c for c in commands_order} - - -def get_summaries(ordered=True): - """Yields sorted (command name, command summary) tuples.""" - - if ordered: - cmditems = _sort_commands(commands_dict, commands_order) - else: - cmditems = commands_dict.items() - - for name, command_class in cmditems: - yield (name, command_class.summary) + from typing import Any + from pipenv.patched.notpip._internal.cli.base_command import Command + + +CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary') + +# The ordering matters for help display. +# Also, even though the module path starts with the same +# "pip._internal.commands" prefix in each case, we include the full path +# because it makes testing easier (specifically when modifying commands_dict +# in test setup / teardown by adding info for a FakeCommand class defined +# in a test-related module). +# Finally, we need to pass an iterable of pairs here rather than a dict +# so that the ordering won't be lost when using Python 2.7. 
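+# Each entry maps a command name to the module and class implementing it;
+# create_command() below imports that module lazily, on first use.
+# Usage sketch:
+#
+#     command = create_command('install')
+#     status = command.main(['--help'])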
+commands_dict = OrderedDict([ + ('install', CommandInfo( + 'pip._internal.commands.install', 'InstallCommand', + 'Install packages.', + )), + ('download', CommandInfo( + 'pip._internal.commands.download', 'DownloadCommand', + 'Download packages.', + )), + ('uninstall', CommandInfo( + 'pip._internal.commands.uninstall', 'UninstallCommand', + 'Uninstall packages.', + )), + ('freeze', CommandInfo( + 'pip._internal.commands.freeze', 'FreezeCommand', + 'Output installed packages in requirements format.', + )), + ('list', CommandInfo( + 'pip._internal.commands.list', 'ListCommand', + 'List installed packages.', + )), + ('show', CommandInfo( + 'pip._internal.commands.show', 'ShowCommand', + 'Show information about installed packages.', + )), + ('check', CommandInfo( + 'pip._internal.commands.check', 'CheckCommand', + 'Verify installed packages have compatible dependencies.', + )), + ('config', CommandInfo( + 'pip._internal.commands.configuration', 'ConfigurationCommand', + 'Manage local and global configuration.', + )), + ('search', CommandInfo( + 'pip._internal.commands.search', 'SearchCommand', + 'Search PyPI for packages.', + )), + ('wheel', CommandInfo( + 'pip._internal.commands.wheel', 'WheelCommand', + 'Build wheels from your requirements.', + )), + ('hash', CommandInfo( + 'pip._internal.commands.hash', 'HashCommand', + 'Compute hashes of package archives.', + )), + ('completion', CommandInfo( + 'pip._internal.commands.completion', 'CompletionCommand', + 'A helper command used for command completion.', + )), + ('debug', CommandInfo( + 'pip._internal.commands.debug', 'DebugCommand', + 'Show information useful for debugging.', + )), + ('help', CommandInfo( + 'pip._internal.commands.help', 'HelpCommand', + 'Show help for commands.', + )), +]) # type: OrderedDict[str, CommandInfo] + + +def create_command(name, **kwargs): + # type: (str, **Any) -> Command + """ + Create an instance of the Command class with the given name. + """ + module_path, class_name, summary = commands_dict[name] + module = importlib.import_module(module_path) + command_class = getattr(module, class_name) + command = command_class(name=name, summary=summary, **kwargs) + + return command def get_similar_commands(name): @@ -66,14 +112,3 @@ def get_similar_commands(name): return close_commands[0] else: return False - - -def _sort_commands(cmddict, order): - def keyfn(key): - try: - return order.index(key[1]) - except ValueError: - # unordered items should come last - return 0xff - - return sorted(cmddict.items(), key=keyfn) diff --git a/pipenv/patched/notpip/_internal/commands/check.py b/pipenv/patched/notpip/_internal/commands/check.py index cf84f5dffd..8e1db5599c 100644 --- a/pipenv/patched/notpip/_internal/commands/check.py +++ b/pipenv/patched/notpip/_internal/commands/check.py @@ -1,19 +1,23 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + import logging from pipenv.patched.notpip._internal.cli.base_command import Command from pipenv.patched.notpip._internal.operations.check import ( - check_package_set, create_package_set_from_installed, + check_package_set, + create_package_set_from_installed, ) +from pipenv.patched.notpip._internal.utils.misc import write_output logger = logging.getLogger(__name__) class CheckCommand(Command): """Verify installed packages have compatible dependencies.""" - name = 'check' + usage = """ %prog [options]""" - summary = 'Verify installed packages have compatible dependencies.' 
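+    # name/summary are no longer class attributes; create_command() now
+    # passes both to Command.__init__ from the commands_dict entries.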
def run(self, options, args): package_set, parsing_probs = create_package_set_from_installed() @@ -22,7 +26,7 @@ def run(self, options, args): for project_name in missing: version = package_set[project_name].version for dependency in missing[project_name]: - logger.info( + write_output( "%s %s requires %s, which is not installed.", project_name, version, dependency[0], ) @@ -30,7 +34,7 @@ def run(self, options, args): for project_name in conflicting: version = package_set[project_name].version for dep_name, dep_version, req in conflicting[project_name]: - logger.info( + write_output( "%s %s has requirement %s, but you have %s %s.", project_name, version, req, dep_name, dep_version, ) @@ -38,4 +42,4 @@ def run(self, options, args): if missing or conflicting or parsing_probs: return 1 else: - logger.info("No broken requirements found.") + write_output("No broken requirements found.") diff --git a/pipenv/patched/notpip/_internal/commands/completion.py b/pipenv/patched/notpip/_internal/commands/completion.py index cb8a11a7ca..4cccfbf2d6 100644 --- a/pipenv/patched/notpip/_internal/commands/completion.py +++ b/pipenv/patched/notpip/_internal/commands/completion.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import sys @@ -16,7 +19,7 @@ { COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\ COMP_CWORD=$COMP_CWORD \\ - PIP_AUTO_COMPLETE=1 $1 ) ) + PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) ) } complete -o default -F _pip_completion %(prog)s """, @@ -27,7 +30,7 @@ read -cn cword reply=( $( COMP_WORDS="$words[*]" \\ COMP_CWORD=$(( cword-1 )) \\ - PIP_AUTO_COMPLETE=1 $words[1] ) ) + PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )) } compctl -K _pip_completion %(prog)s """, @@ -47,8 +50,7 @@ class CompletionCommand(Command): """A helper command to be used for command completion.""" - name = 'completion' - summary = 'A helper command used for command completion.' + ignore_require_venv = True def __init__(self, *args, **kw): diff --git a/pipenv/patched/notpip/_internal/commands/configuration.py b/pipenv/patched/notpip/_internal/commands/configuration.py index 6c1dbdfdbd..1573fb187e 100644 --- a/pipenv/patched/notpip/_internal/commands/configuration.py +++ b/pipenv/patched/notpip/_internal/commands/configuration.py @@ -1,13 +1,19 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + import logging import os import subprocess from pipenv.patched.notpip._internal.cli.base_command import Command from pipenv.patched.notpip._internal.cli.status_codes import ERROR, SUCCESS -from pipenv.patched.notpip._internal.configuration import Configuration, kinds +from pipenv.patched.notpip._internal.configuration import ( + Configuration, + get_configuration_files, + kinds, +) from pipenv.patched.notpip._internal.exceptions import PipError -from pipenv.patched.notpip._internal.locations import venv_config_file -from pipenv.patched.notpip._internal.utils.misc import get_prog +from pipenv.patched.notpip._internal.utils.misc import get_prog, write_output logger = logging.getLogger(__name__) @@ -23,13 +29,13 @@ class ConfigurationCommand(Command): set: Set the name=value unset: Unset the value associated with name - If none of --user, --global and --venv are passed, a virtual + If none of --user, --global and --site are passed, a virtual environment configuration file is used if one is active and the file exists. 
Otherwise, all modifications happen on the to the user file by default. """ - name = 'config' + ignore_require_venv = True usage = """ %prog [] list %prog [] [--editor ] edit @@ -39,8 +45,6 @@ class ConfigurationCommand(Command): %prog [] unset name """ - summary = "Manage local and global configuration." - def __init__(self, *args, **kwargs): super(ConfigurationCommand, self).__init__(*args, **kwargs) @@ -74,11 +78,11 @@ def __init__(self, *args, **kwargs): ) self.cmd_opts.add_option( - '--venv', - dest='venv_file', + '--site', + dest='site_file', action='store_true', default=False, - help='Use the virtualenv configuration file only' + help='Use the current environment configuration file only' ) self.parser.insert_option_group(0, self.cmd_opts) @@ -127,40 +131,42 @@ def run(self, options, args): return SUCCESS def _determine_file(self, options, need_value): - file_options = { - kinds.USER: options.user_file, - kinds.GLOBAL: options.global_file, - kinds.VENV: options.venv_file - } + file_options = [key for key, value in ( + (kinds.USER, options.user_file), + (kinds.GLOBAL, options.global_file), + (kinds.SITE, options.site_file), + ) if value] - if sum(file_options.values()) == 0: + if not file_options: if not need_value: return None - # Default to user, unless there's a virtualenv file. - elif os.path.exists(venv_config_file): - return kinds.VENV + # Default to user, unless there's a site file. + elif any( + os.path.exists(site_config_file) + for site_config_file in get_configuration_files()[kinds.SITE] + ): + return kinds.SITE else: return kinds.USER - elif sum(file_options.values()) == 1: - # There's probably a better expression for this. - return [key for key in file_options if file_options[key]][0] + elif len(file_options) == 1: + return file_options[0] raise PipError( "Need exactly one file to operate upon " - "(--user, --venv, --global) to perform." + "(--user, --site, --global) to perform." ) def list_values(self, options, args): self._get_n_args(args, "list", n=0) for key, value in sorted(self.configuration.items()): - logger.info("%s=%r", key, value) + write_output("%s=%r", key, value) def get_name(self, options, args): key = self._get_n_args(args, "get [name]", n=1) value = self.configuration.get_value(key) - logger.info("%s", value) + write_output("%s", value) def set_name_value(self, options, args): key, value = self._get_n_args(args, "set [name] [value]", n=2) diff --git a/pipenv/patched/notpip/_internal/commands/debug.py b/pipenv/patched/notpip/_internal/commands/debug.py new file mode 100644 index 0000000000..f83e757344 --- /dev/null +++ b/pipenv/patched/notpip/_internal/commands/debug.py @@ -0,0 +1,115 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + +from __future__ import absolute_import + +import locale +import logging +import sys + +from pipenv.patched.notpip._internal.cli import cmdoptions +from pipenv.patched.notpip._internal.cli.base_command import Command +from pipenv.patched.notpip._internal.cli.cmdoptions import make_target_python +from pipenv.patched.notpip._internal.cli.status_codes import SUCCESS +from pipenv.patched.notpip._internal.utils.logging import indent_log +from pipenv.patched.notpip._internal.utils.misc import get_pip_version +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.wheel import format_tag + +if MYPY_CHECK_RUNNING: + from typing import Any, List + from optparse import Values + +logger = logging.getLogger(__name__) + + +def show_value(name, value): + # type: (str, str) -> None + logger.info('{}: {}'.format(name, value)) + + +def show_sys_implementation(): + # type: () -> None + logger.info('sys.implementation:') + if hasattr(sys, 'implementation'): + implementation = sys.implementation # type: ignore + implementation_name = implementation.name + else: + implementation_name = '' + + with indent_log(): + show_value('name', implementation_name) + + +def show_tags(options): + # type: (Values) -> None + tag_limit = 10 + + target_python = make_target_python(options) + tags = target_python.get_tags() + + # Display the target options that were explicitly provided. + formatted_target = target_python.format_given() + suffix = '' + if formatted_target: + suffix = ' (target: {})'.format(formatted_target) + + msg = 'Compatible tags: {}{}'.format(len(tags), suffix) + logger.info(msg) + + if options.verbose < 1 and len(tags) > tag_limit: + tags_limited = True + tags = tags[:tag_limit] + else: + tags_limited = False + + with indent_log(): + for tag in tags: + logger.info(format_tag(tag)) + + if tags_limited: + msg = ( + '...\n' + '[First {tag_limit} tags shown. Pass --verbose to show all.]' + ).format(tag_limit=tag_limit) + logger.info(msg) + + +class DebugCommand(Command): + """ + Display debug information. + """ + + usage = """ + %prog """ + ignore_require_venv = True + + def __init__(self, *args, **kw): + super(DebugCommand, self).__init__(*args, **kw) + + cmd_opts = self.cmd_opts + cmdoptions.add_target_python_options(cmd_opts) + self.parser.insert_option_group(0, cmd_opts) + + def run(self, options, args): + # type: (Values, List[Any]) -> int + logger.warning( + "This command is only meant for debugging. " + "Do not use this with automation for parsing and getting these " + "details, since the output and options of this command may " + "change without notice." + ) + show_value('pip version', get_pip_version()) + show_value('sys.version', sys.version) + show_value('sys.executable', sys.executable) + show_value('sys.getdefaultencoding', sys.getdefaultencoding()) + show_value('sys.getfilesystemencoding', sys.getfilesystemencoding()) + show_value( + 'locale.getpreferredencoding', locale.getpreferredencoding(), + ) + show_value('sys.platform', sys.platform) + show_sys_implementation() + + show_tags(options) + + return SUCCESS diff --git a/pipenv/patched/notpip/_internal/commands/download.py b/pipenv/patched/notpip/_internal/commands/download.py index 133ca135ee..a56f0983cd 100644 --- a/pipenv/patched/notpip/_internal/commands/download.py +++ b/pipenv/patched/notpip/_internal/commands/download.py @@ -1,16 +1,18 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging import os from pipenv.patched.notpip._internal.cli import cmdoptions -from pipenv.patched.notpip._internal.cli.base_command import RequirementCommand -from pipenv.patched.notpip._internal.operations.prepare import RequirementPreparer +from pipenv.patched.notpip._internal.cli.cmdoptions import make_target_python +from pipenv.patched.notpip._internal.cli.req_command import RequirementCommand from pipenv.patched.notpip._internal.req import RequirementSet from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker -from pipenv.patched.notpip._internal.resolve import Resolver from pipenv.patched.notpip._internal.utils.filesystem import check_path_owner -from pipenv.patched.notpip._internal.utils.misc import ensure_dir, normalize_path +from pipenv.patched.notpip._internal.utils.misc import ensure_dir, normalize_path, write_output from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory logger = logging.getLogger(__name__) @@ -28,7 +30,6 @@ class DownloadCommand(RequirementCommand): pip also supports downloading from "requirements files", which provide an easy way to specify a whole environment to be downloaded. """ - name = 'download' usage = """ %prog [options] [package-index-options] ... @@ -37,8 +38,6 @@ class DownloadCommand(RequirementCommand): %prog [options] ... %prog [options] ...""" - summary = 'Download packages.' - def __init__(self, *args, **kw): super(DownloadCommand, self).__init__(*args, **kw) @@ -69,10 +68,7 @@ def __init__(self, *args, **kw): help=("Download packages into ."), ) - cmd_opts.add_option(cmdoptions.platform()) - cmd_opts.add_option(cmdoptions.python_version()) - cmd_opts.add_option(cmdoptions.implementation()) - cmd_opts.add_option(cmdoptions.abi()) + cmdoptions.add_target_python_options(cmd_opts) index_opts = cmdoptions.make_option_group( cmdoptions.index_group, @@ -88,11 +84,6 @@ def run(self, options, args): # of the RequirementSet code require that property. options.editables = [] - if options.python_version: - python_versions = [options.python_version] - else: - python_versions = None - cmdoptions.check_dist_restriction(options) options.src_dir = os.path.abspath(options.src_dir) @@ -100,77 +91,66 @@ def run(self, options, args): ensure_dir(options.download_dir) - with self._build_session(options) as session: - finder = self._build_package_finder( + session = self.get_default_session(options) + + target_python = make_target_python(options) + finder = self._build_package_finder( + options=options, + session=session, + target_python=target_python, + ) + build_delete = (not (options.no_clean or options.build_dir)) + if options.cache_dir and not check_path_owner(options.cache_dir): + logger.warning( + "The directory '%s' or its parent directory is not owned " + "by the current user and caching wheels has been " + "disabled. check the permissions and owner of that " + "directory. 
If executing pip with sudo, you may want " + "sudo's -H flag.", + options.cache_dir, + ) + options.cache_dir = None + + with RequirementTracker() as req_tracker, TempDirectory( + options.build_dir, delete=build_delete, kind="download" + ) as directory: + + requirement_set = RequirementSet( + require_hashes=options.require_hashes, + ) + self.populate_requirement_set( + requirement_set, + args, + options, + finder, + session, + None + ) + + preparer = self.make_requirement_preparer( + temp_build_dir=directory, options=options, + req_tracker=req_tracker, + download_dir=options.download_dir, + ) + + resolver = self.make_resolver( + preparer=preparer, + finder=finder, session=session, - platform=options.platform, - python_versions=python_versions, - abi=options.abi, - implementation=options.implementation, + options=options, + py_version_info=options.python_version, ) - build_delete = (not (options.no_clean or options.build_dir)) - if options.cache_dir and not check_path_owner(options.cache_dir): - logger.warning( - "The directory '%s' or its parent directory is not owned " - "by the current user and caching wheels has been " - "disabled. check the permissions and owner of that " - "directory. If executing pip with sudo, you may want " - "sudo's -H flag.", - options.cache_dir, - ) - options.cache_dir = None - - with RequirementTracker() as req_tracker, TempDirectory( - options.build_dir, delete=build_delete, kind="download" - ) as directory: - - requirement_set = RequirementSet( - require_hashes=options.require_hashes, - ) - self.populate_requirement_set( - requirement_set, - args, - options, - finder, - session, - self.name, - None - ) - - preparer = RequirementPreparer( - build_dir=directory.path, - src_dir=options.src_dir, - download_dir=options.download_dir, - wheel_download_dir=None, - progress_bar=options.progress_bar, - build_isolation=options.build_isolation, - req_tracker=req_tracker, - ) - - resolver = Resolver( - preparer=preparer, - finder=finder, - session=session, - wheel_cache=None, - use_user_site=False, - upgrade_strategy="to-satisfy-only", - force_reinstall=False, - ignore_dependencies=options.ignore_dependencies, - ignore_requires_python=False, - ignore_installed=True, - isolated=options.isolated_mode, - ) - resolver.resolve(requirement_set) - - downloaded = ' '.join([ - req.name for req in requirement_set.successfully_downloaded - ]) - if downloaded: - logger.info('Successfully downloaded %s', downloaded) - - # Clean up - if not options.no_clean: - requirement_set.cleanup_files() + resolver.resolve(requirement_set) + + downloaded = ' '.join([ + req.name for req in requirement_set.successfully_downloaded + ]) + if downloaded: + write_output('Successfully downloaded %s', downloaded) + + # Clean up + if not options.no_clean: + requirement_set.cleanup_files() return requirement_set diff --git a/pipenv/patched/notpip/_internal/commands/freeze.py b/pipenv/patched/notpip/_internal/commands/freeze.py index 343227bab7..0c40522e42 100644 --- a/pipenv/patched/notpip/_internal/commands/freeze.py +++ b/pipenv/patched/notpip/_internal/commands/freeze.py @@ -1,8 +1,12 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import sys from pipenv.patched.notpip._internal.cache import WheelCache +from pipenv.patched.notpip._internal.cli import cmdoptions from pipenv.patched.notpip._internal.cli.base_command import Command from pipenv.patched.notpip._internal.models.format_control import FormatControl from pipenv.patched.notpip._internal.operations.freeze import freeze @@ -17,10 +21,9 @@ class FreezeCommand(Command): packages are listed in a case-insensitive sorted order. """ - name = 'freeze' + usage = """ %prog [options]""" - summary = 'Output installed packages in requirements format.' log_streams = ("ext://sys.stderr", "ext://sys.stderr") def __init__(self, *args, **kw): @@ -56,6 +59,7 @@ def __init__(self, *args, **kw): action='store_true', default=False, help='Only output packages installed in user-site.') + self.cmd_opts.add_option(cmdoptions.list_path()) self.cmd_opts.add_option( '--all', dest='freeze_all', @@ -77,11 +81,14 @@ def run(self, options, args): if not options.freeze_all: skip.update(DEV_PKGS) + cmdoptions.check_list_path_option(options) + freeze_kwargs = dict( requirement=options.requirements, find_links=options.find_links, local_only=options.local, user_only=options.user, + paths=options.path, skip_regex=options.skip_requirements_regex, isolated=options.isolated_mode, wheel_cache=wheel_cache, diff --git a/pipenv/patched/notpip/_internal/commands/hash.py b/pipenv/patched/notpip/_internal/commands/hash.py index 183f11aef3..eb8ce6e3dc 100644 --- a/pipenv/patched/notpip/_internal/commands/hash.py +++ b/pipenv/patched/notpip/_internal/commands/hash.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import hashlib @@ -7,7 +10,7 @@ from pipenv.patched.notpip._internal.cli.base_command import Command from pipenv.patched.notpip._internal.cli.status_codes import ERROR from pipenv.patched.notpip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES -from pipenv.patched.notpip._internal.utils.misc import read_chunks +from pipenv.patched.notpip._internal.utils.misc import read_chunks, write_output logger = logging.getLogger(__name__) @@ -18,11 +21,9 @@ class HashCommand(Command): These can be used with --hash in a requirements file to do repeatable installs. - """ - name = 'hash' + usage = '%prog [options] ...' - summary = 'Compute hashes of package archives.' ignore_require_venv = True def __init__(self, *args, **kw): @@ -44,8 +45,8 @@ def run(self, options, args): algorithm = options.algorithm for path in args: - logger.info('%s:\n--hash=%s:%s', - path, algorithm, _hash_of_file(path, algorithm)) + write_output('%s:\n--hash=%s:%s', + path, algorithm, _hash_of_file(path, algorithm)) def _hash_of_file(path, algorithm): diff --git a/pipenv/patched/notpip/_internal/commands/help.py b/pipenv/patched/notpip/_internal/commands/help.py index f2c619659b..73fd016b3c 100644 --- a/pipenv/patched/notpip/_internal/commands/help.py +++ b/pipenv/patched/notpip/_internal/commands/help.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import from pipenv.patched.notpip._internal.cli.base_command import Command @@ -7,14 +10,15 @@ class HelpCommand(Command): """Show help for commands""" - name = 'help' + usage = """ %prog """ - summary = 'Show help for commands.' 
ignore_require_venv = True def run(self, options, args): - from pipenv.patched.notpip._internal.commands import commands_dict, get_similar_commands + from pipenv.patched.notpip._internal.commands import ( + commands_dict, create_command, get_similar_commands, + ) try: # 'pip help' with no args is handled by pip.__init__.parseopt() @@ -31,7 +35,7 @@ def run(self, options, args): raise CommandError(' - '.join(msg)) - command = commands_dict[cmd_name]() + command = create_command(cmd_name) command.parser.print_help() return SUCCESS diff --git a/pipenv/patched/notpip/_internal/commands/install.py b/pipenv/patched/notpip/_internal/commands/install.py index 68255c8555..76ae4d6d83 100644 --- a/pipenv/patched/notpip/_internal/commands/install.py +++ b/pipenv/patched/notpip/_internal/commands/install.py @@ -1,3 +1,10 @@ +# The following comment should be removed at some point in the future. +# It's included for now because without it InstallCommand.run() has a +# couple errors where we have to know req.name is str rather than +# Optional[str] for the InstallRequirement req. +# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import errno @@ -8,31 +15,101 @@ from optparse import SUPPRESS_HELP from pipenv.patched.notpip._vendor import pkg_resources +from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._internal.cache import WheelCache from pipenv.patched.notpip._internal.cli import cmdoptions -from pipenv.patched.notpip._internal.cli.base_command import RequirementCommand -from pipenv.patched.notpip._internal.cli.status_codes import ERROR +from pipenv.patched.notpip._internal.cli.cmdoptions import make_target_python +from pipenv.patched.notpip._internal.cli.req_command import RequirementCommand +from pipenv.patched.notpip._internal.cli.status_codes import ERROR, SUCCESS from pipenv.patched.notpip._internal.exceptions import ( - CommandError, InstallationError, PreviousBuildDirError, + CommandError, + InstallationError, + PreviousBuildDirError, ) -from pipenv.patched.notpip._internal.locations import distutils_scheme, virtualenv_no_global +from pipenv.patched.notpip._internal.locations import distutils_scheme from pipenv.patched.notpip._internal.operations.check import check_install_conflicts -from pipenv.patched.notpip._internal.operations.prepare import RequirementPreparer from pipenv.patched.notpip._internal.req import RequirementSet, install_given_reqs from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker -from pipenv.patched.notpip._internal.resolve import Resolver from pipenv.patched.notpip._internal.utils.filesystem import check_path_owner from pipenv.patched.notpip._internal.utils.misc import ( - ensure_dir, get_installed_version, + ensure_dir, + get_installed_version, protect_pip_from_modification_on_windows, + write_output, ) from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.virtualenv import virtualenv_no_global from pipenv.patched.notpip._internal.wheel import WheelBuilder +if MYPY_CHECK_RUNNING: + from optparse import Values + from typing import Any, List, Optional + + from pipenv.patched.notpip._internal.models.format_control import FormatControl + from pipenv.patched.notpip._internal.req.req_install import InstallRequirement + from pipenv.patched.notpip._internal.wheel import BinaryAllowedPredicate + + logger = 
logging.getLogger(__name__) +def is_wheel_installed(): + """ + Return whether the wheel package is installed. + """ + try: + import wheel # noqa: F401 + except ImportError: + return False + + return True + + +def build_wheels( + builder, # type: WheelBuilder + pep517_requirements, # type: List[InstallRequirement] + legacy_requirements, # type: List[InstallRequirement] +): + # type: (...) -> List[InstallRequirement] + """ + Build wheels for requirements, depending on whether wheel is installed. + """ + # We don't build wheels for legacy requirements if wheel is not installed. + should_build_legacy = is_wheel_installed() + + # Always build PEP 517 requirements + build_failures = builder.build( + pep517_requirements, + should_unpack=True, + ) + + if should_build_legacy: + # We don't care about failures building legacy + # requirements, as we'll fall through to a direct + # install for those. + builder.build( + legacy_requirements, + should_unpack=True, + ) + + return build_failures + + +def get_check_binary_allowed(format_control): + # type: (FormatControl) -> BinaryAllowedPredicate + def check_binary_allowed(req): + # type: (InstallRequirement) -> bool + if req.use_pep517: + return True + canonical_name = canonicalize_name(req.name) + allowed_formats = format_control.get_allowed_formats(canonical_name) + return "binary" in allowed_formats + + return check_binary_allowed + + class InstallCommand(RequirementCommand): """ Install packages from: @@ -45,7 +122,6 @@ class InstallCommand(RequirementCommand): pip also supports installing from "requirements files", which provide an easy way to specify a whole environment to be installed. """ - name = 'install' usage = """ %prog [options] [package-index-options] ... @@ -54,8 +130,6 @@ class InstallCommand(RequirementCommand): %prog [options] [-e] ... %prog [options] ...""" - summary = 'Install packages.' - def __init__(self, *args, **kw): super(InstallCommand, self).__init__(*args, **kw) @@ -77,10 +151,7 @@ def __init__(self, *args, **kw): '. Use --upgrade to replace existing packages in ' 'with new versions.' ) - cmd_opts.add_option(cmdoptions.platform()) - cmd_opts.add_option(cmdoptions.python_version()) - cmd_opts.add_option(cmdoptions.implementation()) - cmd_opts.add_option(cmdoptions.abi()) + cmdoptions.add_target_python_options(cmd_opts) cmd_opts.add_option( '--user', @@ -148,7 +219,11 @@ def __init__(self, *args, **kw): '-I', '--ignore-installed', dest='ignore_installed', action='store_true', - help='Ignore the installed packages (reinstalling instead).') + help='Ignore the installed packages, overwriting them. ' + 'This can break your system if the existing package ' + 'is of a different version or was installed ' + 'with a different package manager!' 
+ ) cmd_opts.add_option(cmdoptions.ignore_requires_python()) cmd_opts.add_option(cmdoptions.no_build_isolation()) @@ -204,6 +279,7 @@ def __init__(self, *args, **kw): self.parser.insert_option_group(0, cmd_opts) def run(self, options, args): + # type: (Values, List[Any]) -> int cmdoptions.check_install_build_global(options) upgrade_strategy = "to-satisfy-only" if options.upgrade: @@ -214,11 +290,6 @@ def run(self, options, args): cmdoptions.check_dist_restriction(options, check_target=True) - if options.python_version: - python_versions = [options.python_version] - else: - python_versions = None - options.src_dir = os.path.abspath(options.src_dir) install_options = options.install_options or [] if options.use_user_site: @@ -235,7 +306,8 @@ def run(self, options, args): install_options.append('--user') install_options.append('--prefix=') - target_temp_dir = TempDirectory(kind="target") + target_temp_dir = None # type: Optional[TempDirectory] + target_temp_dir_path = None # type: Optional[str] if options.target_dir: options.ignore_installed = True options.target_dir = os.path.abspath(options.target_dir) @@ -247,200 +319,193 @@ def run(self, options, args): ) # Create a target directory for using with the target option - target_temp_dir.create() - install_options.append('--home=' + target_temp_dir.path) + target_temp_dir = TempDirectory(kind="target") + target_temp_dir_path = target_temp_dir.path + install_options.append('--home=' + target_temp_dir_path) global_options = options.global_options or [] - with self._build_session(options) as session: - finder = self._build_package_finder( - options=options, - session=session, - platform=options.platform, - python_versions=python_versions, - abi=options.abi, - implementation=options.implementation, + session = self.get_default_session(options) + + target_python = make_target_python(options) + finder = self._build_package_finder( + options=options, + session=session, + target_python=target_python, + ignore_requires_python=options.ignore_requires_python, + ) + build_delete = (not (options.no_clean or options.build_dir)) + wheel_cache = WheelCache(options.cache_dir, options.format_control) + + if options.cache_dir and not check_path_owner(options.cache_dir): + logger.warning( + "The directory '%s' or its parent directory is not owned " + "by the current user and caching wheels has been " + "disabled. check the permissions and owner of that " + "directory. If executing pip with sudo, you may want " + "sudo's -H flag.", + options.cache_dir, + ) + options.cache_dir = None + + with RequirementTracker() as req_tracker, TempDirectory( + options.build_dir, delete=build_delete, kind="install" + ) as directory: + requirement_set = RequirementSet( + require_hashes=options.require_hashes, + check_supported_wheels=not options.target_dir, ) - build_delete = (not (options.no_clean or options.build_dir)) - wheel_cache = WheelCache(options.cache_dir, options.format_control) - - if options.cache_dir and not check_path_owner(options.cache_dir): - logger.warning( - "The directory '%s' or its parent directory is not owned " - "by the current user and caching wheels has been " - "disabled. check the permissions and owner of that " - "directory. 
If executing pip with sudo, you may want " - "sudo's -H flag.", - options.cache_dir, + + try: + self.populate_requirement_set( + requirement_set, args, options, finder, session, + wheel_cache + ) + preparer = self.make_requirement_preparer( + temp_build_dir=directory, + options=options, + req_tracker=req_tracker, ) - options.cache_dir = None - - with RequirementTracker() as req_tracker, TempDirectory( - options.build_dir, delete=build_delete, kind="install" - ) as directory: - requirement_set = RequirementSet( - require_hashes=options.require_hashes, - check_supported_wheels=not options.target_dir, + resolver = self.make_resolver( + preparer=preparer, + finder=finder, + session=session, + options=options, + wheel_cache=wheel_cache, + use_user_site=options.use_user_site, + ignore_installed=options.ignore_installed, + ignore_requires_python=options.ignore_requires_python, + force_reinstall=options.force_reinstall, + upgrade_strategy=upgrade_strategy, + use_pep517=options.use_pep517, ) + resolver.resolve(requirement_set) try: - self.populate_requirement_set( - requirement_set, args, options, finder, session, - self.name, wheel_cache - ) - preparer = RequirementPreparer( - build_dir=directory.path, - src_dir=options.src_dir, - download_dir=None, - wheel_download_dir=None, - progress_bar=options.progress_bar, - build_isolation=options.build_isolation, - req_tracker=req_tracker, - ) - - resolver = Resolver( - preparer=preparer, - finder=finder, - session=session, - wheel_cache=wheel_cache, - use_user_site=options.use_user_site, - upgrade_strategy=upgrade_strategy, - force_reinstall=options.force_reinstall, - ignore_dependencies=options.ignore_dependencies, - ignore_requires_python=options.ignore_requires_python, - ignore_installed=options.ignore_installed, - isolated=options.isolated_mode, - use_pep517=options.use_pep517 - ) - resolver.resolve(requirement_set) - - protect_pip_from_modification_on_windows( - modifying_pip=requirement_set.has_requirement("pip") - ) + pip_req = requirement_set.get_requirement("pip") + except KeyError: + modifying_pip = None + else: + # If we're not replacing an already installed pip, + # we're not modifying it. 
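+                # ("satisfied_by" is set during resolution when an
+                # already-installed distribution satisfies the
+                # requirement, so None here means pip itself is about
+                # to be replaced or newly installed.)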
+ modifying_pip = getattr(pip_req, "satisfied_by", None) is None + protect_pip_from_modification_on_windows( + modifying_pip=modifying_pip + ) - # Consider legacy and PEP517-using requirements separately - legacy_requirements = [] - pep517_requirements = [] - for req in requirement_set.requirements.values(): - if req.use_pep517: - pep517_requirements.append(req) - else: - legacy_requirements.append(req) + check_binary_allowed = get_check_binary_allowed( + finder.format_control + ) + # Consider legacy and PEP517-using requirements separately + legacy_requirements = [] + pep517_requirements = [] + for req in requirement_set.requirements.values(): + if req.use_pep517: + pep517_requirements.append(req) + else: + legacy_requirements.append(req) + + wheel_builder = WheelBuilder( + preparer, wheel_cache, + build_options=[], global_options=[], + check_binary_allowed=check_binary_allowed, + ) - # We don't build wheels for legacy requirements if we - # don't have wheel installed or we don't have a cache dir - try: - import wheel # noqa: F401 - build_legacy = bool(options.cache_dir) - except ImportError: - build_legacy = False - - wb = WheelBuilder( - finder, preparer, wheel_cache, - build_options=[], global_options=[], - ) + build_failures = build_wheels( + builder=wheel_builder, + pep517_requirements=pep517_requirements, + legacy_requirements=legacy_requirements, + ) - # Always build PEP 517 requirements - build_failures = wb.build( - pep517_requirements, - session=session, autobuilding=True - ) + # If we're using PEP 517, we cannot do a direct install + # so we fail here. + if build_failures: + raise InstallationError( + "Could not build wheels for {} which use" + " PEP 517 and cannot be installed directly".format( + ", ".join(r.name for r in build_failures))) - if build_legacy: - # We don't care about failures building legacy - # requirements, as we'll fall through to a direct - # install for those. - wb.build( - legacy_requirements, - session=session, autobuilding=True - ) + to_install = resolver.get_installation_order( + requirement_set + ) - # If we're using PEP 517, we cannot do a direct install - # so we fail here. - if build_failures: - raise InstallationError( - "Could not build wheels for {} which use" - " PEP 517 and cannot be installed directly".format( - ", ".join(r.name for r in build_failures))) + # Consistency Checking of the package set we're installing. + should_warn_about_conflicts = ( + not options.ignore_dependencies and + options.warn_about_conflicts + ) + if should_warn_about_conflicts: + self._warn_about_conflicts(to_install) + + # Don't warn about script install locations if + # --target has been specified + warn_script_location = options.warn_script_location + if options.target_dir: + warn_script_location = False + + installed = install_given_reqs( + to_install, + install_options, + global_options, + root=options.root_path, + home=target_temp_dir_path, + prefix=options.prefix_path, + pycompile=options.compile, + warn_script_location=warn_script_location, + use_user_site=options.use_user_site, + ) - to_install = resolver.get_installation_order( - requirement_set - ) + lib_locations = get_lib_location_guesses( + user=options.use_user_site, + home=target_temp_dir_path, + root=options.root_path, + prefix=options.prefix_path, + isolated=options.isolated_mode, + ) + working_set = pkg_resources.WorkingSet(lib_locations) - # Consistency Checking of the package set we're installing. 
- should_warn_about_conflicts = ( - not options.ignore_dependencies and - options.warn_about_conflicts - ) - if should_warn_about_conflicts: - self._warn_about_conflicts(to_install) - - # Don't warn about script install locations if - # --target has been specified - warn_script_location = options.warn_script_location - if options.target_dir: - warn_script_location = False - - installed = install_given_reqs( - to_install, - install_options, - global_options, - root=options.root_path, - home=target_temp_dir.path, - prefix=options.prefix_path, - pycompile=options.compile, - warn_script_location=warn_script_location, - use_user_site=options.use_user_site, + reqs = sorted(installed, key=operator.attrgetter('name')) + items = [] + for req in reqs: + item = req.name + try: + installed_version = get_installed_version( + req.name, working_set=working_set + ) + if installed_version: + item += '-' + installed_version + except Exception: + pass + items.append(item) + installed_desc = ' '.join(items) + if installed_desc: + write_output( + 'Successfully installed %s', installed_desc, ) + except EnvironmentError as error: + show_traceback = (self.verbosity >= 1) - lib_locations = get_lib_location_guesses( - user=options.use_user_site, - home=target_temp_dir.path, - root=options.root_path, - prefix=options.prefix_path, - isolated=options.isolated_mode, - ) - working_set = pkg_resources.WorkingSet(lib_locations) - - reqs = sorted(installed, key=operator.attrgetter('name')) - items = [] - for req in reqs: - item = req.name - try: - installed_version = get_installed_version( - req.name, working_set=working_set - ) - if installed_version: - item += '-' + installed_version - except Exception: - pass - items.append(item) - installed = ' '.join(items) - if installed: - logger.info('Successfully installed %s', installed) - except EnvironmentError as error: - show_traceback = (self.verbosity >= 1) - - message = create_env_error_message( - error, show_traceback, options.use_user_site, - ) - logger.error(message, exc_info=show_traceback) - - return ERROR - except PreviousBuildDirError: - options.no_clean = True - raise - finally: - # Clean up - if not options.no_clean: - requirement_set.cleanup_files() - wheel_cache.cleanup() + message = create_env_error_message( + error, show_traceback, options.use_user_site, + ) + logger.error(message, exc_info=show_traceback) + + return ERROR + except PreviousBuildDirError: + options.no_clean = True + raise + finally: + # Clean up + if not options.no_clean: + requirement_set.cleanup_files() + wheel_cache.cleanup() if options.target_dir: self._handle_target_dir( options.target_dir, target_temp_dir, options.upgrade ) - return requirement_set + + return SUCCESS def _handle_target_dir(self, target_dir, target_temp_dir, upgrade): ensure_dir(target_dir) diff --git a/pipenv/patched/notpip/_internal/commands/list.py b/pipenv/patched/notpip/_internal/commands/list.py index a2bd5be1f9..b61b4c8c05 100644 --- a/pipenv/patched/notpip/_internal/commands/list.py +++ b/pipenv/patched/notpip/_internal/commands/list.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import json @@ -7,27 +10,30 @@ from pipenv.patched.notpip._vendor.six.moves import zip_longest from pipenv.patched.notpip._internal.cli import cmdoptions -from pipenv.patched.notpip._internal.cli.base_command import Command +from pipenv.patched.notpip._internal.cli.req_command import IndexGroupCommand from pipenv.patched.notpip._internal.exceptions import CommandError from pipenv.patched.notpip._internal.index import PackageFinder +from pipenv.patched.notpip._internal.models.selection_prefs import SelectionPreferences +from pipenv.patched.notpip._internal.self_outdated_check import make_link_collector from pipenv.patched.notpip._internal.utils.misc import ( - dist_is_editable, get_installed_distributions, + dist_is_editable, + get_installed_distributions, + write_output, ) from pipenv.patched.notpip._internal.utils.packaging import get_installer logger = logging.getLogger(__name__) -class ListCommand(Command): +class ListCommand(IndexGroupCommand): """ List installed packages, including editables. Packages are listed in a case-insensitive sorted order. """ - name = 'list' + usage = """ %prog [options]""" - summary = 'List installed packages.' def __init__(self, *args, **kw): super(ListCommand, self).__init__(*args, **kw) @@ -62,7 +68,7 @@ def __init__(self, *args, **kw): action='store_true', default=False, help='Only output packages installed in user-site.') - + cmd_opts.add_option(cmdoptions.list_path()) cmd_opts.add_option( '--pre', action='store_true', @@ -109,16 +115,21 @@ def __init__(self, *args, **kw): self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, cmd_opts) - def _build_package_finder(self, options, index_urls, session): + def _build_package_finder(self, options, session): """ Create a package finder appropriate to this list command. """ - return PackageFinder( - find_links=options.find_links, - index_urls=index_urls, + link_collector = make_link_collector(session, options=options) + + # Pass allow_yanked=False to ignore yanked versions. 
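+        # (Yanked releases, per PEP 592, should only ever be installed
+        # when a requirement pins their exact version, so they are
+        # never useful as "latest" candidates when checking for
+        # outdated packages.)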
+ selection_prefs = SelectionPreferences( + allow_yanked=False, allow_all_prereleases=options.pre, - trusted_hosts=options.trusted_hosts, - session=session, + ) + + return PackageFinder.create( + link_collector=link_collector, + selection_prefs=selection_prefs, ) def run(self, options, args): @@ -126,11 +137,14 @@ def run(self, options, args): raise CommandError( "Options --outdated and --uptodate cannot be combined.") + cmdoptions.check_list_path_option(options) + packages = get_installed_distributions( local_only=options.local, user_only=options.user, editables_only=options.editable, include_editables=options.include_editable, + paths=options.path, ) # get_not_required must be called firstly in order to find and @@ -166,13 +180,8 @@ def get_not_required(self, packages, options): return {pkg for pkg in packages if pkg.key not in dep_keys} def iter_packages_latest_infos(self, packages, options): - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.debug('Ignoring indexes: %s', ','.join(index_urls)) - index_urls = [] - with self._build_session(options) as session: - finder = self._build_package_finder(options, index_urls, session) + finder = self._build_package_finder(options, session) for dist in packages: typ = 'unknown' @@ -182,12 +191,15 @@ def iter_packages_latest_infos(self, packages, options): all_candidates = [candidate for candidate in all_candidates if not candidate.version.is_prerelease] - if not all_candidates: + evaluator = finder.make_candidate_evaluator( + project_name=dist.project_name, + ) + best_candidate = evaluator.sort_best_candidate(all_candidates) + if best_candidate is None: continue - best_candidate = max(all_candidates, - key=finder._candidate_sort_key) + remote_version = best_candidate.version - if best_candidate.location.is_wheel: + if best_candidate.link.is_wheel: typ = 'wheel' else: typ = 'sdist' @@ -207,12 +219,12 @@ def output_package_listing(self, packages, options): elif options.list_format == 'freeze': for dist in packages: if options.verbose >= 1: - logger.info("%s==%s (%s)", dist.project_name, - dist.version, dist.location) + write_output("%s==%s (%s)", dist.project_name, + dist.version, dist.location) else: - logger.info("%s==%s", dist.project_name, dist.version) + write_output("%s==%s", dist.project_name, dist.version) elif options.list_format == 'json': - logger.info(format_for_json(packages, options)) + write_output(format_for_json(packages, options)) def output_package_listing_columns(self, data, header): # insert the header first: we need to know the size of column names @@ -226,7 +238,7 @@ def output_package_listing_columns(self, data, header): pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes))) for val in pkg_strings: - logger.info(val) + write_output(val) def tabulate(vals): diff --git a/pipenv/patched/notpip/_internal/commands/search.py b/pipenv/patched/notpip/_internal/commands/search.py index 986208f998..0da724d993 100644 --- a/pipenv/patched/notpip/_internal/commands/search.py +++ b/pipenv/patched/notpip/_internal/commands/search.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging @@ -12,22 +15,23 @@ from pipenv.patched.notpip._vendor.six.moves import xmlrpc_client # type: ignore from pipenv.patched.notpip._internal.cli.base_command import Command +from pipenv.patched.notpip._internal.cli.req_command import SessionCommandMixin from pipenv.patched.notpip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS -from pipenv.patched.notpip._internal.download import PipXmlrpcTransport from pipenv.patched.notpip._internal.exceptions import CommandError from pipenv.patched.notpip._internal.models.index import PyPI +from pipenv.patched.notpip._internal.network.xmlrpc import PipXmlrpcTransport from pipenv.patched.notpip._internal.utils.compat import get_terminal_size from pipenv.patched.notpip._internal.utils.logging import indent_log +from pipenv.patched.notpip._internal.utils.misc import write_output logger = logging.getLogger(__name__) -class SearchCommand(Command): +class SearchCommand(Command, SessionCommandMixin): """Search for PyPI packages whose name or summary contains .""" - name = 'search' + usage = """ %prog [options] """ - summary = 'Search PyPI for packages.' ignore_require_venv = True def __init__(self, *args, **kw): @@ -59,11 +63,13 @@ def run(self, options, args): def search(self, query, options): index_url = options.index - with self._build_session(options) as session: - transport = PipXmlrpcTransport(index_url, session) - pypi = xmlrpc_client.ServerProxy(index_url, transport) - hits = pypi.search({'name': query, 'summary': query}, 'or') - return hits + + session = self.get_default_session(options) + + transport = PipXmlrpcTransport(index_url, session) + pypi = xmlrpc_client.ServerProxy(index_url, transport) + hits = pypi.search({'name': query, 'summary': query}, 'or') + return hits def transform_hits(hits): @@ -118,15 +124,19 @@ def print_results(hits, name_column_width=None, terminal_width=None): line = '%-*s - %s' % (name_column_width, '%s (%s)' % (name, latest), summary) try: - logger.info(line) + write_output(line) if name in installed_packages: dist = pkg_resources.get_distribution(name) with indent_log(): if dist.version == latest: - logger.info('INSTALLED: %s (latest)', dist.version) + write_output('INSTALLED: %s (latest)', dist.version) else: - logger.info('INSTALLED: %s', dist.version) - logger.info('LATEST: %s', latest) + write_output('INSTALLED: %s', dist.version) + if parse_version(latest).pre: + write_output('LATEST: %s (pre-release; install' + ' with "pip install --pre")', latest) + else: + write_output('LATEST: %s', latest) except UnicodeEncodeError: pass diff --git a/pipenv/patched/notpip/_internal/commands/show.py b/pipenv/patched/notpip/_internal/commands/show.py index 3fd24482f8..2bffb612a7 100644 --- a/pipenv/patched/notpip/_internal/commands/show.py +++ b/pipenv/patched/notpip/_internal/commands/show.py @@ -1,14 +1,18 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging import os -from email.parser import FeedParser # type: ignore +from email.parser import FeedParser from pipenv.patched.notpip._vendor import pkg_resources from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._internal.cli.base_command import Command from pipenv.patched.notpip._internal.cli.status_codes import ERROR, SUCCESS +from pipenv.patched.notpip._internal.utils.misc import write_output logger = logging.getLogger(__name__) @@ -19,10 +23,9 @@ class ShowCommand(Command): The output is in RFC-compliant mail header format. """ - name = 'show' + usage = """ %prog [options] ...""" - summary = 'Show information about installed packages.' ignore_require_venv = True def __init__(self, *args, **kw): @@ -61,6 +64,20 @@ def search_packages_info(query): installed[canonicalize_name(p.project_name)] = p query_names = [canonicalize_name(name) for name in query] + missing = sorted( + [name for name, pkg in zip(query, query_names) if pkg not in installed] + ) + if missing: + logger.warning('Package(s) not found: %s', ', '.join(missing)) + + def get_requiring_packages(package_name): + canonical_name = canonicalize_name(package_name) + return [ + pkg.project_name for pkg in pkg_resources.working_set + if canonical_name in + [canonicalize_name(required.name) for required in + pkg.requires()] + ] for dist in [installed[pkg] for pkg in query_names if pkg in installed]: package = { @@ -68,6 +85,7 @@ def search_packages_info(query): 'version': dist.version, 'location': dist.location, 'requires': [dep.project_name for dep in dist.requires()], + 'required_by': get_requiring_packages(dist.project_name) } file_list = None metadata = None @@ -130,39 +148,33 @@ def print_results(distributions, list_files=False, verbose=False): for i, dist in enumerate(distributions): results_printed = True if i > 0: - logger.info("---") - - name = dist.get('name', '') - required_by = [ - pkg.project_name for pkg in pkg_resources.working_set - if name in [required.name for required in pkg.requires()] - ] - - logger.info("Name: %s", name) - logger.info("Version: %s", dist.get('version', '')) - logger.info("Summary: %s", dist.get('summary', '')) - logger.info("Home-page: %s", dist.get('home-page', '')) - logger.info("Author: %s", dist.get('author', '')) - logger.info("Author-email: %s", dist.get('author-email', '')) - logger.info("License: %s", dist.get('license', '')) - logger.info("Location: %s", dist.get('location', '')) - logger.info("Requires: %s", ', '.join(dist.get('requires', []))) - logger.info("Required-by: %s", ', '.join(required_by)) + write_output("---") + + write_output("Name: %s", dist.get('name', '')) + write_output("Version: %s", dist.get('version', '')) + write_output("Summary: %s", dist.get('summary', '')) + write_output("Home-page: %s", dist.get('home-page', '')) + write_output("Author: %s", dist.get('author', '')) + write_output("Author-email: %s", dist.get('author-email', '')) + write_output("License: %s", dist.get('license', '')) + write_output("Location: %s", dist.get('location', '')) + write_output("Requires: %s", ', '.join(dist.get('requires', []))) + write_output("Required-by: %s", ', '.join(dist.get('required_by', []))) if verbose: - logger.info("Metadata-Version: %s", - dist.get('metadata-version', '')) - logger.info("Installer: %s", dist.get('installer', '')) - logger.info("Classifiers:") + write_output("Metadata-Version: %s", + dist.get('metadata-version', '')) + 
write_output("Installer: %s", dist.get('installer', '')) + write_output("Classifiers:") for classifier in dist.get('classifiers', []): - logger.info(" %s", classifier) - logger.info("Entry-points:") + write_output(" %s", classifier) + write_output("Entry-points:") for entry in dist.get('entry_points', []): - logger.info(" %s", entry.strip()) + write_output(" %s", entry.strip()) if list_files: - logger.info("Files:") + write_output("Files:") for line in dist.get('files', []): - logger.info(" %s", line.strip()) + write_output(" %s", line.strip()) if "files" not in dist: - logger.info("Cannot locate installed-files.txt") + write_output("Cannot locate installed-files.txt") return results_printed diff --git a/pipenv/patched/notpip/_internal/commands/uninstall.py b/pipenv/patched/notpip/_internal/commands/uninstall.py index cf6a511c35..0f92baebd8 100644 --- a/pipenv/patched/notpip/_internal/commands/uninstall.py +++ b/pipenv/patched/notpip/_internal/commands/uninstall.py @@ -1,15 +1,19 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._internal.cli.base_command import Command +from pipenv.patched.notpip._internal.cli.req_command import SessionCommandMixin from pipenv.patched.notpip._internal.exceptions import InstallationError from pipenv.patched.notpip._internal.req import parse_requirements from pipenv.patched.notpip._internal.req.constructors import install_req_from_line from pipenv.patched.notpip._internal.utils.misc import protect_pip_from_modification_on_windows -class UninstallCommand(Command): +class UninstallCommand(Command, SessionCommandMixin): """ Uninstall packages. @@ -19,11 +23,10 @@ class UninstallCommand(Command): leave behind no metadata to determine what files were installed. - Script wrappers installed by ``python setup.py develop``. """ - name = 'uninstall' + usage = """ %prog [options] ... %prog [options] -r ...""" - summary = 'Uninstall packages.' 
def __init__(self, *args, **kw): super(UninstallCommand, self).__init__(*args, **kw) @@ -45,34 +48,35 @@ def __init__(self, *args, **kw): self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): - with self._build_session(options) as session: - reqs_to_uninstall = {} - for name in args: - req = install_req_from_line( - name, isolated=options.isolated_mode, - ) + session = self.get_default_session(options) + + reqs_to_uninstall = {} + for name in args: + req = install_req_from_line( + name, isolated=options.isolated_mode, + ) + if req.name: + reqs_to_uninstall[canonicalize_name(req.name)] = req + for filename in options.requirements: + for req in parse_requirements( + filename, + options=options, + session=session): if req.name: reqs_to_uninstall[canonicalize_name(req.name)] = req - for filename in options.requirements: - for req in parse_requirements( - filename, - options=options, - session=session): - if req.name: - reqs_to_uninstall[canonicalize_name(req.name)] = req - if not reqs_to_uninstall: - raise InstallationError( - 'You must give at least one requirement to %(name)s (see ' - '"pip help %(name)s")' % dict(name=self.name) - ) - - protect_pip_from_modification_on_windows( - modifying_pip="pip" in reqs_to_uninstall + if not reqs_to_uninstall: + raise InstallationError( + 'You must give at least one requirement to %(name)s (see ' + '"pip help %(name)s")' % dict(name=self.name) ) - for req in reqs_to_uninstall.values(): - uninstall_pathset = req.uninstall( - auto_confirm=options.yes, verbose=self.verbosity > 0, - ) - if uninstall_pathset: - uninstall_pathset.commit() + protect_pip_from_modification_on_windows( + modifying_pip="pip" in reqs_to_uninstall + ) + + for req in reqs_to_uninstall.values(): + uninstall_pathset = req.uninstall( + auto_confirm=options.yes, verbose=self.verbosity > 0, + ) + if uninstall_pathset: + uninstall_pathset.commit() diff --git a/pipenv/patched/notpip/_internal/commands/wheel.py b/pipenv/patched/notpip/_internal/commands/wheel.py index 801efff8e5..8d963b4ace 100644 --- a/pipenv/patched/notpip/_internal/commands/wheel.py +++ b/pipenv/patched/notpip/_internal/commands/wheel.py @@ -1,4 +1,8 @@ # -*- coding: utf-8 -*- + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging @@ -6,15 +10,19 @@ from pipenv.patched.notpip._internal.cache import WheelCache from pipenv.patched.notpip._internal.cli import cmdoptions -from pipenv.patched.notpip._internal.cli.base_command import RequirementCommand +from pipenv.patched.notpip._internal.cli.req_command import RequirementCommand from pipenv.patched.notpip._internal.exceptions import CommandError, PreviousBuildDirError -from pipenv.patched.notpip._internal.operations.prepare import RequirementPreparer from pipenv.patched.notpip._internal.req import RequirementSet from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker -from pipenv.patched.notpip._internal.resolve import Resolver from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING from pipenv.patched.notpip._internal.wheel import WheelBuilder +if MYPY_CHECK_RUNNING: + from optparse import Values + from typing import Any, List + + logger = logging.getLogger(__name__) @@ -33,7 +41,6 @@ class WheelCommand(RequirementCommand): """ - name = 'wheel' usage = """ %prog [options] ... %prog [options] -r ... 
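The removal of the class-level `name` and `summary` attributes recurs across every command in this patch: pip 19.3 moved that metadata into the registry in `commands/__init__.py`, which the `create_command()` helper (seen in the help command hunk above) uses to import and instantiate commands lazily. A minimal sketch of that scheme, assuming the upstream `CommandInfo` layout (abridged, not the vendored file verbatim):

from collections import OrderedDict, namedtuple
from importlib import import_module

# Each entry records where a command class lives plus its one-line
# summary, so command modules are only imported when actually needed.
CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary')

commands_dict = OrderedDict([
    ('wheel', CommandInfo(
        'pipenv.patched.notpip._internal.commands.wheel', 'WheelCommand',
        'Build wheels from your requirements.',
    )),
    # ... one entry per command ...
])


def create_command(name, **kwargs):
    # Import the module on demand and pass the registry's name and
    # summary into the class, replacing the old per-class attributes.
    module_path, class_name, summary = commands_dict[name]
    command_class = getattr(import_module(module_path), class_name)
    return command_class(name=name, summary=summary, **kwargs)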
@@ -41,8 +48,6 @@ class WheelCommand(RequirementCommand): %prog [options] [-e] ... %prog [options] ...""" - summary = 'Build wheels from your requirements.' - def __init__(self, *args, **kw): super(WheelCommand, self).__init__(*args, **kw) @@ -106,81 +111,70 @@ def __init__(self, *args, **kw): self.parser.insert_option_group(0, cmd_opts) def run(self, options, args): + # type: (Values, List[Any]) -> None cmdoptions.check_install_build_global(options) - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.debug('Ignoring indexes: %s', ','.join(index_urls)) - index_urls = [] - if options.build_dir: options.build_dir = os.path.abspath(options.build_dir) options.src_dir = os.path.abspath(options.src_dir) - with self._build_session(options) as session: - finder = self._build_package_finder(options, session) - build_delete = (not (options.no_clean or options.build_dir)) - wheel_cache = WheelCache(options.cache_dir, options.format_control) + session = self.get_default_session(options) - with RequirementTracker() as req_tracker, TempDirectory( - options.build_dir, delete=build_delete, kind="wheel" - ) as directory: + finder = self._build_package_finder(options, session) + build_delete = (not (options.no_clean or options.build_dir)) + wheel_cache = WheelCache(options.cache_dir, options.format_control) - requirement_set = RequirementSet( - require_hashes=options.require_hashes, - ) + with RequirementTracker() as req_tracker, TempDirectory( + options.build_dir, delete=build_delete, kind="wheel" + ) as directory: - try: - self.populate_requirement_set( - requirement_set, args, options, finder, session, - self.name, wheel_cache - ) + requirement_set = RequirementSet( + require_hashes=options.require_hashes, + ) - preparer = RequirementPreparer( - build_dir=directory.path, - src_dir=options.src_dir, - download_dir=None, - wheel_download_dir=options.wheel_dir, - progress_bar=options.progress_bar, - build_isolation=options.build_isolation, - req_tracker=req_tracker, - ) + try: + self.populate_requirement_set( + requirement_set, args, options, finder, session, + wheel_cache + ) - resolver = Resolver( - preparer=preparer, - finder=finder, - session=session, - wheel_cache=wheel_cache, - use_user_site=False, - upgrade_strategy="to-satisfy-only", - force_reinstall=False, - ignore_dependencies=options.ignore_dependencies, - ignore_requires_python=options.ignore_requires_python, - ignore_installed=True, - isolated=options.isolated_mode, - use_pep517=options.use_pep517 - ) - resolver.resolve(requirement_set) - - # build wheels - wb = WheelBuilder( - finder, preparer, wheel_cache, - build_options=options.build_options or [], - global_options=options.global_options or [], - no_clean=options.no_clean, - ) - build_failures = wb.build( - requirement_set.requirements.values(), session=session, + preparer = self.make_requirement_preparer( + temp_build_dir=directory, + options=options, + req_tracker=req_tracker, + wheel_download_dir=options.wheel_dir, + ) + + resolver = self.make_resolver( + preparer=preparer, + finder=finder, + session=session, + options=options, + wheel_cache=wheel_cache, + ignore_requires_python=options.ignore_requires_python, + use_pep517=options.use_pep517, + ) + resolver.resolve(requirement_set) + + # build wheels + wb = WheelBuilder( + preparer, wheel_cache, + build_options=options.build_options or [], + global_options=options.global_options or [], + no_clean=options.no_clean, + ) + build_failures = wb.build( + requirement_set.requirements.values(), + ) + if 
len(build_failures) != 0: + raise CommandError( + "Failed to build one or more wheels" ) - if len(build_failures) != 0: - raise CommandError( - "Failed to build one or more wheels" - ) - except PreviousBuildDirError: - options.no_clean = True - raise - finally: - if not options.no_clean: - requirement_set.cleanup_files() - wheel_cache.cleanup() + except PreviousBuildDirError: + options.no_clean = True + raise + finally: + if not options.no_clean: + requirement_set.cleanup_files() + wheel_cache.cleanup() diff --git a/pipenv/patched/notpip/_internal/configuration.py b/pipenv/patched/notpip/_internal/configuration.py index 3c02c95547..101934eb9e 100644 --- a/pipenv/patched/notpip/_internal/configuration.py +++ b/pipenv/patched/notpip/_internal/configuration.py @@ -11,25 +11,28 @@ A single word describing where the configuration key-value pair came from """ +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + import locale import logging import os +import sys -from pipenv.patched.notpip._vendor import six from pipenv.patched.notpip._vendor.six.moves import configparser from pipenv.patched.notpip._internal.exceptions import ( - ConfigurationError, ConfigurationFileCouldNotBeLoaded, -) -from pipenv.patched.notpip._internal.locations import ( - legacy_config_file, new_config_file, running_under_virtualenv, - site_config_files, venv_config_file, + ConfigurationError, + ConfigurationFileCouldNotBeLoaded, ) +from pipenv.patched.notpip._internal.utils import appdirs +from pipenv.patched.notpip._internal.utils.compat import WINDOWS, expanduser from pipenv.patched.notpip._internal.utils.misc import ensure_dir, enum from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 + from typing import ( Any, Dict, Iterable, List, NewType, Optional, Tuple ) @@ -52,6 +55,12 @@ def _normalize_name(name): def _disassemble_key(name): # type: (str) -> List[str] + if "." not in name: + error_message = ( + "Key does not contain dot separated section and key. " + "Perhaps you wanted to use 'global.{}' instead?" + ).format(name) + raise ConfigurationError(error_message) return name.split(".", 1) @@ -59,12 +68,37 @@ def _disassemble_key(name): kinds = enum( USER="user", # User Specific GLOBAL="global", # System Wide - VENV="venv", # Virtual Environment Specific + SITE="site", # [Virtual] Environment Specific ENV="env", # from PIP_CONFIG_FILE ENV_VAR="env-var", # from Environment Variables ) +CONFIG_BASENAME = 'pip.ini' if WINDOWS else 'pip.conf' + + +def get_configuration_files(): + global_config_files = [ + os.path.join(path, CONFIG_BASENAME) + for path in appdirs.site_config_dirs('pip') + ] + + site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME) + legacy_config_file = os.path.join( + expanduser('~'), + 'pip' if WINDOWS else '.pip', + CONFIG_BASENAME, + ) + new_config_file = os.path.join( + appdirs.user_config_dir("pip"), CONFIG_BASENAME + ) + return { + kinds.GLOBAL: global_config_files, + kinds.SITE: [site_config_file], + kinds.USER: [legacy_config_file, new_config_file], + } + + class Configuration(object): """Handles management of configuration. 
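The `_disassemble_key` guard added above turns a sectionless key into a pointed `ConfigurationError` instead of letting the single-element result of `name.split(".", 1)` fail later during unpacking. A minimal sketch of the behavior (key values are illustrative):

section, key = _disassemble_key("global.index-url")
# section == "global", key == "index-url"

_disassemble_key("index-url")
# ConfigurationError: Key does not contain dot separated section and
# key. Perhaps you wanted to use 'global.index-url' instead?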
@@ -83,7 +117,7 @@ def __init__(self, isolated, load_only=None): # type: (bool, Kind) -> None super(Configuration, self).__init__() - _valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.VENV, None] + _valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.SITE, None] if load_only not in _valid_load_only: raise ConfigurationError( "Got invalid value for load_only - should be one of {}".format( @@ -95,7 +129,7 @@ def __init__(self, isolated, load_only=None): # The order here determines the override order. self._override_order = [ - kinds.GLOBAL, kinds.USER, kinds.VENV, kinds.ENV, kinds.ENV_VAR + kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR ] self._ignore_env_names = ["version", "help"] @@ -188,7 +222,7 @@ def unset_value(self, key): # name removed from parser, section may now be empty section_iter = iter(parser.items(section)) try: - val = six.next(section_iter) + val = next(section_iter) except StopIteration: val = None @@ -205,7 +239,7 @@ def unset_value(self, key): def save(self): # type: () -> None - """Save the currentin-memory state. + """Save the current in-memory state. """ self._ensure_have_load_only() @@ -216,7 +250,7 @@ def save(self): ensure_dir(os.path.dirname(fname)) with open(fname, "w") as f: - parser.write(f) # type: ignore + parser.write(f) # # Private routines @@ -351,8 +385,10 @@ def _iter_config_files(self): else: yield kinds.ENV, [] + config_files = get_configuration_files() + # at the base we have any global configuration - yield kinds.GLOBAL, list(site_config_files) + yield kinds.GLOBAL, config_files[kinds.GLOBAL] # per-user configuration next should_load_user_config = not self.isolated and not ( @@ -360,11 +396,10 @@ def _iter_config_files(self): ) if should_load_user_config: # The legacy config file is overridden by the new config file - yield kinds.USER, [legacy_config_file, new_config_file] + yield kinds.USER, config_files[kinds.USER] # finally virtualenv configuration first trumping others - if running_under_virtualenv(): - yield kinds.VENV, [venv_config_file] + yield kinds.SITE, config_files[kinds.SITE] def _get_parser_to_modify(self): # type: () -> Tuple[str, RawConfigParser] diff --git a/pipenv/patched/notpip/_internal/distributions/__init__.py b/pipenv/patched/notpip/_internal/distributions/__init__.py new file mode 100644 index 0000000000..9eb3d7d6ec --- /dev/null +++ b/pipenv/patched/notpip/_internal/distributions/__init__.py @@ -0,0 +1,24 @@ +from pipenv.patched.notpip._internal.distributions.source.legacy import SourceDistribution +from pipenv.patched.notpip._internal.distributions.wheel import WheelDistribution +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from pipenv.patched.notpip._internal.distributions.base import AbstractDistribution + from pipenv.patched.notpip._internal.req.req_install import InstallRequirement + + +def make_distribution_for_install_requirement(install_req): + # type: (InstallRequirement) -> AbstractDistribution + """Returns a Distribution for the given InstallRequirement + """ + # Editable requirements will always be source distributions. They use the + # legacy logic until we create a modern standard for them. 
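+    # (The full routing below: editable -> SourceDistribution,
+    # wheel -> WheelDistribution, anything else -> SourceDistribution;
+    # an already-installed requirement is wrapped in an
+    # InstalledDistribution by the preparer rather than coming
+    # through here.)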
+    if install_req.editable:
+        return SourceDistribution(install_req)
+
+    # If it's a wheel, it's a WheelDistribution
+    if install_req.is_wheel:
+        return WheelDistribution(install_req)
+
+    # Otherwise, a SourceDistribution
+    return SourceDistribution(install_req)
diff --git a/pipenv/patched/notpip/_internal/distributions/base.py b/pipenv/patched/notpip/_internal/distributions/base.py
new file mode 100644
index 0000000000..b479ff831e
--- /dev/null
+++ b/pipenv/patched/notpip/_internal/distributions/base.py
@@ -0,0 +1,36 @@
+# The following comment should be removed at some point in the future.
+# mypy: disallow-untyped-defs=False
+
+import abc
+
+from pipenv.patched.notpip._vendor.six import add_metaclass
+
+
+@add_metaclass(abc.ABCMeta)
+class AbstractDistribution(object):
+    """A base class for handling installable artifacts.
+
+    The requirements for anything installable are as follows:
+
+    - we must be able to determine the requirement name
+      (or we can't correctly handle the non-upgrade case).
+
+    - for packages with setup requirements, we must also be able
+      to determine their requirements without installing additional
+      packages (for the same reason as run-time dependencies)
+
+    - we must be able to create a Distribution object exposing the
+      above metadata.
+    """
+
+    def __init__(self, req):
+        super(AbstractDistribution, self).__init__()
+        self.req = req
+
+    @abc.abstractmethod
+    def get_pkg_resources_distribution(self):
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def prepare_distribution_metadata(self, finder, build_isolation):
+        raise NotImplementedError()
diff --git a/pipenv/patched/notpip/_internal/distributions/installed.py b/pipenv/patched/notpip/_internal/distributions/installed.py
new file mode 100644
index 0000000000..78f29d52ba
--- /dev/null
+++ b/pipenv/patched/notpip/_internal/distributions/installed.py
@@ -0,0 +1,18 @@
+# The following comment should be removed at some point in the future.
+# mypy: disallow-untyped-defs=False
+
+from pipenv.patched.notpip._internal.distributions.base import AbstractDistribution
+
+
+class InstalledDistribution(AbstractDistribution):
+    """Represents an installed package.
+
+    This does not need any preparation as the required information has already
+    been computed.
+    """
+
+    def get_pkg_resources_distribution(self):
+        return self.req.satisfied_by
+
+    def prepare_distribution_metadata(self, finder, build_isolation):
+        pass
diff --git a/pipenv/patched/notpip/_internal/distributions/source/__init__.py b/pipenv/patched/notpip/_internal/distributions/source/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/pipenv/patched/notpip/_internal/distributions/source/legacy.py b/pipenv/patched/notpip/_internal/distributions/source/legacy.py
new file mode 100644
index 0000000000..0e700d2a78
--- /dev/null
+++ b/pipenv/patched/notpip/_internal/distributions/source/legacy.py
@@ -0,0 +1,98 @@
+# The following comment should be removed at some point in the future.
+# mypy: disallow-untyped-defs=False
+
+import logging
+
+from pipenv.patched.notpip._internal.build_env import BuildEnvironment
+from pipenv.patched.notpip._internal.distributions.base import AbstractDistribution
+from pipenv.patched.notpip._internal.exceptions import InstallationError
+from pipenv.patched.notpip._internal.utils.subprocess import runner_with_spinner_message
+
+logger = logging.getLogger(__name__)
+
+
+class SourceDistribution(AbstractDistribution):
+    """Represents a source distribution.
+
+    The preparation step for these needs metadata for the packages to be
+    generated, either using PEP 517 or using the legacy `setup.py egg_info`.
+
+    NOTE from @pradyunsg (14 June 2019)
+    I expect SourceDistribution class will need to be split into
+    `legacy_source` (setup.py based) and `source` (PEP 517 based) when we start
+    bringing logic for preparation out of InstallRequirement into this class.
+    """
+
+    def get_pkg_resources_distribution(self):
+        return self.req.get_dist()
+
+    def prepare_distribution_metadata(self, finder, build_isolation):
+        # Prepare for building. We need to:
+        # 1. Load pyproject.toml (if it exists)
+        # 2. Set up the build environment
+
+        self.req.load_pyproject_toml()
+        should_isolate = self.req.use_pep517 and build_isolation
+        if should_isolate:
+            self._setup_isolation(finder)
+
+        self.req.prepare_metadata()
+        self.req.assert_source_matches_version()
+
+    def _setup_isolation(self, finder):
+        def _raise_conflicts(conflicting_with, conflicting_reqs):
+            format_string = (
+                "Some build dependencies for {requirement} "
+                "conflict with {conflicting_with}: {description}."
+            )
+            error_message = format_string.format(
+                requirement=self.req,
+                conflicting_with=conflicting_with,
+                description=', '.join(
+                    '%s is incompatible with %s' % (installed, wanted)
+                    for installed, wanted in sorted(conflicting_reqs)
+                )
+            )
+            raise InstallationError(error_message)
+
+        # Isolate in a BuildEnvironment and install the build-time
+        # requirements.
+        self.req.build_env = BuildEnvironment()
+        self.req.build_env.install_requirements(
+            finder, self.req.pyproject_requires, 'overlay',
+            "Installing build dependencies"
+        )
+        conflicting, missing = self.req.build_env.check_requirements(
+            self.req.requirements_to_check
+        )
+        if conflicting:
+            _raise_conflicts("PEP 517/518 supported requirements",
+                             conflicting)
+        if missing:
+            logger.warning(
+                "Missing build requirements in pyproject.toml for %s.",
+                self.req,
+            )
+            logger.warning(
+                "The project does not specify a build backend, and "
+                "pip cannot fall back to setuptools without %s.",
+                " and ".join(map(repr, sorted(missing)))
+            )
+        # Install any extra build dependencies that the backend requests.
+        # This must be done in a second pass, as the pyproject.toml
+        # dependencies must be installed before we can call the backend.
+        with self.req.build_env:
+            runner = runner_with_spinner_message(
+                "Getting requirements to build wheel"
+            )
+            backend = self.req.pep517_backend
+            with backend.subprocess_runner(runner):
+                reqs = backend.get_requires_for_build_wheel()
+
+        conflicting, missing = self.req.build_env.check_requirements(reqs)
+        if conflicting:
+            _raise_conflicts("the backend dependencies", conflicting)
+        self.req.build_env.install_requirements(
+            finder, missing, 'normal',
+            "Installing backend dependencies"
+        )
diff --git a/pipenv/patched/notpip/_internal/distributions/wheel.py b/pipenv/patched/notpip/_internal/distributions/wheel.py
new file mode 100644
index 0000000000..23e73ee710
--- /dev/null
+++ b/pipenv/patched/notpip/_internal/distributions/wheel.py
@@ -0,0 +1,20 @@
+# The following comment should be removed at some point in the future.
+# mypy: disallow-untyped-defs=False
+
+from pipenv.patched.notpip._vendor import pkg_resources
+
+from pipenv.patched.notpip._internal.distributions.base import AbstractDistribution
+
+
+class WheelDistribution(AbstractDistribution):
+    """Represents a wheel distribution.
+
+    This does not need any preparation as wheels can be directly unpacked.
+ """ + + def get_pkg_resources_distribution(self): + return list(pkg_resources.find_distributions( + self.req.source_dir))[0] + + def prepare_distribution_metadata(self, finder, build_isolation): + pass diff --git a/pipenv/patched/notpip/_internal/download.py b/pipenv/patched/notpip/_internal/download.py index f593c2f205..b8d12e17fd 100644 --- a/pipenv/patched/notpip/_internal/download.py +++ b/pipenv/patched/notpip/_internal/download.py @@ -1,406 +1,85 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import cgi -import email.utils -import getpass -import json import logging import mimetypes import os -import platform import re import shutil import sys -from pipenv.patched.notpip._vendor import requests, six, urllib3 -from pipenv.patched.notpip._vendor.cachecontrol import CacheControlAdapter -from pipenv.patched.notpip._vendor.cachecontrol.caches import FileCache -from pipenv.patched.notpip._vendor.lockfile import LockError -from pipenv.patched.notpip._vendor.requests.adapters import BaseAdapter, HTTPAdapter -from pipenv.patched.notpip._vendor.requests.auth import AuthBase, HTTPBasicAuth +from pipenv.patched.notpip._vendor import requests from pipenv.patched.notpip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response -from pipenv.patched.notpip._vendor.requests.structures import CaseInsensitiveDict -from pipenv.patched.notpip._vendor.requests.utils import get_netrc_auth -# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is -# why we ignore the type on this import -from pipenv.patched.notpip._vendor.six.moves import xmlrpc_client # type: ignore +from pipenv.patched.notpip._vendor.six import PY2 from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse -from pipenv.patched.notpip._vendor.six.moves.urllib import request as urllib_request -from pipenv.patched.notpip._vendor.urllib3.util import IS_PYOPENSSL -import pipenv.patched.notpip from pipenv.patched.notpip._internal.exceptions import HashMismatch, InstallationError -from pipenv.patched.notpip._internal.locations import write_delete_marker_file from pipenv.patched.notpip._internal.models.index import PyPI +from pipenv.patched.notpip._internal.network.session import PipSession from pipenv.patched.notpip._internal.utils.encoding import auto_decode -from pipenv.patched.notpip._internal.utils.filesystem import check_path_owner -from pipenv.patched.notpip._internal.utils.glibc import libc_ver -from pipenv.patched.notpip._internal.utils.logging import indent_log +from pipenv.patched.notpip._internal.utils.filesystem import copy2_fixed from pipenv.patched.notpip._internal.utils.misc import ( - ARCHIVE_EXTENSIONS, ask_path_exists, backup_dir, call_subprocess, consume, - display_path, format_size, get_installed_version, rmtree, - split_auth_from_netloc, splitext, unpack_file, + ask_path_exists, + backup_dir, + consume, + display_path, + format_size, + hide_url, + path_to_display, + rmtree, + splitext, ) -from pipenv.patched.notpip._internal.utils.setuptools_build import SETUPTOOLS_SHIM from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING from pipenv.patched.notpip._internal.utils.ui import DownloadProgressProvider +from pipenv.patched.notpip._internal.utils.unpacking import unpack_file +from pipenv.patched.notpip._internal.utils.urls import get_url_scheme from pipenv.patched.notpip._internal.vcs 
import vcs if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Optional, Tuple, Dict, IO, Text, Union + from typing import ( + IO, Callable, List, Optional, Text, Tuple, ) - from pipenv.patched.notpip._internal.models.link import Link # noqa: F401 - from pipenv.patched.notpip._internal.utils.hashes import Hashes # noqa: F401 - from pipenv.patched.notpip._internal.vcs import AuthInfo # noqa: F401 - -try: - import ssl # noqa -except ImportError: - ssl = None - -HAS_TLS = (ssl is not None) or IS_PYOPENSSL - -__all__ = ['get_file_content', - 'is_url', 'url_to_path', 'path_to_url', - 'is_archive_file', 'unpack_vcs_link', - 'unpack_file_url', 'is_vcs_url', 'is_file_url', - 'unpack_http_url', 'unpack_url'] + from mypy_extensions import TypedDict -logger = logging.getLogger(__name__) + from pipenv.patched.notpip._internal.models.link import Link + from pipenv.patched.notpip._internal.utils.hashes import Hashes + from pipenv.patched.notpip._internal.vcs.versioncontrol import VersionControl - -def user_agent(): - """ - Return a string representing the user agent. - """ - data = { - "installer": {"name": "pip", "version": pipenv.patched.notpip.__version__}, - "python": platform.python_version(), - "implementation": { - "name": platform.python_implementation(), - }, - } - - if data["implementation"]["name"] == 'CPython': - data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == 'PyPy': - if sys.pypy_version_info.releaselevel == 'final': - pypy_version_info = sys.pypy_version_info[:3] - else: - pypy_version_info = sys.pypy_version_info - data["implementation"]["version"] = ".".join( - [str(x) for x in pypy_version_info] + if PY2: + CopytreeKwargs = TypedDict( + 'CopytreeKwargs', + { + 'ignore': Callable[[str, List[str]], List[str]], + 'symlinks': bool, + }, + total=False, ) - elif data["implementation"]["name"] == 'Jython': - # Complete Guess - data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == 'IronPython': - # Complete Guess - data["implementation"]["version"] = platform.python_version() - - if sys.platform.startswith("linux"): - from pipenv.patched.notpip._vendor import distro - distro_infos = dict(filter( - lambda x: x[1], - zip(["name", "version", "id"], distro.linux_distribution()), - )) - libc = dict(filter( - lambda x: x[1], - zip(["lib", "version"], libc_ver()), - )) - if libc: - distro_infos["libc"] = libc - if distro_infos: - data["distro"] = distro_infos - - if sys.platform.startswith("darwin") and platform.mac_ver()[0]: - data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]} - - if platform.system(): - data.setdefault("system", {})["name"] = platform.system() - - if platform.release(): - data.setdefault("system", {})["release"] = platform.release() - - if platform.machine(): - data["cpu"] = platform.machine() - - if HAS_TLS: - data["openssl_version"] = ssl.OPENSSL_VERSION - - setuptools_version = get_installed_version("setuptools") - if setuptools_version is not None: - data["setuptools_version"] = setuptools_version - - return "{data[installer][name]}/{data[installer][version]} {json}".format( - data=data, - json=json.dumps(data, separators=(",", ":"), sort_keys=True), - ) - - -class MultiDomainBasicAuth(AuthBase): - - def __init__(self, prompting=True): - # type: (bool) -> None - self.prompting = prompting - self.passwords = {} # type: Dict[str, AuthInfo] - - def __call__(self, req): - parsed = urllib_parse.urlparse(req.url) - - # Split the credentials from the 
netloc. - netloc, url_user_password = split_auth_from_netloc(parsed.netloc) - - # Set the url of the request to the url without any credentials - req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:]) - - # Use any stored credentials that we have for this netloc - username, password = self.passwords.get(netloc, (None, None)) - - # Use the credentials embedded in the url if we have none stored - if username is None: - username, password = url_user_password - - # Get creds from netrc if we still don't have them - if username is None and password is None: - netrc_auth = get_netrc_auth(req.url) - username, password = netrc_auth if netrc_auth else (None, None) - - if username or password: - # Store the username and password - self.passwords[netloc] = (username, password) - - # Send the basic auth with this request - req = HTTPBasicAuth(username or "", password or "")(req) - - # Attach a hook to handle 401 responses - req.register_hook("response", self.handle_401) - - return req - - def handle_401(self, resp, **kwargs): - # We only care about 401 responses, anything else we want to just - # pass through the actual response - if resp.status_code != 401: - return resp - - # We are not able to prompt the user so simply return the response - if not self.prompting: - return resp - - parsed = urllib_parse.urlparse(resp.url) - - # Prompt the user for a new username and password - username = six.moves.input("User for %s: " % parsed.netloc) - password = getpass.getpass("Password: ") - - # Store the new username and password to use for future requests - if username or password: - self.passwords[parsed.netloc] = (username, password) - - # Consume content and release the original connection to allow our new - # request to reuse the same one. - resp.content - resp.raw.release_conn() - - # Add our new username and password to the request - req = HTTPBasicAuth(username or "", password or "")(resp.request) - req.register_hook("response", self.warn_on_401) - - # Send our new request - new_resp = resp.connection.send(req, **kwargs) - new_resp.history.append(resp) - - return new_resp - - def warn_on_401(self, resp, **kwargs): - # warn user that they provided incorrect credentials - if resp.status_code == 401: - logger.warning('401 Error, Credentials not correct for %s', - resp.request.url) - - -class LocalFSAdapter(BaseAdapter): - - def send(self, request, stream=None, timeout=None, verify=None, cert=None, - proxies=None): - pathname = url_to_path(request.url) - - resp = Response() - resp.status_code = 200 - resp.url = request.url - - try: - stats = os.stat(pathname) - except OSError as exc: - resp.status_code = 404 - resp.raw = exc - else: - modified = email.utils.formatdate(stats.st_mtime, usegmt=True) - content_type = mimetypes.guess_type(pathname)[0] or "text/plain" - resp.headers = CaseInsensitiveDict({ - "Content-Type": content_type, - "Content-Length": stats.st_size, - "Last-Modified": modified, - }) - - resp.raw = open(pathname, "rb") - resp.close = resp.raw.close - - return resp - - def close(self): - pass - - -class SafeFileCache(FileCache): - """ - A file based cache which is safe to use even when the target directory may - not be accessible or writable. - """ - - def __init__(self, *args, **kwargs): - super(SafeFileCache, self).__init__(*args, **kwargs) - - # Check to ensure that the directory containing our cache directory - # is owned by the user current executing pip. If it does not exist - # we will check the parent directory until we find one that does exist. 
- # If it is not owned by the user executing pip then we will disable - # the cache and log a warning. - if not check_path_owner(self.directory): - logger.warning( - "The directory '%s' or its parent directory is not owned by " - "the current user and the cache has been disabled. Please " - "check the permissions and owner of that directory. If " - "executing pip with sudo, you may want sudo's -H flag.", - self.directory, - ) - - # Set our directory to None to disable the Cache - self.directory = None - - def get(self, *args, **kwargs): - # If we don't have a directory, then the cache should be a no-op. - if self.directory is None: - return - - try: - return super(SafeFileCache, self).get(*args, **kwargs) - except (LockError, OSError, IOError): - # We intentionally silence this error, if we can't access the cache - # then we can just skip caching and process the request as if - # caching wasn't enabled. - pass - - def set(self, *args, **kwargs): - # If we don't have a directory, then the cache should be a no-op. - if self.directory is None: - return - - try: - return super(SafeFileCache, self).set(*args, **kwargs) - except (LockError, OSError, IOError): - # We intentionally silence this error, if we can't access the cache - # then we can just skip caching and process the request as if - # caching wasn't enabled. - pass - - def delete(self, *args, **kwargs): - # If we don't have a directory, then the cache should be a no-op. - if self.directory is None: - return - - try: - return super(SafeFileCache, self).delete(*args, **kwargs) - except (LockError, OSError, IOError): - # We intentionally silence this error, if we can't access the cache - # then we can just skip caching and process the request as if - # caching wasn't enabled. - pass - - -class InsecureHTTPAdapter(HTTPAdapter): - - def cert_verify(self, conn, url, verify, cert): - conn.cert_reqs = 'CERT_NONE' - conn.ca_certs = None - - -class PipSession(requests.Session): - - timeout = None # type: Optional[int] - - def __init__(self, *args, **kwargs): - retries = kwargs.pop("retries", 0) - cache = kwargs.pop("cache", None) - insecure_hosts = kwargs.pop("insecure_hosts", []) - - super(PipSession, self).__init__(*args, **kwargs) - - # Attach our User Agent to the request - self.headers["User-Agent"] = user_agent() - - # Attach our Authentication handler to the session - self.auth = MultiDomainBasicAuth() - - # Create our urllib3.Retry instance which will allow us to customize - # how we handle retries. - retries = urllib3.Retry( - # Set the total number of retries that a particular request can - # have. - total=retries, - - # A 503 error from PyPI typically means that the Fastly -> Origin - # connection got interrupted in some way. A 503 error in general - # is typically considered a transient error so we'll go ahead and - # retry it. - # A 500 may indicate transient error in Amazon S3 - # A 520 or 527 - may indicate transient error in CloudFlare - status_forcelist=[500, 503, 520, 527], - - # Add a small amount of back off between failed requests in - # order to prevent hammering the service. - backoff_factor=0.25, + else: + CopytreeKwargs = TypedDict( + 'CopytreeKwargs', + { + 'copy_function': Callable[[str, str], None], + 'ignore': Callable[[str, List[str]], List[str]], + 'ignore_dangling_symlinks': bool, + 'symlinks': bool, + }, + total=False, ) - # We want to _only_ cache responses on securely fetched origins. 
We do - # this because we can't validate the response of an insecurely fetched - # origin, and we don't want someone to be able to poison the cache and - # require manual eviction from the cache to fix it. - if cache: - secure_adapter = CacheControlAdapter( - cache=SafeFileCache(cache, use_dir_lock=True), - max_retries=retries, - ) - else: - secure_adapter = HTTPAdapter(max_retries=retries) - - # Our Insecure HTTPAdapter disables HTTPS validation. It does not - # support caching (see above) so we'll use it for all http:// URLs as - # well as any https:// host that we've marked as ignoring TLS errors - # for. - insecure_adapter = InsecureHTTPAdapter(max_retries=retries) - self.mount("https://", secure_adapter) - self.mount("http://", insecure_adapter) - - # Enable file:// urls - self.mount("file://", LocalFSAdapter()) - - # We want to use a non-validating adapter for any requests which are - # deemed insecure. - for host in insecure_hosts: - self.mount("https://{}/".format(host), insecure_adapter) +__all__ = ['get_file_content', + 'unpack_vcs_link', + 'unpack_file_url', + 'unpack_http_url', 'unpack_url', + 'parse_content_disposition', 'sanitize_content_filename'] - def request(self, method, url, *args, **kwargs): - # Allow setting a default timeout on a session - kwargs.setdefault("timeout", self.timeout) - # Dispatch the actual request - return super(PipSession, self).request(method, url, *args, **kwargs) +logger = logging.getLogger(__name__) def get_file_content(url, comes_from=None, session=None): @@ -417,29 +96,30 @@ def get_file_content(url, comes_from=None, session=None): "get_file_content() missing 1 required keyword argument: 'session'" ) - match = _scheme_re.search(url) - if match: - scheme = match.group(1).lower() - if (scheme == 'file' and comes_from and - comes_from.startswith('http')): + scheme = get_url_scheme(url) + + if scheme in ['http', 'https']: + # FIXME: catch some errors + resp = session.get(url) + resp.raise_for_status() + return resp.url, resp.text + + elif scheme == 'file': + if comes_from and comes_from.startswith('http'): raise InstallationError( 'Requirements file %s references URL %s, which is local' % (comes_from, url)) - if scheme == 'file': - path = url.split(':', 1)[1] - path = path.replace('\\', '/') - match = _url_slash_drive_re.match(path) - if match: - path = match.group(1) + ':' + path.split('|', 1)[1] - path = urllib_parse.unquote(path) - if path.startswith('/'): - path = '/' + path.lstrip('/') - url = path - else: - # FIXME: catch some errors - resp = session.get(url) - resp.raise_for_status() - return resp.url, resp.text + + path = url.split(':', 1)[1] + path = path.replace('\\', '/') + match = _url_slash_drive_re.match(path) + if match: + path = match.group(1) + ':' + path.split('|', 1)[1] + path = urllib_parse.unquote(path) + if path.startswith('/'): + path = '/' + path.lstrip('/') + url = path + try: with open(url, 'rb') as f: content = auto_decode(f.read()) @@ -450,89 +130,25 @@ def get_file_content(url, comes_from=None, session=None): return url, content -_scheme_re = re.compile(r'^(http|https|file):', re.I) _url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I) -def is_url(name): - # type: (Union[str, Text]) -> bool - """Returns true if the name looks like a URL""" - if ':' not in name: - return False - scheme = name.split(':', 1)[0].lower() - return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes - - -def url_to_path(url): - # type: (str) -> str - """ - Convert a file: URL to a path. 
- """ - assert url.startswith('file:'), ( - "You can only turn file: urls into filenames (not %r)" % url) - - _, netloc, path, _, _ = urllib_parse.urlsplit(url) - - # if we have a UNC path, prepend UNC share notation - if netloc: - netloc = '\\\\' + netloc - - path = urllib_request.url2pathname(netloc + path) - return path - - -def path_to_url(path): - # type: (Union[str, Text]) -> str - """ - Convert a path to a file: URL. The path will be made absolute and have - quoted path parts. - """ - path = os.path.normpath(os.path.abspath(path)) - url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path)) - return url - - -def is_archive_file(name): - # type: (str) -> bool - """Return True if `name` is a considered as an archive file.""" - ext = splitext(name)[1].lower() - if ext in ARCHIVE_EXTENSIONS: - return True - return False - - def unpack_vcs_link(link, location): + # type: (Link, str) -> None vcs_backend = _get_used_vcs_backend(link) - vcs_backend.unpack(location) + assert vcs_backend is not None + vcs_backend.unpack(location, url=hide_url(link.url)) def _get_used_vcs_backend(link): - for backend in vcs.backends: - if link.scheme in backend.schemes: - vcs_backend = backend(link.url) - return vcs_backend - - -def is_vcs_url(link): - # type: (Link) -> bool - return bool(_get_used_vcs_backend(link)) - - -def is_file_url(link): - # type: (Link) -> bool - return link.url.lower().startswith('file:') - - -def is_dir_url(link): - # type: (Link) -> bool - """Return whether a file:// Link points to a directory. - - ``link`` must not have any other scheme but file://. Call is_file_url() - first. - + # type: (Link) -> Optional[VersionControl] + """ + Return a VersionControl object or None. """ - link_path = url_to_path(link.url_without_fragment) - return os.path.isdir(link_path) + for vcs_backend in vcs.backends: + if link.scheme in vcs_backend.schemes: + return vcs_backend + return None def _progress_indicator(iterable, *args, **kwargs): @@ -543,7 +159,7 @@ def _download_url( resp, # type: Response link, # type: Link content_file, # type: IO - hashes, # type: Hashes + hashes, # type: Optional[Hashes] progress_bar # type: str ): # type: (...) -> None @@ -627,8 +243,6 @@ def written_chunks(chunks): else: logger.info("Downloading %s", url) - logger.debug('Downloading from URL %s', link) - downloaded_chunks = written_chunks( progress_indicator( resp_read(CONTENT_CHUNK_SIZE), @@ -703,7 +317,7 @@ def unpack_http_url( # unpack the archive to the build dir location. even when only # downloading archives, they have to be unpacked to parse dependencies - unpack_file(from_path, location, content_type, link) + unpack_file(from_path, location, content_type) # a download dir is specified; let's copy the archive there if download_dir and not already_downloaded_path: @@ -713,6 +327,46 @@ def unpack_http_url( os.unlink(from_path) +def _copy2_ignoring_special_files(src, dest): + # type: (str, str) -> None + """Copying special files is not supported, but as a convenience to users + we skip errors copying them. This supports tools that may create e.g. + socket files in the project source directory. + """ + try: + copy2_fixed(src, dest) + except shutil.SpecialFileError as e: + # SpecialFileError may be raised due to either the source or + # destination. If the destination was the cause then we would actually + # care, but since the destination directory is deleted prior to + # copy we ignore all of them assuming it is caused by the source. 
+ logger.warning( + "Ignoring special file error '%s' encountered copying %s to %s.", + str(e), + path_to_display(src), + path_to_display(dest), + ) + + +def _copy_source_tree(source, target): + # type: (str, str) -> None + def ignore(d, names): + # Pulling in those directories can potentially be very slow, + # exclude the following directories if they appear in the top + # level dir (and only it). + # See discussion at https://github.com/pypa/pip/pull/6770 + return ['.tox', '.nox'] if d == source else [] + + kwargs = dict(ignore=ignore, symlinks=True) # type: CopytreeKwargs + + if not PY2: + # Python 2 does not support copy_function, so we only ignore + # errors on special file copy in Python 3. + kwargs['copy_function'] = _copy2_ignoring_special_files + + shutil.copytree(source, target, **kwargs) + + def unpack_file_url( link, # type: Link location, # type: str @@ -725,13 +379,12 @@ def unpack_file_url( If download_dir is provided and link points to a file, make a copy of the link file inside download_dir. """ - link_path = url_to_path(link.url_without_fragment) - + link_path = link.file_path # If it's a url to a local directory - if is_dir_url(link): + if link.is_existing_dir(): if os.path.isdir(location): rmtree(location) - shutil.copytree(link_path, location, symlinks=True) + _copy_source_tree(link_path, location) if download_dir: logger.info('Link is a directory, ignoring download_dir') return @@ -760,83 +413,17 @@ def unpack_file_url( # unpack the archive to the build dir location. even when only downloading # archives, they have to be unpacked to parse dependencies - unpack_file(from_path, location, content_type, link) + unpack_file(from_path, location, content_type) # a download dir is specified and not already downloaded if download_dir and not already_downloaded_path: _copy_file(from_path, download_dir, link) -def _copy_dist_from_dir(link_path, location): - """Copy distribution files in `link_path` to `location`. - - Invoked when user requests to install a local directory. E.g.: - - pip install . - pip install ~/dev/git-repos/python-prompt-toolkit - - """ - - # Note: This is currently VERY SLOW if you have a lot of data in the - # directory, because it copies everything with `shutil.copytree`. - # What it should really do is build an sdist and install that. - # See https://github.com/pypa/pip/issues/2195 - - if os.path.isdir(location): - rmtree(location) - - # build an sdist - setup_py = 'setup.py' - sdist_args = [sys.executable] - sdist_args.append('-c') - sdist_args.append(SETUPTOOLS_SHIM % setup_py) - sdist_args.append('sdist') - sdist_args += ['--dist-dir', location] - logger.info('Running setup.py sdist for %s', link_path) - - with indent_log(): - call_subprocess(sdist_args, cwd=link_path, show_stdout=False) - - # unpack sdist into `location` - sdist = os.path.join(location, os.listdir(location)[0]) - logger.info('Unpacking sdist %s into %s', sdist, location) - unpack_file(sdist, location, content_type=None, link=None) - - -class PipXmlrpcTransport(xmlrpc_client.Transport): - """Provide a `xmlrpclib.Transport` implementation via a `PipSession` - object. 
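
The ``ignore`` callable handed to ``shutil.copytree`` in ``_copy_source_tree`` receives each visited directory plus its entries, and any names it returns are skipped; returning ``['.tox', '.nox']`` only when the visited directory is the tree root is what restricts the pruning to the top level. A self-contained demonstration with throwaway temp paths::

    import os
    import shutil
    import tempfile

    source = tempfile.mkdtemp()                       # throwaway source tree
    os.mkdir(os.path.join(source, '.tox'))            # pruned: top level
    os.makedirs(os.path.join(source, 'pkg', '.tox'))  # kept: nested

    def ignore(d, names):
        # Skip .tox/.nox only when visiting the root of the copy.
        return ['.tox', '.nox'] if d == source else []

    target = os.path.join(tempfile.mkdtemp(), 'copy')
    shutil.copytree(source, target, ignore=ignore, symlinks=True)
    print(sorted(os.listdir(target)))                          # -> ['pkg']
    print(os.path.isdir(os.path.join(target, 'pkg', '.tox')))  # -> True
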
- """ - - def __init__(self, index_url, session, use_datetime=False): - xmlrpc_client.Transport.__init__(self, use_datetime) - index_parts = urllib_parse.urlparse(index_url) - self._scheme = index_parts.scheme - self._session = session - - def request(self, host, handler, request_body, verbose=False): - parts = (self._scheme, host, handler, None, None, None) - url = urllib_parse.urlunparse(parts) - try: - headers = {'Content-Type': 'text/xml'} - response = self._session.post(url, data=request_body, - headers=headers, stream=True) - response.raise_for_status() - self.verbose = verbose - return self.parse_response(response.raw) - except requests.HTTPError as exc: - logger.critical( - "HTTP error %s while getting %s", - exc.response.status_code, url, - ) - raise - - def unpack_url( - link, # type: Optional[Link] - location, # type: Optional[str] + link, # type: Link + location, # type: str download_dir=None, # type: Optional[str] - only_download=False, # type: bool session=None, # type: Optional[PipSession] hashes=None, # type: Optional[Hashes] progress_bar="on" # type: str @@ -857,11 +444,11 @@ def unpack_url( would ordinarily raise HashUnsupported) are allowed. """ # non-editable vcs urls - if is_vcs_url(link): + if link.is_vcs: unpack_vcs_link(link, location) # file urls - elif is_file_url(link): + elif link.is_file: unpack_file_url(link, location, download_dir, hashes=hashes) # http urls @@ -877,15 +464,36 @@ def unpack_url( hashes=hashes, progress_bar=progress_bar ) - if only_download: - write_delete_marker_file(location) + + +def sanitize_content_filename(filename): + # type: (str) -> str + """ + Sanitize the "filename" value from a Content-Disposition header. + """ + return os.path.basename(filename) + + +def parse_content_disposition(content_disposition, default_filename): + # type: (str, str) -> str + """ + Parse the "filename" value from a Content-Disposition header, and + return the default filename if the result is empty. + """ + _type, params = cgi.parse_header(content_disposition) + filename = params.get('filename') + if filename: + # We need to sanitize the filename to prevent directory traversal + # in case the filename contains ".." path parts. + filename = sanitize_content_filename(filename) + return filename or default_filename def _download_http_url( link, # type: Link session, # type: PipSession temp_dir, # type: str - hashes, # type: Hashes + hashes, # type: Optional[Hashes] progress_bar # type: str ): # type: (...) -> Tuple[str, str] @@ -928,11 +536,8 @@ def _download_http_url( # Have a look at the Content-Disposition header for a better guess content_disposition = resp.headers.get('content-disposition') if content_disposition: - type, params = cgi.parse_header(content_disposition) - # We use ``or`` here because we don't want to use an "empty" value - # from the filename param. 
- filename = params.get('filename') or filename - ext = splitext(filename)[1] + filename = parse_content_disposition(content_disposition, filename) + ext = splitext(filename)[1] # type: Optional[str] if not ext: ext = mimetypes.guess_extension(content_type) if ext: @@ -948,24 +553,26 @@ def _download_http_url( def _check_download_dir(link, download_dir, hashes): - # type: (Link, str, Hashes) -> Optional[str] + # type: (Link, str, Optional[Hashes]) -> Optional[str] """ Check download_dir for previously downloaded file with correct hash If a correct file is found return its path else None """ download_path = os.path.join(download_dir, link.filename) - if os.path.exists(download_path): - # If already downloaded, does its hash match? - logger.info('File was already downloaded %s', download_path) - if hashes: - try: - hashes.check_against_path(download_path) - except HashMismatch: - logger.warning( - 'Previously-downloaded file %s has bad hash. ' - 'Re-downloading.', - download_path - ) - os.unlink(download_path) - return None - return download_path - return None + + if not os.path.exists(download_path): + return None + + # If already downloaded, does its hash match? + logger.info('File was already downloaded %s', download_path) + if hashes: + try: + hashes.check_against_path(download_path) + except HashMismatch: + logger.warning( + 'Previously-downloaded file %s has bad hash. ' + 'Re-downloading.', + download_path + ) + os.unlink(download_path) + return None + return download_path diff --git a/pipenv/patched/notpip/_internal/exceptions.py b/pipenv/patched/notpip/_internal/exceptions.py index 1342935d4f..368b433b4e 100644 --- a/pipenv/patched/notpip/_internal/exceptions.py +++ b/pipenv/patched/notpip/_internal/exceptions.py @@ -1,4 +1,8 @@ """Exceptions used throughout package""" + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import from itertools import chain, groupby, repeat @@ -8,8 +12,9 @@ from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import Optional # noqa: F401 - from pipenv.patched.notpip._internal.req.req_install import InstallRequirement # noqa: F401 + from typing import Optional + from pipenv.patched.notpip._vendor.pkg_resources import Distribution + from pipenv.patched.notpip._internal.req.req_install import InstallRequirement class PipError(Exception): @@ -28,6 +33,36 @@ class UninstallationError(PipError): """General exception during uninstallation""" +class NoneMetadataError(PipError): + """ + Raised when accessing "METADATA" or "PKG-INFO" metadata for a + pip._vendor.pkg_resources.Distribution object and + `dist.has_metadata('METADATA')` returns True but + `dist.get_metadata('METADATA')` returns None (and similarly for + "PKG-INFO"). + """ + + def __init__(self, dist, metadata_name): + # type: (Distribution, str) -> None + """ + :param dist: A Distribution object. + :param metadata_name: The name of the metadata being accessed + (can be "METADATA" or "PKG-INFO"). + """ + self.dist = dist + self.metadata_name = metadata_name + + def __str__(self): + # type: () -> str + # Use `dist` in the error message because its stringification + # includes more information, like the version and location. 
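
The ``parse_content_disposition``/``sanitize_content_filename`` pair introduced above exists to stop a malicious server from smuggling path components through the suggested download filename; ``os.path.basename`` discards any directory parts. A worked example (``cgi.parse_header`` is the Python 2/3-era API this vendored code targets; it was later removed from the stdlib in Python 3.13)::

    import cgi
    import os

    def parse_content_disposition(value, default):
        # Same shape as the helper above: take the filename parameter,
        # strip any directory components, fall back to the default.
        _type, params = cgi.parse_header(value)
        filename = params.get('filename')
        if filename:
            filename = os.path.basename(filename)
        return filename or default

    header = 'attachment; filename="../../etc/passwd"'
    print(parse_content_disposition(header, 'pkg-1.0.tar.gz'))
    # -> 'passwd' (the traversal prefix is discarded)
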
+ return ( + 'None {} metadata found for distribution: {}'.format( + self.metadata_name, self.dist, + ) + ) + + class DistributionNotFound(InstallationError): """Raised when a distribution cannot be found to satisfy a requirement""" @@ -246,7 +281,6 @@ def hash_then_or(hash_name): for e in expecteds) lines.append(' Got %s\n' % self.gots[hash_name].hexdigest()) - prefix = ' or' return '\n'.join(lines) diff --git a/pipenv/patched/notpip/_internal/index.py b/pipenv/patched/notpip/_internal/index.py index ad145fe478..0f212115a8 100644 --- a/pipenv/patched/notpip/_internal/index.py +++ b/pipenv/patched/notpip/_internal/index.py @@ -1,479 +1,535 @@ """Routines related to PyPI, indexes""" + +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import -import cgi -import itertools import logging -import mimetypes -import os -import posixpath import re -import sys -from collections import namedtuple -from pipenv.patched.notpip._vendor import html5lib, requests, six -from pipenv.patched.notpip._vendor.distlib.compat import unescape from pipenv.patched.notpip._vendor.packaging import specifiers from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._vendor.packaging.version import parse as parse_version -from pipenv.patched.notpip._vendor.requests.exceptions import RetryError, SSLError -from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse -from pipenv.patched.notpip._vendor.six.moves.urllib import request as urllib_request -from pipenv.patched.notpip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path from pipenv.patched.notpip._internal.exceptions import ( - BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename, + BestVersionAlreadyInstalled, + DistributionNotFound, + InvalidWheelFilename, UnsupportedWheel, ) from pipenv.patched.notpip._internal.models.candidate import InstallationCandidate from pipenv.patched.notpip._internal.models.format_control import FormatControl -from pipenv.patched.notpip._internal.models.index import PyPI from pipenv.patched.notpip._internal.models.link import Link -from pipenv.patched.notpip._internal.pep425tags import get_supported -from pipenv.patched.notpip._internal.utils.compat import ipaddress +from pipenv.patched.notpip._internal.models.selection_prefs import SelectionPreferences +from pipenv.patched.notpip._internal.models.target_python import TargetPython +from pipenv.patched.notpip._internal.utils.filetypes import WHEEL_EXTENSION from pipenv.patched.notpip._internal.utils.logging import indent_log -from pipenv.patched.notpip._internal.utils.misc import ( - ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, WHEEL_EXTENSION, normalize_path, - redact_password_from_url, -) +from pipenv.patched.notpip._internal.utils.misc import build_netloc from pipenv.patched.notpip._internal.utils.packaging import check_requires_python from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.unpacking import SUPPORTED_EXTENSIONS +from pipenv.patched.notpip._internal.utils.urls import url_to_path from pipenv.patched.notpip._internal.wheel import Wheel if MYPY_CHECK_RUNNING: - from logging import Logger # noqa: F401 - from typing import ( # noqa: F401 - Tuple, Optional, Any, List, Union, Callable, Set, Sequence, - Iterable, MutableMapping + from typing import ( + FrozenSet, Iterable, List, Optional, Set, 
Text, Tuple, Union, + ) + from pipenv.patched.notpip._vendor.packaging.version import _BaseVersion + from pipenv.patched.notpip._internal.collector import LinkCollector + from pipenv.patched.notpip._internal.models.search_scope import SearchScope + from pipenv.patched.notpip._internal.req import InstallRequirement + from pipenv.patched.notpip._internal.pep425tags import Pep425Tag + from pipenv.patched.notpip._internal.utils.hashes import Hashes + + BuildTag = Union[Tuple[()], Tuple[int, str]] + CandidateSortingKey = ( + Tuple[int, int, int, _BaseVersion, BuildTag, Optional[int]] ) - from pipenv.patched.notpip._vendor.packaging.version import _BaseVersion # noqa: F401 - from pipenv.patched.notpip._vendor.requests import Response # noqa: F401 - from pipenv.patched.notpip._internal.req import InstallRequirement # noqa: F401 - from pipenv.patched.notpip._internal.download import PipSession # noqa: F401 - - SecureOrigin = Tuple[str, str, Optional[str]] - BuildTag = Tuple[Any, ...] # either emply tuple or Tuple[int, str] - CandidateSortingKey = Tuple[int, _BaseVersion, BuildTag, Optional[int]] - -__all__ = ['FormatControl', 'PackageFinder'] -SECURE_ORIGINS = [ - # protocol, hostname, port - # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) - ("https", "*", "*"), - ("*", "localhost", "*"), - ("*", "127.0.0.0/8", "*"), - ("*", "::1/128", "*"), - ("file", "*", None), - # ssh is always secure. - ("ssh", "*", "*"), -] # type: List[SecureOrigin] +__all__ = ['FormatControl', 'BestCandidateResult', 'PackageFinder'] logger = logging.getLogger(__name__) -def _match_vcs_scheme(url): - # type: (str) -> Optional[str] - """Look for VCS schemes in the URL. - - Returns the matched VCS scheme, or None if there's no match. +def _check_link_requires_python( + link, # type: Link + version_info, # type: Tuple[int, int, int] + ignore_requires_python=False, # type: bool +): + # type: (...) -> bool """ - from pipenv.patched.notpip._internal.vcs import VcsSupport - for scheme in VcsSupport.schemes: - if url.lower().startswith(scheme) and url[len(scheme)] in '+:': - return scheme - return None + Return whether the given Python version is compatible with a link's + "Requires-Python" value. - -def _is_url_like_archive(url): - # type: (str) -> bool - """Return whether the URL looks like an archive. + :param version_info: A 3-tuple of ints representing the Python + major-minor-micro version to check. + :param ignore_requires_python: Whether to ignore the "Requires-Python" + value if the given Python version isn't compatible. 
""" - filename = Link(url).filename - for bad_ext in ARCHIVE_EXTENSIONS: - if filename.endswith(bad_ext): - return True - return False + try: + is_compatible = check_requires_python( + link.requires_python, version_info=version_info, + ) + except specifiers.InvalidSpecifier: + logger.debug( + "Ignoring invalid Requires-Python (%r) for link: %s", + link.requires_python, link, + ) + else: + if not is_compatible: + version = '.'.join(map(str, version_info)) + if not ignore_requires_python: + logger.debug( + 'Link requires a different Python (%s not in: %r): %s', + version, link.requires_python, link, + ) + return False + logger.debug( + 'Ignoring failed Requires-Python check (%s not in: %r) ' + 'for link: %s', + version, link.requires_python, link, + ) -class _NotHTML(Exception): - def __init__(self, content_type, request_desc): - # type: (str, str) -> None - super(_NotHTML, self).__init__(content_type, request_desc) - self.content_type = content_type - self.request_desc = request_desc + return True -def _ensure_html_header(response): - # type: (Response) -> None - """Check the Content-Type header to ensure the response contains HTML. +class LinkEvaluator(object): - Raises `_NotHTML` if the content type is not text/html. """ - content_type = response.headers.get("Content-Type", "") - if not content_type.lower().startswith("text/html"): - raise _NotHTML(content_type, response.request.method) - - -class _NotHTTP(Exception): - pass + Responsible for evaluating links for a particular project. + """ + _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') -def _ensure_html_response(url, session): - # type: (str, PipSession) -> None - """Send a HEAD request to the URL, and ensure the response contains HTML. + # Don't include an allow_yanked default value to make sure each call + # site considers whether yanked releases are allowed. This also causes + # that decision to be made explicit in the calling code, which helps + # people when reading the code. + def __init__( + self, + project_name, # type: str + canonical_name, # type: str + formats, # type: FrozenSet + target_python, # type: TargetPython + allow_yanked, # type: bool + ignore_requires_python=None, # type: Optional[bool] + ignore_compatibility=None, # type: Optional[bool] + ): + # type: (...) -> None + """ + :param project_name: The user supplied package name. + :param canonical_name: The canonical package name. + :param formats: The formats allowed for this package. Should be a set + with 'binary' or 'source' or both in it. + :param target_python: The target Python interpreter to use when + evaluating link compatibility. This is used, for example, to + check wheel compatibility, as well as when checking the Python + version, e.g. the Python version embedded in a link filename + (or egg fragment) and against an HTML link's optional PEP 503 + "data-requires-python" attribute. + :param allow_yanked: Whether files marked as yanked (in the sense + of PEP 592) are permitted to be candidates for install. + :param ignore_requires_python: Whether to ignore incompatible + PEP 503 "data-requires-python" values in HTML links. Defaults + to False. + :param Optional[bool] ignore_compatibility: Whether to ignore + compatibility of python versions and allow all versions of packages. 
+ """ + if ignore_requires_python is None: + ignore_requires_python = False + if ignore_compatibility is None: + ignore_compatibility = True + + self._allow_yanked = allow_yanked + self._canonical_name = canonical_name + self._ignore_requires_python = ignore_requires_python + self._formats = formats + self._target_python = target_python + self._ignore_compatibility = ignore_compatibility + + self.project_name = project_name + + def evaluate_link(self, link): + # type: (Link) -> Tuple[bool, Optional[Text]] + """ + Determine whether a link is a candidate for installation. - Raises `_NotHTTP` if the URL is not available for a HEAD request, or - `_NotHTML` if the content type is not text/html. - """ - scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) - if scheme not in {'http', 'https'}: - raise _NotHTTP() + :return: A tuple (is_candidate, result), where `result` is (1) a + version string if `is_candidate` is True, and (2) if + `is_candidate` is False, an optional string to log the reason + the link fails to qualify. + """ + version = None + if link.is_yanked and not self._allow_yanked: + reason = link.yanked_reason or '' + # Mark this as a unicode string to prevent "UnicodeEncodeError: + # 'ascii' codec can't encode character" in Python 2 when + # the reason contains non-ascii characters. + return (False, u'yanked for reason: {}'.format(reason)) - resp = session.head(url, allow_redirects=True) - resp.raise_for_status() + if link.egg_fragment: + egg_info = link.egg_fragment + ext = link.ext + else: + egg_info, ext = link.splitext() + if not ext: + return (False, 'not a file') + if ext not in SUPPORTED_EXTENSIONS: + return (False, 'unsupported archive format: %s' % ext) + if "binary" not in self._formats and ext == WHEEL_EXTENSION and not self._ignore_compatibility: + reason = 'No binaries permitted for %s' % self.project_name + return (False, reason) + if "macosx10" in link.path and ext == '.zip' and not self._ignore_compatibility: + return (False, 'macosx10 one') + if ext == WHEEL_EXTENSION: + try: + wheel = Wheel(link.filename) + except InvalidWheelFilename: + return (False, 'invalid wheel filename') + if canonicalize_name(wheel.name) != self._canonical_name: + reason = 'wrong project name (not %s)' % self.project_name + return (False, reason) + + supported_tags = self._target_python.get_tags() + if not wheel.supported(supported_tags) and not self._ignore_compatibility: + # Include the wheel's tags in the reason string to + # simplify troubleshooting compatibility issues. + file_tags = wheel.get_formatted_file_tags() + reason = ( + "none of the wheel's tags match: {}".format( + ', '.join(file_tags) + ) + ) + return (False, reason) - _ensure_html_header(resp) + version = wheel.version + # This should be up by the self.ok_binary check, but see issue 2700. + if "source" not in self._formats and ext != WHEEL_EXTENSION: + return (False, 'No sources permitted for %s' % self.project_name) -def _get_html_response(url, session): - # type: (str, PipSession) -> Response - """Access an HTML page with GET, and return the response. + if not version: + version = _extract_version_from_fragment( + egg_info, self._canonical_name, + ) + if not version: + return ( + False, 'Missing project version for %s' % self.project_name, + ) - This consists of three parts: + match = self._py_version_re.search(version) + if match: + version = version[:match.start()] + py_version = match.group(1) + if py_version != self._target_python.py_version: + return (False, 'Python version is incorrect') - 1. 
If the URL looks suspiciously like an archive, send a HEAD first to - check the Content-Type is HTML, to avoid downloading a large file. - Raise `_NotHTTP` if the content type cannot be determined, or - `_NotHTML` if it is not HTML. - 2. Actually perform the request. Raise HTTP exceptions on network failures. - 3. Check the Content-Type header to make sure we got HTML, and raise - `_NotHTML` otherwise. - """ - if _is_url_like_archive(url): - _ensure_html_response(url, session=session) - - logger.debug('Getting page %s', url) - - resp = session.get( - url, - headers={ - "Accept": "text/html", - # We don't want to blindly returned cached data for - # /simple/, because authors generally expecting that - # twine upload && pip install will function, but if - # they've done a pip install in the last ~10 minutes - # it won't. Thus by setting this to zero we will not - # blindly use any cached data, however the benefit of - # using max-age=0 instead of no-cache, is that we will - # still support conditional requests, so we will still - # minimize traffic sent in cases where the page hasn't - # changed at all, we will just always incur the round - # trip for the conditional GET now instead of only - # once per 10 minutes. - # For more information, please see pypa/pip#5670. - "Cache-Control": "max-age=0", - }, - ) - resp.raise_for_status() + supports_python = _check_link_requires_python( + link, version_info=self._target_python.py_version_info, + ignore_requires_python=self._ignore_requires_python, + ) + if not supports_python and not self._ignore_compatibility: + # Return None for the reason text to suppress calling + # _log_skipped_link(). + return (False, None) - # The check for archives above only works if the url ends with - # something that looks like an archive. However that is not a - # requirement of an url. Unless we issue a HEAD request on every - # url we cannot know ahead of time for sure if something is HTML - # or not. However we can check after we've downloaded it. - _ensure_html_header(resp) + logger.debug('Found link %s, version: %s', link, version) - return resp + return (True, version) -def _handle_get_page_fail( - link, # type: Link - reason, # type: Union[str, Exception] - meth=None # type: Optional[Callable[..., None]] +def filter_unallowed_hashes( + candidates, # type: List[InstallationCandidate] + hashes, # type: Hashes + project_name, # type: str ): - # type: (...) -> None - if meth is None: - meth = logger.debug - meth("Could not fetch URL %s: %s - skipping", link, reason) - - -def _get_html_page(link, session=None): - # type: (Link, Optional[PipSession]) -> Optional[HTMLPage] - if session is None: - raise TypeError( - "_get_html_page() missing 1 required keyword argument: 'session'" + # type: (...) -> List[InstallationCandidate] + """ + Filter out candidates whose hashes aren't allowed, and return a new + list of candidates. + + If at least one candidate has an allowed hash, then all candidates with + either an allowed hash or no hash specified are returned. Otherwise, + the given candidates are returned. + + Including the candidates with no hash specified when there is a match + allows a warning to be logged if there is a more preferred candidate + with no hash specified. Returning all candidates in the case of no + matches lets pip report the hash of the candidate that would otherwise + have been installed (e.g. permitting the user to more easily update + their requirements file with the desired hash). 
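
The removed ``_get_html_response`` comment above spells out why index pages are fetched with ``Cache-Control: max-age=0`` rather than ``no-cache``: conditional requests stay allowed, so an unchanged page costs only the round trip. A minimal sketch of that request shape using the ``requests`` API this module already relies on (URL illustrative; the header only has an effect once the session is wrapped in a caching adapter such as CacheControl)::

    import requests

    session = requests.Session()
    resp = session.get(
        'https://pypi.org/simple/pip/',  # illustrative index URL
        headers={
            'Accept': 'text/html',
            # Revalidate on every fetch, but keep conditional requests,
            # so an unchanged page is a cheap 304 rather than a re-download.
            'Cache-Control': 'max-age=0',
        },
    )
    resp.raise_for_status()
    print(resp.headers.get('Content-Type'))
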
+ """ + if not hashes: + logger.debug( + 'Given no hashes to check %s links for project %r: ' + 'discarding no candidates', + len(candidates), + project_name, ) + # Make sure we're not returning back the given value. + return list(candidates) + + matches_or_no_digest = [] + # Collect the non-matches for logging purposes. + non_matches = [] + match_count = 0 + for candidate in candidates: + link = candidate.link + if not link.has_hash: + pass + elif link.is_hash_allowed(hashes=hashes): + match_count += 1 + else: + non_matches.append(candidate) + continue - url = link.url.split('#', 1)[0] - - # Check for VCS schemes that do not support lookup as web pages. - vcs_scheme = _match_vcs_scheme(url) - if vcs_scheme: - logger.debug('Cannot look at %s URL %s', vcs_scheme, link) - return None + matches_or_no_digest.append(candidate) - # Tack index.html onto file:// URLs that point to directories - scheme, _, path, _, _, _ = urllib_parse.urlparse(url) - if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): - # add trailing slash if not present so urljoin doesn't trim - # final segment - if not url.endswith('/'): - url += '/' - url = urllib_parse.urljoin(url, 'index.html') - logger.debug(' file: URL is directory, getting %s', url) + if match_count: + filtered = matches_or_no_digest + else: + # Make sure we're not returning back the given value. + filtered = list(candidates) - try: - resp = _get_html_response(url, session=session) - except _NotHTTP as exc: - logger.debug( - 'Skipping page %s because it looks like an archive, and cannot ' - 'be checked by HEAD.', link, - ) - except _NotHTML as exc: - logger.debug( - 'Skipping page %s because the %s request got Content-Type: %s', - link, exc.request_desc, exc.content_type, - ) - except requests.HTTPError as exc: - _handle_get_page_fail(link, exc) - except RetryError as exc: - _handle_get_page_fail(link, exc) - except SSLError as exc: - reason = "There was a problem confirming the ssl certificate: " - reason += str(exc) - _handle_get_page_fail(link, reason, meth=logger.info) - except requests.ConnectionError as exc: - _handle_get_page_fail(link, "connection error: %s" % exc) - except requests.Timeout: - _handle_get_page_fail(link, "timed out") + if len(filtered) == len(candidates): + discard_message = 'discarding no candidates' else: - return HTMLPage(resp.content, resp.url, resp.headers) - return None + discard_message = 'discarding {} non-matches:\n {}'.format( + len(non_matches), + '\n '.join(str(candidate.link) for candidate in non_matches) + ) + logger.debug( + 'Checked %s links for project %r against %s hashes ' + '(%s matches, %s no digest): %s', + len(candidates), + project_name, + hashes.digest_count, + match_count, + len(matches_or_no_digest) - match_count, + discard_message + ) -class PackageFinder(object): - """This finds packages. + return filtered - This is meant to match easy_install's technique for looking for - packages, by reading pages and looking for appropriate links. + +class CandidatePreferences(object): + + """ + Encapsulates some of the preferences for filtering and sorting + InstallationCandidate objects. 
""" def __init__( self, - find_links, # type: List[str] - index_urls, # type: List[str] + prefer_binary=False, # type: bool allow_all_prereleases=False, # type: bool - trusted_hosts=None, # type: Optional[Iterable[str]] - session=None, # type: Optional[PipSession] - format_control=None, # type: Optional[FormatControl] - platform=None, # type: Optional[str] - versions=None, # type: Optional[List[str]] - abi=None, # type: Optional[str] - implementation=None, # type: Optional[str] - prefer_binary=False # type: bool ): # type: (...) -> None - """Create a PackageFinder. - - :param format_control: A FormatControl object or None. Used to control - the selection of source packages / binary packages when consulting - the index and links. - :param platform: A string or None. If None, searches for packages - that are supported by the current system. Otherwise, will find - packages that can be built on the platform passed in. These - packages will only be downloaded for distribution: they will - not be built locally. - :param versions: A list of strings or None. This is passed directly - to pep425tags.py in the get_supported() method. - :param abi: A string or None. This is passed directly - to pep425tags.py in the get_supported() method. - :param implementation: A string or None. This is passed directly - to pep425tags.py in the get_supported() method. - """ - if session is None: - raise TypeError( - "PackageFinder() missing 1 required keyword argument: " - "'session'" - ) - - # Build find_links. If an argument starts with ~, it may be - # a local file relative to a home directory. So try normalizing - # it and if it exists, use the normalized version. - # This is deliberately conservative - it might be fine just to - # blindly normalize anything starting with a ~... - self.find_links = [] # type: List[str] - for link in find_links: - if link.startswith('~'): - new_link = normalize_path(link) - if os.path.exists(new_link): - link = new_link - self.find_links.append(link) - - self.index_urls = index_urls - - # These are boring links that have already been logged somehow: - self.logged_links = set() # type: Set[Link] - - self.format_control = format_control or FormatControl(set(), set()) - - # Domains that we won't emit warnings for when not using HTTPS - self.secure_origins = [ - ("*", host, "*") - for host in (trusted_hosts if trusted_hosts else []) - ] # type: List[SecureOrigin] - - # Do we want to allow _all_ pre-releases? + """ + :param allow_all_prereleases: Whether to allow all pre-releases. + """ self.allow_all_prereleases = allow_all_prereleases + self.prefer_binary = prefer_binary - # The Session we'll use to make requests - self.session = session - - # Kenneth's Hack - self.extra = None - # The valid tags to check potential found wheel candidates against - self.valid_tags = get_supported( - versions=versions, - platform=platform, - abi=abi, - impl=implementation, - ) +class BestCandidateResult(object): + """A collection of candidates, returned by `PackageFinder.find_best_candidate`. - # Do we prefer old, but valid, binary dist over new source dist - self.prefer_binary = prefer_binary + This class is only intended to be instantiated by CandidateEvaluator's + `compute_best_candidate()` method. + """ - # If we don't have TLS enabled, then WARN if anyplace we're looking - # relies on TLS. 
- if not HAS_TLS: - for link in itertools.chain(self.index_urls, self.find_links): - parsed = urllib_parse.urlparse(link) - if parsed.scheme == "https": - logger.warning( - "pip is configured with locations that require " - "TLS/SSL, however the ssl module in Python is not " - "available." - ) - break - - def get_formatted_locations(self): - # type: () -> str - lines = [] - if self.index_urls and self.index_urls != [PyPI.simple_url]: - lines.append( - "Looking in indexes: {}".format(", ".join( - redact_password_from_url(url) for url in self.index_urls)) - ) - if self.find_links: - lines.append( - "Looking in links: {}".format(", ".join(self.find_links)) - ) - return "\n".join(lines) + def __init__( + self, + candidates, # type: List[InstallationCandidate] + applicable_candidates, # type: List[InstallationCandidate] + best_candidate, # type: Optional[InstallationCandidate] + ): + # type: (...) -> None + """ + :param candidates: A sequence of all available candidates found. + :param applicable_candidates: The applicable candidates. + :param best_candidate: The most preferred candidate found, or None + if no applicable candidates were found. + """ + assert set(applicable_candidates) <= set(candidates) - @staticmethod - def get_extras_links(links): - requires = [] - extras = {} + if best_candidate is None: + assert not applicable_candidates + else: + assert best_candidate in applicable_candidates - current_list = requires + self._applicable_candidates = applicable_candidates + self._candidates = candidates - for link in links: - if not link: - current_list = requires - if link.startswith('['): - current_list = [] - extras[link[1:-1]] = current_list - else: - current_list.append(link) - return extras + self.best_candidate = best_candidate - @staticmethod - def _sort_locations(locations, expand_dir=False): - # type: (Sequence[str], bool) -> Tuple[List[str], List[str]] + def iter_all(self): + # type: () -> Iterable[InstallationCandidate] + """Iterate through all candidates. """ - Sort locations into "files" (archives) and "urls", and return - a pair of lists (files,urls) + return iter(self._candidates) + + def iter_applicable(self): + # type: () -> Iterable[InstallationCandidate] + """Iterate through the applicable candidates. """ - files = [] - urls = [] + return iter(self._applicable_candidates) - # puts the url for the given file path into the appropriate list - def sort_path(path): - url = path_to_url(path) - if mimetypes.guess_type(url, strict=False)[0] == 'text/html': - urls.append(url) - else: - files.append(url) - for url in locations: +class CandidateEvaluator(object): - is_local_path = os.path.exists(url) - is_file_url = url.startswith('file:') + """ + Responsible for filtering and sorting candidates for installation based + on what tags are valid. + """ - if is_local_path or is_file_url: - if is_local_path: - path = url - else: - path = url_to_path(url) - if os.path.isdir(path): - if expand_dir: - path = os.path.realpath(path) - for item in os.listdir(path): - sort_path(os.path.join(path, item)) - elif is_file_url: - urls.append(url) - else: - logger.warning( - "Path '{0}' is ignored: " - "it is a directory.".format(path), - ) - elif os.path.isfile(path): - sort_path(path) - else: - logger.warning( - "Url '%s' is ignored: it is neither a file " - "nor a directory.", url, - ) - elif is_url(url): - # Only add url with clear scheme - urls.append(url) - else: - logger.warning( - "Url '%s' is ignored. 
It is either a non-existing " - "path or lacks a specific scheme.", url, - ) + @classmethod + def create( + cls, + project_name, # type: str + target_python=None, # type: Optional[TargetPython] + prefer_binary=False, # type: bool + allow_all_prereleases=False, # type: bool + specifier=None, # type: Optional[specifiers.BaseSpecifier] + hashes=None, # type: Optional[Hashes] + ): + # type: (...) -> CandidateEvaluator + """Create a CandidateEvaluator object. + + :param target_python: The target Python interpreter to use when + checking compatibility. If None (the default), a TargetPython + object will be constructed from the running Python. + :param specifier: An optional object implementing `filter` + (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable + versions. + :param hashes: An optional collection of allowed hashes. + """ + if target_python is None: + target_python = TargetPython() + if specifier is None: + specifier = specifiers.SpecifierSet() + + supported_tags = target_python.get_tags() + + return cls( + project_name=project_name, + supported_tags=supported_tags, + specifier=specifier, + prefer_binary=prefer_binary, + allow_all_prereleases=allow_all_prereleases, + hashes=hashes, + ) + + def __init__( + self, + project_name, # type: str + supported_tags, # type: List[Pep425Tag] + specifier, # type: specifiers.BaseSpecifier + prefer_binary=False, # type: bool + allow_all_prereleases=False, # type: bool + hashes=None, # type: Optional[Hashes] + ): + # type: (...) -> None + """ + :param supported_tags: The PEP 425 tags supported by the target + Python in order of preference (most preferred first). + """ + self._allow_all_prereleases = allow_all_prereleases + self._hashes = hashes + self._prefer_binary = prefer_binary + self._project_name = project_name + self._specifier = specifier + self._supported_tags = supported_tags + + def get_applicable_candidates( + self, + candidates, # type: List[InstallationCandidate] + ): + # type: (...) -> List[InstallationCandidate] + """ + Return the applicable candidates from a list of candidates. + """ + # Using None infers from the specifier instead. + allow_prereleases = self._allow_all_prereleases or None + specifier = self._specifier + versions = { + str(v) for v in specifier.filter( + # We turn the version object into a str here because otherwise + # when we're debundled but setuptools isn't, Python will see + # packaging.version.Version and + # pkg_resources._vendor.packaging.version.Version as different + # types. This way we'll use a str as a common data interchange + # format. If we stop using the pkg_resources provided specifier + # and start using our own, we can drop the cast to str(). + (str(c.version) for c in candidates), + prereleases=allow_prereleases, + ) + } + + # Again, converting version to str to deal with debundling. + applicable_candidates = [ + c for c in candidates if str(c.version) in versions + ] - return files, urls + return filter_unallowed_hashes( + candidates=applicable_candidates, + hashes=self._hashes, + project_name=self._project_name, + ) - def _candidate_sort_key(self, candidate, ignore_compatibility=True): + def _sort_key(self, candidate, ignore_compatibility=True): # type: (InstallationCandidate, bool) -> CandidateSortingKey """ - Function used to generate link sort key for link tuples. - The greater the return value, the more preferred it is. - If not finding wheels, then sorted by version only. 
+ Function to pass as the `key` argument to a call to sorted() to sort + InstallationCandidates by preference. + + Returns a tuple such that tuples sorting as greater using Python's + default comparison operator are more preferred. + + The preference is as follows: + + First and foremost, candidates with allowed (matching) hashes are + always preferred over candidates without matching hashes. This is + because e.g. if the only candidate with an allowed hash is yanked, + we still want to use that candidate. + + Second, excepting hash considerations, candidates that have been + yanked (in the sense of PEP 592) are always less preferred than + candidates that haven't been yanked. Then: + + If not finding wheels, they are sorted by version only. If finding wheels, then the sort order is by version, then: 1. existing installs - 2. wheels ordered via Wheel.support_index_min(self.valid_tags) + 2. wheels ordered via Wheel.support_index_min(self._supported_tags) 3. source archives If prefer_binary was set, then all wheels are sorted above sources. + Note: it was considered to embed this logic into the Link comparison operators, but then different sdist links with the same version, would have to be considered equal """ - support_num = len(self.valid_tags) - build_tag = tuple() # type: BuildTag + valid_tags = self._supported_tags + support_num = len(valid_tags) + build_tag = () # type: BuildTag binary_preference = 0 - if candidate.location.is_wheel: + link = candidate.link + if link.is_wheel: # can raise InvalidWheelFilename - wheel = Wheel(candidate.location.filename) - if not wheel.supported(self.valid_tags) and not ignore_compatibility: + wheel = Wheel(link.filename) + if not wheel.supported(valid_tags) and not ignore_compatibility: raise UnsupportedWheel( "%s is not a supported wheel for this platform. It " "can't be sorted." % wheel.filename ) - if self.prefer_binary: + if self._prefer_binary: binary_preference = 1 tags = self.valid_tags if not ignore_compatibility else None try: @@ -486,168 +542,317 @@ def _candidate_sort_key(self, candidate, ignore_compatibility=True): build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) else: # sdist pri = -(support_num) - return (binary_preference, candidate.version, build_tag, pri) - - def _validate_secure_origin(self, logger, location): - # type: (Logger, Link) -> bool - # Determine if this url used a secure transport mechanism - parsed = urllib_parse.urlparse(str(location)) - origin = (parsed.scheme, parsed.hostname, parsed.port) - - # The protocol to use to see if the protocol matches. - # Don't count the repository type as part of the protocol: in - # cases such as "git+ssh", only use "ssh". (I.e., Only verify against - # the last scheme.) - protocol = origin[0].rsplit('+', 1)[-1] - - # Determine if our origin is a secure origin by looking through our - # hardcoded list of secure origins, as well as any additional ones - # configured on this PackageFinder instance. - for secure_origin in (SECURE_ORIGINS + self.secure_origins): - if protocol != secure_origin[0] and secure_origin[0] != "*": - continue + has_allowed_hash = int(link.is_hash_allowed(self._hashes)) + yank_value = -1 * int(link.is_yanked) # -1 for yanked. + return ( + has_allowed_hash, yank_value, binary_preference, candidate.version, + build_tag, pri, + ) - try: - # We need to do this decode dance to ensure that we have a - # unicode object, even on Python 2.x. 
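
Because the new ``_sort_key`` returns a plain tuple, ``sorted()``/``max()`` compare hash match first, then yanked status, then binary preference, then version, and so on, with bigger winning at every position. A toy demonstration of the documented consequence that an allowed hash outranks yanking, using reduced three-element keys in place of the real six-element ones::

    # Reduced keys: (has_allowed_hash, yank_value, version).
    # yank_value is -1 for yanked, 0 otherwise, as in the code above.
    candidates = {
        'yanked-but-hash-matches': (1, -1, (1, 0)),
        'fresh-but-no-hash-match': (0,  0, (2, 0)),
    }
    best = max(candidates, key=candidates.get)
    print(best)  # -> 'yanked-but-hash-matches'
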
- addr = ipaddress.ip_address( - origin[1] - if ( - isinstance(origin[1], six.text_type) or - origin[1] is None - ) - else origin[1].decode("utf8") - ) - network = ipaddress.ip_network( - secure_origin[1] - if isinstance(secure_origin[1], six.text_type) - # setting secure_origin[1] to proper Union[bytes, str] - # creates problems in other places - else secure_origin[1].decode("utf8") # type: ignore - ) - except ValueError: - # We don't have both a valid address or a valid network, so - # we'll check this origin against hostnames. - if (origin[1] and - origin[1].lower() != secure_origin[1].lower() and - secure_origin[1] != "*"): - continue - else: - # We have a valid address and network, so see if the address - # is contained within the network. - if addr not in network: - continue - - # Check to see if the port patches - if (origin[2] != secure_origin[2] and - secure_origin[2] != "*" and - secure_origin[2] is not None): - continue - - # If we've gotten here, then this origin matches the current - # secure origin and we should return True - return True - - # If we've gotten to this point, then the origin isn't secure and we - # will not accept it as a valid location to search. We will however - # log a warning that we are ignoring it. - logger.warning( - "The repository located at %s is not a trusted or secure host and " - "is being ignored. If this repository is available via HTTPS we " - "recommend you use HTTPS instead, otherwise you may silence " - "this warning and allow it anyway with '--trusted-host %s'.", - parsed.hostname, - parsed.hostname, + def sort_best_candidate( + self, + candidates, # type: List[InstallationCandidate] + ): + # type: (...) -> Optional[InstallationCandidate] + """ + Return the best candidate per the instance's sort order, or None if + no candidate is acceptable. + """ + if not candidates: + return None + + best_candidate = max(candidates, key=self._sort_key) + + # Log a warning per PEP 592 if necessary before returning. + link = best_candidate.link + if link.is_yanked: + reason = link.yanked_reason or '' + msg = ( + # Mark this as a unicode string to prevent + # "UnicodeEncodeError: 'ascii' codec can't encode character" + # in Python 2 when the reason contains non-ascii characters. + u'The candidate selected for download or install is a ' + 'yanked version: {candidate}\n' + 'Reason for being yanked: {reason}' + ).format(candidate=best_candidate, reason=reason) + logger.warning(msg) + + return best_candidate + + def compute_best_candidate( + self, + candidates, # type: List[InstallationCandidate] + ): + # type: (...) -> BestCandidateResult + """ + Compute and return a `BestCandidateResult` instance. + """ + applicable_candidates = self.get_applicable_candidates(candidates) + + best_candidate = self.sort_best_candidate(applicable_candidates) + + return BestCandidateResult( + candidates, + applicable_candidates=applicable_candidates, + best_candidate=best_candidate, ) - return False - def _get_index_urls_locations(self, project_name): - # type: (str) -> List[str] - """Returns the locations found via self.index_urls +class PackageFinder(object): + """This finds packages. - Checks the url_name on the main (first in the list) index and - use this url_name to produce all locations + This is meant to match easy_install's technique for looking for + packages, by reading pages and looking for appropriate links. 
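
The removed ``_validate_secure_origin`` treats each secure-origin host pattern as either an IP network or a literal hostname: if both sides parse with ``ipaddress``, network membership decides; otherwise it falls back to case-insensitive name comparison. The core test in isolation (Python 2 needs the ``ipaddress`` backport, which pip vendors)::

    import ipaddress

    def host_matches(host, pattern):
        # Try the network interpretation first (e.g. '127.0.0.0/8'),
        # then fall back to hostname comparison, as the removed code did.
        try:
            addr = ipaddress.ip_address(u'%s' % host)
            net = ipaddress.ip_network(u'%s' % pattern)
        except ValueError:
            return pattern == '*' or host.lower() == pattern.lower()
        return addr in net

    print(host_matches('127.0.0.1', '127.0.0.0/8'))  # -> True
    print(host_matches('pypi.org', 'pypi.org'))      # -> True
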
+ """ + + def __init__( + self, + link_collector, # type: LinkCollector + target_python, # type: TargetPython + allow_yanked, # type: bool + format_control=None, # type: Optional[FormatControl] + candidate_prefs=None, # type: CandidatePreferences + ignore_requires_python=None, # type: Optional[bool] + ignore_compatibility=None, # type: Optional[bool] + ): + # type: (...) -> None """ + This constructor is primarily meant to be used by the create() class + method and from tests. - def mkurl_pypi_url(url): - loc = posixpath.join( - url, - urllib_parse.quote(canonicalize_name(project_name))) - # For maximum compatibility with easy_install, ensure the path - # ends in a trailing slash. Although this isn't in the spec - # (and PyPI can handle it without the slash) some other index - # implementations might break if they relied on easy_install's - # behavior. - if not loc.endswith('/'): - loc = loc + '/' - return loc + :param format_control: A FormatControl object, used to control + the selection of source packages / binary packages when consulting + the index and links. + :param candidate_prefs: Options to use when creating a + CandidateEvaluator object. + """ + if candidate_prefs is None: + candidate_prefs = CandidatePreferences() + if ignore_compatibility is None: + ignore_compatibility = False - return [mkurl_pypi_url(url) for url in self.index_urls] + format_control = format_control or FormatControl(set(), set()) - def find_all_candidates(self, project_name): - # type: (str) -> List[Optional[InstallationCandidate]] - """Find all available InstallationCandidate for project_name + self._allow_yanked = allow_yanked + self._candidate_prefs = candidate_prefs + self._ignore_requires_python = ignore_requires_python + self._link_collector = link_collector + self._target_python = target_python + self._ignore_compatibility = ignore_compatibility - This checks index_urls and find_links. - All versions found are returned as an InstallationCandidate list. + self.format_control = format_control + + # These are boring links that have already been logged somehow. + self._logged_links = set() # type: Set[Link] + + # Kenneth's Hack + self.extra = None + + # Don't include an allow_yanked default value to make sure each call + # site considers whether yanked releases are allowed. This also causes + # that decision to be made explicit in the calling code, which helps + # people when reading the code. + @classmethod + def create( + cls, + link_collector, # type: LinkCollector + selection_prefs, # type: SelectionPreferences + target_python=None, # type: Optional[TargetPython] + ): + # type: (...) -> PackageFinder + """Create a PackageFinder. - See _link_package_versions for details on which files are accepted + :param selection_prefs: The candidate selection preferences, as a + SelectionPreferences object. + :param target_python: The target Python interpreter to use when + checking compatibility. If None (the default), a TargetPython + object will be constructed from the running Python. 
""" - index_locations = self._get_index_urls_locations(project_name) - index_file_loc, index_url_loc = self._sort_locations(index_locations) - fl_file_loc, fl_url_loc = self._sort_locations( - self.find_links, expand_dir=True, + if target_python is None: + target_python = TargetPython() + + candidate_prefs = CandidatePreferences( + prefer_binary=selection_prefs.prefer_binary, + allow_all_prereleases=selection_prefs.allow_all_prereleases, ) - file_locations = (Link(url) for url in itertools.chain( - index_file_loc, fl_file_loc, - )) - - # We trust every url that the user has given us whether it was given - # via --index-url or --find-links. - # We want to filter out any thing which does not have a secure origin. - url_locations = [ - link for link in itertools.chain( - (Link(url) for url in index_url_loc), - (Link(url) for url in fl_url_loc), - ) - if self._validate_secure_origin(logger, link) - ] + return cls( + candidate_prefs=candidate_prefs, + link_collector=link_collector, + target_python=target_python, + allow_yanked=selection_prefs.allow_yanked, + format_control=selection_prefs.format_control, + ignore_requires_python=selection_prefs.ignore_requires_python, + ) - logger.debug('%d location(s) to search for versions of %s:', - len(url_locations), project_name) + @staticmethod + def get_extras_links(links): + requires = [] + extras = {} + + current_list = requires - for location in url_locations: - logger.debug('* %s', location) + for link in links: + if not link: + current_list = requires + if link.startswith('['): + current_list = [] + extras[link[1:-1]] = current_list + else: + current_list.append(link) + return extras + @property + def search_scope(self): + # type: () -> SearchScope + return self._link_collector.search_scope + + @search_scope.setter + def search_scope(self, search_scope): + # type: (SearchScope) -> None + self._link_collector.search_scope = search_scope + + @property + def find_links(self): + # type: () -> List[str] + return self._link_collector.find_links + + @property + def index_urls(self): + # type: () -> List[str] + return self.search_scope.index_urls + + @property + def trusted_hosts(self): + # type: () -> Iterable[str] + for host_port in self._link_collector.session.pip_trusted_origins: + yield build_netloc(*host_port) + + @property + def allow_all_prereleases(self): + # type: () -> bool + return self._candidate_prefs.allow_all_prereleases + + def set_allow_all_prereleases(self): + # type: () -> None + self._candidate_prefs.allow_all_prereleases = True + + def make_link_evaluator(self, project_name): + # type: (str) -> LinkEvaluator canonical_name = canonicalize_name(project_name) formats = self.format_control.get_allowed_formats(canonical_name) - search = Search(project_name, canonical_name, formats) - find_links_versions = self._package_versions( - # We trust every directly linked archive in find_links - (Link(url, '-f') for url in self.find_links), - search + + return LinkEvaluator( + project_name=project_name, + canonical_name=canonical_name, + formats=formats, + target_python=self._target_python, + allow_yanked=self._allow_yanked, + ignore_requires_python=self._ignore_requires_python, + ignore_compatibility=self._ignore_compatibility + ) + + def _sort_links(self, links): + # type: (Iterable[Link]) -> List[Link] + """ + Returns elements of links in order, non-egg links first, egg links + second, while eliminating duplicates + """ + eggs, no_eggs = [], [] + seen = set() # type: Set[Link] + for link in links: + if link not in seen: + seen.add(link) + if 
link.egg_fragment: + eggs.append(link) + else: + no_eggs.append(link) + return no_eggs + eggs + + def _log_skipped_link(self, link, reason): + # type: (Link, Text) -> None + if link not in self._logged_links: + # Mark this as a unicode string to prevent "UnicodeEncodeError: + # 'ascii' codec can't encode character" in Python 2 when + # the reason contains non-ascii characters. + # Also, put the link at the end so the reason is more visible + # and because the link string is usually very long. + logger.debug(u'Skipping link: %s: %s', reason, link) + self._logged_links.add(link) + + def get_install_candidate(self, link_evaluator, link): + # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate] + """ + If the link is a candidate for install, convert it to an + InstallationCandidate and return it. Otherwise, return None. + """ + is_candidate, result = link_evaluator.evaluate_link(link) + if not is_candidate: + if result: + self._log_skipped_link(link, reason=result) + return None + + return InstallationCandidate( + project=link_evaluator.project_name, + link=link, + # Convert the Text result to str since InstallationCandidate + # accepts str. + version=str(result), + requires_python=getattr(link, "requires_python", None) + ) + + def evaluate_links(self, link_evaluator, links): + # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate] + """ + Convert links that are candidates to InstallationCandidate objects. + """ + candidates = [] + for link in self._sort_links(links): + candidate = self.get_install_candidate(link_evaluator, link) + if candidate is not None: + candidates.append(candidate) + + return candidates + + def find_all_candidates(self, project_name): + # type: (str) -> List[InstallationCandidate] + """Find all available InstallationCandidate for project_name + + This checks index_urls and find_links. + All versions found are returned as an InstallationCandidate list. + + See LinkEvaluator.evaluate_link() for details on which files + are accepted. 
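+
+        Candidates are gathered from local files, --find-links URLs and
+        index pages, and the returned list keeps that intentional
+        priority ordering.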
+ """ + collected_links = self._link_collector.collect_links(project_name) + + link_evaluator = self.make_link_evaluator(project_name) + + find_links_versions = self.evaluate_links( + link_evaluator, + links=collected_links.find_links, ) page_versions = [] - for page in self._get_pages(url_locations, project_name): - try: - logger.debug('Analyzing links from page %s', page.url) - except AttributeError: - continue + for page_url, page_links in collected_links.pages.items(): + logger.debug('Analyzing links from page %s', page_url) with indent_log(): - page_versions.extend( - self._package_versions(page.iter_links(), search) + new_versions = self.evaluate_links( + link_evaluator, + links=page_links, ) + page_versions.extend(new_versions) - file_versions = self._package_versions(file_locations, search) + file_versions = self.evaluate_links( + link_evaluator, + links=collected_links.files, + ) if file_versions: file_versions.sort(reverse=True) logger.debug( 'Local files found: %s', ', '.join([ - url_to_path(candidate.location.url) + url_to_path(candidate.link.url) for candidate in file_versions ]) ) @@ -655,60 +860,82 @@ def find_all_candidates(self, project_name): # This is an intentional priority ordering return file_versions + find_links_versions + page_versions - def find_requirement(self, req, upgrade, ignore_compatibility=False): - # type: (InstallRequirement, bool, bool) -> Optional[Link] + def make_candidate_evaluator( + self, + project_name, # type: str + specifier=None, # type: Optional[specifiers.BaseSpecifier] + hashes=None, # type: Optional[Hashes] + ): + # type: (...) -> CandidateEvaluator + """Create a CandidateEvaluator object to use. + """ + candidate_prefs = self._candidate_prefs + return CandidateEvaluator.create( + project_name=project_name, + target_python=self._target_python, + prefer_binary=candidate_prefs.prefer_binary, + allow_all_prereleases=candidate_prefs.allow_all_prereleases, + specifier=specifier, + hashes=hashes, + ) + + def find_best_candidate( + self, + project_name, # type: str + specifier=None, # type: Optional[specifiers.BaseSpecifier] + hashes=None, # type: Optional[Hashes] + ): + # type: (...) -> BestCandidateResult + """Find matches for the given project and specifier. + + :param specifier: An optional object implementing `filter` + (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable + versions. + + :return: A `BestCandidateResult` instance. + """ + candidates = self.find_all_candidates(project_name) + candidate_evaluator = self.make_candidate_evaluator( + project_name=project_name, + specifier=specifier, + hashes=hashes, + ) + return candidate_evaluator.compute_best_candidate(candidates) + + def find_requirement(self, req, upgrade): + # type: (InstallRequirement, bool) -> Optional[Link] """Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a Link if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise """ - all_candidates = self.find_all_candidates(req.name) - - # Filter out anything which doesn't match our specifier - compatible_versions = set( - req.specifier.filter( - # We turn the version object into a str here because otherwise - # when we're debundled but setuptools isn't, Python will see - # packaging.version.Version and - # pkg_resources._vendor.packaging.version.Version as different - # types. This way we'll use a str as a common data interchange - # format. 
If we stop using the pkg_resources provided specifier - # and start using our own, we can drop the cast to str(). - [str(c.version) for c in all_candidates], - prereleases=( - self.allow_all_prereleases - if self.allow_all_prereleases else None - ), - ) + hashes = req.hashes(trust_internet=False) + best_candidate_result = self.find_best_candidate( + req.name, specifier=req.specifier, hashes=hashes, ) - applicable_candidates = [ - # Again, converting to str to deal with debundling. - c for c in all_candidates if str(c.version) in compatible_versions - ] - - if applicable_candidates: - best_candidate = max(applicable_candidates, - key=self._candidate_sort_key) - else: - best_candidate = None + best_candidate = best_candidate_result.best_candidate + installed_version = None # type: Optional[_BaseVersion] if req.satisfied_by is not None: installed_version = parse_version(req.satisfied_by.version) - else: - installed_version = None + + def _format_versions(cand_iter): + # This repeated parse_version and str() conversion is needed to + # handle different vendoring sources from pipenv.patched.notpip and pkg_resources. + # If we stop using the pkg_resources provided specifier and start + # using our own, we can drop the cast to str(). + return ", ".join(sorted( + {str(c.version) for c in cand_iter}, + key=parse_version, + )) or "none" if installed_version is None and best_candidate is None: logger.critical( 'Could not find a version that satisfies the requirement %s ' '(from versions: %s)', req, - ', '.join( - sorted( - {str(c.version) for c in all_candidates}, - key=parse_version, - ) - ) + _format_versions(best_candidate_result.iter_all()), ) raise DistributionNotFound( @@ -743,275 +970,59 @@ def find_requirement(self, req, upgrade, ignore_compatibility=False): 'Installed version (%s) is most up-to-date (past versions: ' '%s)', installed_version, - ', '.join(sorted(compatible_versions, key=parse_version)) or - "none", + _format_versions(best_candidate_result.iter_applicable()), ) raise BestVersionAlreadyInstalled logger.debug( 'Using version %s (newest of versions: %s)', best_candidate.version, - ', '.join(sorted(compatible_versions, key=parse_version)) + _format_versions(best_candidate_result.iter_applicable()), ) - return best_candidate.location - - def _get_pages(self, locations, project_name): - # type: (Iterable[Link], str) -> Iterable[HTMLPage] - """ - Yields (page, page_url) from the given locations, skipping - locations that have errors. - """ - seen = set() # type: Set[Link] - for location in locations: - if location in seen: - continue - seen.add(location) - - page = _get_html_page(location, session=self.session) - if page is None: - continue - - yield page - - _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') - - def _sort_links(self, links): - # type: (Iterable[Link]) -> List[Link] - """ - Returns elements of links in order, non-egg links first, egg links - second, while eliminating duplicates - """ - eggs, no_eggs = [], [] - seen = set() # type: Set[Link] - for link in links: - if link not in seen: - seen.add(link) - if link.egg_fragment: - eggs.append(link) - else: - no_eggs.append(link) - return no_eggs + eggs + return best_candidate.link - def _package_versions( - self, - links, # type: Iterable[Link] - search # type: Search - ): - # type: (...) 
-> List[Optional[InstallationCandidate]]
-        result = []
-        for link in self._sort_links(links):
-            v = self._link_package_versions(link, search)
-            if v is not None:
-                result.append(v)
-        return result
-
-    def _log_skipped_link(self, link, reason):
-        # type: (Link, str) -> None
-        if link not in self.logged_links:
-            logger.debug('Skipping link %s; %s', link, reason)
-            self.logged_links.add(link)
-
-    def _link_package_versions(self, link, search, ignore_compatibility=True):
-        # type: (Link, Search, bool) -> Optional[InstallationCandidate]
-        """Return an InstallationCandidate or None"""
-        version = None
-        if link.egg_fragment:
-            egg_info = link.egg_fragment
-            ext = link.ext
-        else:
-            egg_info, ext = link.splitext()
-            if not ext:
-                self._log_skipped_link(link, 'not a file')
-                return None
-            if ext not in SUPPORTED_EXTENSIONS:
-                self._log_skipped_link(
-                    link, 'unsupported archive format: %s' % ext,
-                )
-                return None
-            if "binary" not in search.formats and ext == WHEEL_EXTENSION and not ignore_compatibility:
-                self._log_skipped_link(
-                    link, 'No binaries permitted for %s' % search.supplied,
-                )
-                return None
-            if "macosx10" in link.path and ext == '.zip' and not ignore_compatibility:
-                self._log_skipped_link(link, 'macosx10 one')
-                return None
-            if ext == WHEEL_EXTENSION:
-                try:
-                    wheel = Wheel(link.filename)
-                except InvalidWheelFilename:
-                    self._log_skipped_link(link, 'invalid wheel filename')
-                    return None
-                if canonicalize_name(wheel.name) != search.canonical:
-                    self._log_skipped_link(
-                        link, 'wrong project name (not %s)' % search.supplied)
-                    return None
-
-                if not wheel.supported(self.valid_tags) and not ignore_compatibility:
-                    self._log_skipped_link(
-                        link, 'it is not compatible with this Python')
-                    return None
-
-                version = wheel.version
-
-        # This should be up by the search.ok_binary check, but see issue 2700.
-        if "source" not in search.formats and ext != WHEEL_EXTENSION:
-            self._log_skipped_link(
-                link, 'No sources permitted for %s' % search.supplied,
-            )
-            return None
-
-        if not version:
-            version = _egg_info_matches(egg_info, search.canonical)
-        if not version:
-            self._log_skipped_link(
-                link, 'Missing project version for %s' % search.supplied)
-            return None
-
-        match = self._py_version_re.search(version)
-        if match:
-            version = version[:match.start()]
-            py_version = match.group(1)
-            if py_version != sys.version[:3]:
-                self._log_skipped_link(
-                    link, 'Python version is incorrect')
-                return None
-        try:
-            support_this_python = check_requires_python(link.requires_python)
-        except specifiers.InvalidSpecifier:
-            logger.debug("Package %s has an invalid Requires-Python entry: %s",
-                         link.filename, link.requires_python)
-            support_this_python = True
-
-        if not support_this_python and not ignore_compatibility:
-            logger.debug("The package %s is incompatible with the python "
-                         "version in use. Acceptable python versions are: %s",
-                         link, link.requires_python)
-            return None
-        logger.debug('Found link %s, version: %s', link, version)
-
-        return InstallationCandidate(search.supplied, version, link, link.requires_python)
-
-
-def _find_name_version_sep(egg_info, canonical_name):
+def _find_name_version_sep(fragment, canonical_name):
     # type: (str, str) -> int
     """Find the separator's index based on the package's canonical name.
 
-    `egg_info` must be an egg info string for the given package, and
-    `canonical_name` must be the package's canonical name.
+    :param fragment: A <package>+<version> filename "fragment" (stem) or
+        egg fragment.
+    :param canonical_name: The package's canonical name.
 
    This function is needed since the canonicalized name does not necessarily
    have the same length as the egg info's name part. An example::
 
-    >>> egg_info = 'foo__bar-1.0'
+    >>> fragment = 'foo__bar-1.0'
     >>> canonical_name = 'foo-bar'
-    >>> _find_name_version_sep(egg_info, canonical_name)
+    >>> _find_name_version_sep(fragment, canonical_name)
     8
     """
     # Project name and version must be separated by one single dash. Find all
     # occurrences of dashes; if the string in front of it matches the canonical
     # name, this is the one separating the name and version parts.
-    for i, c in enumerate(egg_info):
+    for i, c in enumerate(fragment):
         if c != "-":
             continue
-        if canonicalize_name(egg_info[:i]) == canonical_name:
+        if canonicalize_name(fragment[:i]) == canonical_name:
             return i
-    raise ValueError("{} does not match {}".format(egg_info, canonical_name))
+    raise ValueError("{} does not match {}".format(fragment, canonical_name))
 
 
-def _egg_info_matches(egg_info, canonical_name):
+def _extract_version_from_fragment(fragment, canonical_name):
     # type: (str, str) -> Optional[str]
-    """Pull the version part out of a string.
+    """Parse the version string from a <package>+<version> filename
+    "fragment" (stem) or egg fragment.
 
-    :param egg_info: The string to parse. E.g. foo-2.1
+    :param fragment: The string to parse. E.g. foo-2.1
     :param canonical_name: The canonicalized name of the package this
         belongs to.
     """
     try:
-        version_start = _find_name_version_sep(egg_info, canonical_name) + 1
+        version_start = _find_name_version_sep(fragment, canonical_name) + 1
     except ValueError:
         return None
-    version = egg_info[version_start:]
+    version = fragment[version_start:]
     if not version:
         return None
     return version
-
-
-def _determine_base_url(document, page_url):
-    """Determine the HTML document's base URL.
-
-    This looks for a ``<base>`` tag in the HTML document. If present, its href
-    attribute denotes the base URL of anchor tags in the document. If there is
-    no such tag (or if it does not have a valid href attribute), the HTML
-    file's URL is used as the base URL.
-
-    :param document: An HTML document representation. The current
-        implementation expects the result of ``html5lib.parse()``.
-    :param page_url: The URL of the HTML document.
-    """
-    for base in document.findall(".//base"):
-        href = base.get("href")
-        if href is not None:
-            return href
-    return page_url
-
-
-def _get_encoding_from_headers(headers):
-    """Determine if we have any encoding information in our headers.
-    """
-    if headers and "Content-Type" in headers:
-        content_type, params = cgi.parse_header(headers["Content-Type"])
-        if "charset" in params:
-            return params['charset']
-    return None
-
-
-_CLEAN_LINK_RE = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
-
-
-def _clean_link(url):
-    # type: (str) -> str
-    """Makes sure a link is fully encoded. 
That is, if a ' ' shows up in - the link, it will be rewritten to %20 (while not over-quoting - % or other characters).""" - return _CLEAN_LINK_RE.sub(lambda match: '%%%2x' % ord(match.group(0)), url) - - -class HTMLPage(object): - """Represents one page, along with its URL""" - - def __init__(self, content, url, headers=None): - # type: (bytes, str, MutableMapping[str, str]) -> None - self.content = content - self.url = url - self.headers = headers - - def __str__(self): - return redact_password_from_url(self.url) - - def iter_links(self): - # type: () -> Iterable[Link] - """Yields all links in the page""" - document = html5lib.parse( - self.content, - transport_encoding=_get_encoding_from_headers(self.headers), - namespaceHTMLElements=False, - ) - base_url = _determine_base_url(document, self.url) - for anchor in document.findall(".//a"): - if anchor.get("href"): - href = anchor.get("href") - url = _clean_link(urllib_parse.urljoin(base_url, href)) - pyrequire = anchor.get('data-requires-python') - pyrequire = unescape(pyrequire) if pyrequire else None - yield Link(url, self.url, requires_python=pyrequire) - - -Search = namedtuple('Search', 'supplied canonical formats') -"""Capture key aspects of a search. - -:attribute supplied: The user supplied package. -:attribute canonical: The canonical package name. -:attribute formats: The formats allowed for this package. Should be a set - with 'binary' or 'source' or both in it. -""" diff --git a/pipenv/patched/notpip/_internal/resolve.py b/pipenv/patched/notpip/_internal/legacy_resolve.py similarity index 77% rename from pipenv/patched/notpip/_internal/resolve.py rename to pipenv/patched/notpip/_internal/legacy_resolve.py index e42dd3d4ef..9fc1ae1efb 100644 --- a/pipenv/patched/notpip/_internal/resolve.py +++ b/pipenv/patched/notpip/_internal/legacy_resolve.py @@ -10,35 +10,102 @@ a. "first found, wins" (where the order is breadth first) """ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + import logging +import sys from collections import defaultdict from itertools import chain +from pipenv.patched.notpip._vendor.packaging import specifiers + from pipenv.patched.notpip._internal.exceptions import ( - BestVersionAlreadyInstalled, DistributionNotFound, HashError, HashErrors, + BestVersionAlreadyInstalled, + DistributionNotFound, + HashError, + HashErrors, UnsupportedPythonVersion, ) -from pipenv.patched.notpip._internal.req.constructors import install_req_from_req_string -from pipenv.patched.notpip._internal.req.req_install import InstallRequirement from pipenv.patched.notpip._internal.utils.logging import indent_log -from pipenv.patched.notpip._internal.utils.misc import dist_in_usersite, ensure_dir -from pipenv.patched.notpip._internal.utils.packaging import check_dist_requires_python +from pipenv.patched.notpip._internal.utils.misc import ( + dist_in_usersite, + ensure_dir, + normalize_version_info, +) +from pipenv.patched.notpip._internal.utils.packaging import ( + check_requires_python, + get_requires_python, +) from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import Optional, DefaultDict, List, Set # noqa: F401 - from pipenv.patched.notpip._internal.download import PipSession # noqa: F401 - from pipenv.patched.notpip._internal.req.req_install import InstallRequirement # noqa: F401 - from pipenv.patched.notpip._internal.index import PackageFinder # noqa: F401 - from pipenv.patched.notpip._internal.req.req_set import RequirementSet # noqa: F401 - from pipenv.patched.notpip._internal.operations.prepare import ( # noqa: F401 - DistAbstraction, RequirementPreparer - ) - from pipenv.patched.notpip._internal.cache import WheelCache # noqa: F401 + from typing import Callable, DefaultDict, List, Optional, Set, Tuple + from pipenv.patched.notpip._vendor import pkg_resources + + from pipenv.patched.notpip._internal.distributions import AbstractDistribution + from pipenv.patched.notpip._internal.network.session import PipSession + from pipenv.patched.notpip._internal.index import PackageFinder + from pipenv.patched.notpip._internal.operations.prepare import RequirementPreparer + from pipenv.patched.notpip._internal.req.req_install import InstallRequirement + from pipenv.patched.notpip._internal.req.req_set import RequirementSet + + InstallRequirementProvider = Callable[ + [str, InstallRequirement], InstallRequirement + ] logger = logging.getLogger(__name__) +def _check_dist_requires_python( + dist, # type: pkg_resources.Distribution + version_info, # type: Tuple[int, int, int] + ignore_requires_python=False, # type: bool +): + # type: (...) -> None + """ + Check whether the given Python version is compatible with a distribution's + "Requires-Python" value. + + :param version_info: A 3-tuple of ints representing the Python + major-minor-micro version to check. + :param ignore_requires_python: Whether to ignore the "Requires-Python" + value if the given Python version isn't compatible. + + :raises UnsupportedPythonVersion: When the given Python version isn't + compatible. 
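+
+    For example, a distribution carrying Requires-Python ">=3.6" that is
+    checked with version_info=(2, 7, 16) raises UnsupportedPythonVersion,
+    unless ignore_requires_python is true, in which case the mismatch is
+    only logged at debug level.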
+ """ + requires_python = get_requires_python(dist) + try: + is_compatible = check_requires_python( + requires_python, version_info=version_info, + ) + except specifiers.InvalidSpecifier as exc: + logger.warning( + "Package %r has an invalid Requires-Python: %s", + dist.project_name, exc, + ) + return + + if is_compatible: + return + + version = '.'.join(map(str, version_info)) + if ignore_requires_python: + logger.debug( + 'Ignoring failed Requires-Python check for package %r: ' + '%s not in %r', + dist.project_name, version, requires_python, + ) + return + + raise UnsupportedPythonVersion( + 'Package {!r} requires a different Python: {} not in {!r}'.format( + dist.project_name, version, requires_python, + )) + + class Resolver(object): """Resolves which packages need to be installed/uninstalled to perform \ the requested operation without breaking the requirements of any package. @@ -51,41 +118,42 @@ def __init__( preparer, # type: RequirementPreparer session, # type: PipSession finder, # type: PackageFinder - wheel_cache, # type: Optional[WheelCache] + make_install_req, # type: InstallRequirementProvider use_user_site, # type: bool ignore_dependencies, # type: bool ignore_installed, # type: bool ignore_requires_python, # type: bool force_reinstall, # type: bool - isolated, # type: bool upgrade_strategy, # type: str - use_pep517=None, # type: Optional[bool] + py_version_info=None, # type: Optional[Tuple[int, ...]] ignore_compatibility=False, # type: bool ): # type: (...) -> None super(Resolver, self).__init__() assert upgrade_strategy in self._allowed_strategies + if py_version_info is None: + py_version_info = sys.version_info[:3] + else: + py_version_info = normalize_version_info(py_version_info) + + self._py_version_info = py_version_info + self.preparer = preparer self.finder = finder self.session = session - # NOTE: This would eventually be replaced with a cache that can give - # information about both sdist and wheels transparently. - self.wheel_cache = wheel_cache - # This is set in resolve self.require_hashes = None # type: Optional[bool] self.upgrade_strategy = upgrade_strategy self.force_reinstall = force_reinstall - self.isolated = isolated self.ignore_dependencies = ignore_dependencies self.ignore_installed = ignore_installed self.ignore_requires_python = ignore_requires_python - self.ignore_compatibility = ignore_compatibility self.use_user_site = use_user_site - self.use_pep517 = use_pep517 + self._make_install_req = make_install_req + self.ignore_compatibility = ignore_compatibility self.requires_python = None if self.ignore_compatibility: self.ignore_requires_python = True @@ -121,7 +189,8 @@ def resolve(self, requirement_set): ) # Display where finder is looking for packages - locations = self.finder.get_formatted_locations() + search_scope = self.finder.search_scope + locations = search_scope.get_formatted_locations() if locations: logger.info(locations) @@ -164,7 +233,6 @@ def _set_req_to_reinstall(self, req): req.conflicts_with = req.satisfied_by req.satisfied_by = None - # XXX: Stop passing requirement_set for options def _check_skip_installed(self, req_to_install): # type: (InstallRequirement) -> Optional[str] """Check if req_to_install should be skipped. 
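+
+        Returns a reason string when the requirement should be skipped,
+        or None when it should be processed.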
@@ -219,7 +287,7 @@ def _check_skip_installed(self, req_to_install): return None def _get_abstract_dist_for(self, req): - # type: (InstallRequirement) -> DistAbstraction + # type: (InstallRequirement) -> AbstractDistribution """Takes a InstallRequirement and returns a single AbstractDist \ representing a prepared variant of the same. """ @@ -243,9 +311,11 @@ def _get_abstract_dist_for(self, req): ) upgrade_allowed = self._is_upgrade_allowed(req) + + # We eagerly populate the link, since that's our "legacy" behavior. + req.populate_link(self.finder, upgrade_allowed, self.require_hashes) abstract_dist = self.preparer.prepare_linked_requirement( - req, self.session, self.finder, upgrade_allowed, - self.require_hashes + req, self.session, self.finder, self.require_hashes ) # NOTE @@ -280,7 +350,7 @@ def _resolve_one( self, requirement_set, # type: RequirementSet req_to_install, # type: InstallRequirement - ignore_requires_python=False # type: bool + ignore_requires_python=False, # type: bool ): # type: (...) -> List[InstallRequirement] """Prepare a single requirements file. @@ -301,31 +371,30 @@ def _resolve_one( abstract_dist = self._get_abstract_dist_for(req_to_install) # Parse and return dependencies - dist = abstract_dist.dist() - try: - check_dist_requires_python(dist) - except UnsupportedPythonVersion as err: - if self.ignore_requires_python or ignore_requires_python or self.ignore_compatibility: - logger.warning(err.args[0]) - else: - raise - - # A huge hack, by Kenneth Reitz. + dist = abstract_dist.get_pkg_resources_distribution() + # This will raise UnsupportedPythonVersion if the given Python + # version isn't compatible with the distribution's Requires-Python. + ignore_requires_python = ( + ignore_requires_python or self.ignore_requires_python or + self.ignore_compatibility + ) + _check_dist_requires_python( + dist, version_info=self._py_version_info, + ignore_requires_python=ignore_requires_python, + ) + # Patched in - lets get the python version on here then + # FIXME: Does this patch even work? it puts the python version + # on the resolver... why? try: - self.requires_python = check_dist_requires_python(dist, absorb=False) + self.requires_python = get_requires_python(dist) except TypeError: self.requires_python = None - - more_reqs = [] # type: List[InstallRequirement] def add_req(subreq, extras_requested): - sub_install_req = install_req_from_req_string( + sub_install_req = self._make_install_req( str(subreq), req_to_install, - isolated=self.isolated, - wheel_cache=self.wheel_cache, - use_pep517=self.use_pep517 ) parent_req_name = req_to_install.name to_scan_again, add_to_parent = requirement_set.add_requirement( @@ -343,10 +412,10 @@ def add_req(subreq, extras_requested): # We add req_to_install before its dependencies, so that we # can refer to it when adding dependencies. 
if not requirement_set.has_requirement(req_to_install.name): + # 'unnamed' requirements will get added here available_requested = sorted( set(dist.extras) & set(req_to_install.extras) ) - # 'unnamed' requirements will get added here req_to_install.is_direct = True requirement_set.add_requirement( req_to_install, parent_req_name=None, @@ -378,12 +447,9 @@ def add_req(subreq, extras_requested): for available in available_requested: if hasattr(dist, '_DistInfoDistribution__dep_map'): for req in dist._DistInfoDistribution__dep_map[available]: - req = InstallRequirement( + req = self._make_install_req( req, - req_to_install, - isolated=self.isolated, - wheel_cache=self.wheel_cache, - use_pep517=None + req_to_install ) more_reqs.append(req) diff --git a/pipenv/patched/notpip/_internal/locations.py b/pipenv/patched/notpip/_internal/locations.py index e8f5a26865..4bd3c87ab4 100644 --- a/pipenv/patched/notpip/_internal/locations.py +++ b/pipenv/patched/notpip/_internal/locations.py @@ -1,4 +1,9 @@ """Locations where we look for configs, install stuff, etc""" + +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import os @@ -11,82 +16,44 @@ from distutils.command.install import SCHEME_KEYS # type: ignore from pipenv.patched.notpip._internal.utils import appdirs -from pipenv.patched.notpip._internal.utils.compat import WINDOWS, expanduser +from pipenv.patched.notpip._internal.utils.compat import WINDOWS from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.virtualenv import running_under_virtualenv if MYPY_CHECK_RUNNING: - from typing import Any, Union, Dict, List, Optional # noqa: F401 + from typing import Any, Union, Dict, List, Optional # Application Directories USER_CACHE_DIR = appdirs.user_cache_dir("pip") -DELETE_MARKER_MESSAGE = '''\ -This file is placed here by pip to indicate the source was put -here by pip. - -Once this package is successfully installed this source code will be -deleted (unless you remove this file). -''' -PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt' - - -def write_delete_marker_file(directory): - # type: (str) -> None - """ - Write the pip delete marker file into this directory. - """ - filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME) - with open(filepath, 'w') as marker_fp: - marker_fp.write(DELETE_MARKER_MESSAGE) - - -def running_under_virtualenv(): - # type: () -> bool +def get_major_minor_version(): + # type: () -> str """ - Return True if we're running inside a virtualenv, False otherwise. - + Return the major-minor version of the current Python as a string, e.g. + "3.7" or "3.10". """ - if hasattr(sys, 'real_prefix'): - return True - elif sys.prefix != getattr(sys, "base_prefix", sys.prefix): - return True - - return False + return '{}.{}'.format(*sys.version_info) -def virtualenv_no_global(): - # type: () -> bool - """ - Return True if in a venv and no system site packages. 
- """ - # this mirrors the logic in virtualenv.py for locating the - # no-global-site-packages.txt file - site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) - no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt') - if running_under_virtualenv() and os.path.isfile(no_global_file): - return True +def get_src_prefix(): + if running_under_virtualenv(): + src_prefix = os.path.join(sys.prefix, 'src') else: - return False - + # FIXME: keep src in cwd for now (it is not a temporary folder) + try: + src_prefix = os.path.join(os.getcwd(), 'src') + except OSError: + # In case the current working directory has been renamed or deleted + sys.exit( + "The folder you are executing pip from can no longer be found." + ) -if running_under_virtualenv(): - src_prefix = os.path.join(sys.prefix, 'src') -else: - # FIXME: keep src in cwd for now (it is not a temporary folder) - try: - src_prefix = os.path.join(os.getcwd(), 'src') - except OSError: - # In case the current working directory has been renamed or deleted - sys.exit( - "The folder you are executing pip from can no longer be found." - ) + # under macOS + virtualenv sys.prefix is not properly resolved + # it is something like /path/to/python/bin/.. + return os.path.abspath(src_prefix) -# under macOS + virtualenv sys.prefix is not properly resolved -# it is something like /path/to/python/bin/.. -# Note: using realpath due to tmp dirs on OSX being symlinks -src_prefix = os.path.abspath(src_prefix) # FIXME doesn't account for venv linked to global site-packages @@ -103,7 +70,7 @@ def virtualenv_no_global(): user_site = site.getusersitepackages() except AttributeError: user_site = site.USER_SITE -user_dir = expanduser('~') + if WINDOWS: bin_py = os.path.join(sys.prefix, 'Scripts') bin_user = os.path.join(user_site, 'Scripts') @@ -111,38 +78,15 @@ def virtualenv_no_global(): if not os.path.exists(bin_py): bin_py = os.path.join(sys.prefix, 'bin') bin_user = os.path.join(user_site, 'bin') - - config_basename = 'pip.ini' - - legacy_storage_dir = os.path.join(user_dir, 'pip') - legacy_config_file = os.path.join( - legacy_storage_dir, - config_basename, - ) else: bin_py = os.path.join(sys.prefix, 'bin') bin_user = os.path.join(user_site, 'bin') - config_basename = 'pip.conf' - - legacy_storage_dir = os.path.join(user_dir, '.pip') - legacy_config_file = os.path.join( - legacy_storage_dir, - config_basename, - ) # Forcing to use /usr/local/bin for standard macOS framework installs # Also log to ~/Library/Logs/ for use with the Console.app log viewer if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/': bin_py = '/usr/local/bin' -site_config_files = [ - os.path.join(path, config_basename) - for path in appdirs.site_config_dirs('pip') -] - -venv_config_file = os.path.join(sys.prefix, config_basename) -new_config_file = os.path.join(appdirs.user_config_dir("pip"), config_basename) - def distutils_scheme(dist_name, user=False, home=None, root=None, isolated=False, prefix=None): @@ -171,8 +115,9 @@ def distutils_scheme(dist_name, user=False, home=None, root=None, # or user base for installations during finalize_options() # ideally, we'd prefer a scheme class that has no side-effects. 
assert not (user and prefix), "user={} prefix={}".format(user, prefix) + assert not (home and prefix), "home={} prefix={}".format(home, prefix) i.user = user or i.user - if user: + if user or home: i.prefix = "" i.prefix = prefix or i.prefix i.home = home or i.home @@ -196,7 +141,7 @@ def distutils_scheme(dist_name, user=False, home=None, root=None, sys.prefix, 'include', 'site', - 'python' + sys.version[:3], + 'python{}'.format(get_major_minor_version()), dist_name, ) diff --git a/pipenv/patched/notpip/_internal/main.py b/pipenv/patched/notpip/_internal/main.py new file mode 100644 index 0000000000..ed712c4243 --- /dev/null +++ b/pipenv/patched/notpip/_internal/main.py @@ -0,0 +1,47 @@ +"""Primary application entrypoint. +""" +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + +from __future__ import absolute_import + +import locale +import logging +import os +import sys + +from pipenv.patched.notpip._internal.cli.autocompletion import autocomplete +from pipenv.patched.notpip._internal.cli.main_parser import parse_command +from pipenv.patched.notpip._internal.commands import create_command +from pipenv.patched.notpip._internal.exceptions import PipError +from pipenv.patched.notpip._internal.utils import deprecation + +logger = logging.getLogger(__name__) + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + # Configure our deprecation warnings to be sent through loggers + deprecation.install_warning_logger() + + autocomplete() + + try: + cmd_name, cmd_args = parse_command(args) + except PipError as exc: + sys.stderr.write("ERROR: %s" % exc) + sys.stderr.write(os.linesep) + sys.exit(1) + + # Needed for locale.getpreferredencoding(False) to work + # in pip._internal.utils.encoding.auto_decode + try: + locale.setlocale(locale.LC_ALL, '') + except locale.Error as e: + # setlocale can apparently crash if locale are uninitialized + logger.debug("Ignoring error %s when setting locale", e) + command = create_command(cmd_name, isolated=("--isolated" in cmd_args)) + + return command.main(cmd_args) diff --git a/pipenv/patched/notpip/_internal/models/candidate.py b/pipenv/patched/notpip/_internal/models/candidate.py index adc3550ea2..937d872f3f 100644 --- a/pipenv/patched/notpip/_internal/models/candidate.py +++ b/pipenv/patched/notpip/_internal/models/candidate.py @@ -1,32 +1,40 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from pipenv.patched.notpip._vendor.packaging.version import parse as parse_version from pipenv.patched.notpip._internal.utils.models import KeyBasedCompareMixin from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from pipenv.patched.notpip._vendor.packaging.version import _BaseVersion # noqa: F401 - from pipenv.patched.notpip._internal.models.link import Link # noqa: F401 - from typing import Any, Union # noqa: F401 + from pipenv.patched.notpip._vendor.packaging.version import _BaseVersion + from pipenv.patched.notpip._internal.models.link import Link + from typing import Any class InstallationCandidate(KeyBasedCompareMixin): """Represents a potential "candidate" for installation. 
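+
+    An illustrative sketch, where ``link`` would be a Link pointing at a
+    matching archive::
+
+        InstallationCandidate("requests", "2.22.0", link)
+
+    The version string is parsed into a comparable version object on
+    construction.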
""" - def __init__(self, project, version, location, requires_python=None): + def __init__(self, project, version, link, requires_python=None): # type: (Any, str, Link, Any) -> None self.project = project self.version = parse_version(version) # type: _BaseVersion - self.location = location + self.link = link self.requires_python = requires_python super(InstallationCandidate, self).__init__( - key=(self.project, self.version, self.location), + key=(self.project, self.version, self.link), defining_class=InstallationCandidate ) def __repr__(self): # type: () -> str return "".format( - self.project, self.version, self.location, + self.project, self.version, self.link, + ) + + def __str__(self): + return '{!r} candidate (version {} at {})'.format( + self.project, self.version, self.link, ) diff --git a/pipenv/patched/notpip/_internal/models/format_control.py b/pipenv/patched/notpip/_internal/models/format_control.py index 7172ad9ff9..cbb5795876 100644 --- a/pipenv/patched/notpip/_internal/models/format_control.py +++ b/pipenv/patched/notpip/_internal/models/format_control.py @@ -1,9 +1,14 @@ +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name +from pipenv.patched.notpip._internal.exceptions import CommandError from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import Optional, Set, FrozenSet # noqa: F401 + from typing import Optional, Set, FrozenSet class FormatControl(object): @@ -36,6 +41,10 @@ def __repr__(self): @staticmethod def handle_mutual_excludes(value, target, other): # type: (str, Optional[Set], Optional[Set]) -> None + if value.startswith('-'): + raise CommandError( + "--no-binary / --only-binary option requires 1 argument." + ) new = value.split(',') while ':all:' in new: other.clear() diff --git a/pipenv/patched/notpip/_internal/models/link.py b/pipenv/patched/notpip/_internal/models/link.py index ded4de43b0..688bd14f6c 100644 --- a/pipenv/patched/notpip/_internal/models/link.py +++ b/pipenv/patched/notpip/_internal/models/link.py @@ -1,49 +1,70 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + +import os import posixpath import re from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse -from pipenv.patched.notpip._internal.download import path_to_url +from pipenv.patched.notpip._internal.utils.filetypes import WHEEL_EXTENSION from pipenv.patched.notpip._internal.utils.misc import ( - WHEEL_EXTENSION, redact_password_from_url, splitext, + redact_auth_from_url, + split_auth_from_netloc, + splitext, ) from pipenv.patched.notpip._internal.utils.models import KeyBasedCompareMixin from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.urls import path_to_url, url_to_path if MYPY_CHECK_RUNNING: - from typing import Optional, Tuple, Union, Text # noqa: F401 - from pipenv.patched.notpip._internal.index import HTMLPage # noqa: F401 + from typing import Optional, Text, Tuple, Union + from pipenv.patched.notpip._internal.collector import HTMLPage + from pipenv.patched.notpip._internal.utils.hashes import Hashes class Link(KeyBasedCompareMixin): """Represents a parsed link from a Package Index's simple URL """ - def __init__(self, url, comes_from=None, requires_python=None): - # type: (str, Optional[Union[str, HTMLPage]], Optional[str]) -> None + def __init__( + self, + url, # type: str + comes_from=None, # type: Optional[Union[str, HTMLPage]] + requires_python=None, # type: Optional[str] + yanked_reason=None, # type: Optional[Text] + ): + # type: (...) -> None """ - url: - url of the resource pointed to (href of the link) - comes_from: - instance of HTMLPage where the link was found, or string. - requires_python: - String containing the `Requires-Python` metadata field, specified - in PEP 345. This may be specified by a data-requires-python - attribute in the HTML link tag, as described in PEP 503. + :param url: url of the resource pointed to (href of the link) + :param comes_from: instance of HTMLPage where the link was found, + or string. + :param requires_python: String containing the `Requires-Python` + metadata field, specified in PEP 345. This may be specified by + a data-requires-python attribute in the HTML link tag, as + described in PEP 503. + :param yanked_reason: the reason the file has been yanked, if the + file has been yanked, or None if the file hasn't been yanked. + This is the value of the "data-yanked" attribute, if present, in + a simple repository HTML link. If the file has been yanked but + no reason was provided, this should be the empty string. See + PEP 592 for more information and the specification. """ # url can be a UNC windows share if url.startswith('\\\\'): url = path_to_url(url) - self.url = url + self._parsed_url = urllib_parse.urlsplit(url) + # Store the url as a private attribute to prevent accidentally + # trying to set a new value. 
+        self._url = url
+
         self.comes_from = comes_from
         self.requires_python = requires_python if requires_python else None
+        self.yanked_reason = yanked_reason
 
-        super(Link, self).__init__(
-            key=(self.url),
-            defining_class=Link
-        )
+        super(Link, self).__init__(key=url, defining_class=Link)
 
     def __str__(self):
         if self.requires_python:
@@ -51,37 +72,56 @@ def __str__(self):
         else:
             rp = ''
         if self.comes_from:
-            return '%s (from %s)%s' % (redact_password_from_url(self.url),
+            return '%s (from %s)%s' % (redact_auth_from_url(self._url),
                                        self.comes_from, rp)
         else:
-            return redact_password_from_url(str(self.url))
+            return redact_auth_from_url(str(self._url))
 
     def __repr__(self):
         return '<Link %s>' % self
 
+    @property
+    def url(self):
+        # type: () -> str
+        return self._url
+
     @property
     def filename(self):
         # type: () -> str
-        _, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
-        name = posixpath.basename(path.rstrip('/')) or netloc
+        path = self.path.rstrip('/')
+        name = posixpath.basename(path)
+        if not name:
+            # Make sure we don't leak auth information if the netloc
+            # includes a username and password.
+            netloc, user_pass = split_auth_from_netloc(self.netloc)
+            return netloc
+
         name = urllib_parse.unquote(name)
-        assert name, ('URL %r produced no filename' % self.url)
+        assert name, ('URL %r produced no filename' % self._url)
         return name
 
+    @property
+    def file_path(self):
+        # type: () -> str
+        return url_to_path(self.url)
+
     @property
     def scheme(self):
         # type: () -> str
-        return urllib_parse.urlsplit(self.url)[0]
+        return self._parsed_url.scheme
 
     @property
     def netloc(self):
         # type: () -> str
-        return urllib_parse.urlsplit(self.url)[1]
+        """
+        This can contain auth information.
+        """
+        return self._parsed_url.netloc
 
     @property
     def path(self):
         # type: () -> str
-        return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
+        return urllib_parse.unquote(self._parsed_url.path)
 
     def splitext(self):
         # type: () -> Tuple[str, str]
@@ -95,7 +135,7 @@ def ext(self):
     @property
     def url_without_fragment(self):
         # type: () -> str
-        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
+        scheme, netloc, path, query, fragment = self._parsed_url
         return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
 
     _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
@@ -103,7 +143,7 @@ def url_without_fragment(self):
     @property
     def egg_fragment(self):
         # type: () -> Optional[str]
-        match = self._egg_fragment_re.search(self.url)
+        match = self._egg_fragment_re.search(self._url)
         if not match:
             return None
         return match.group(1)
@@ -113,7 +153,7 @@ def egg_fragment(self):
     @property
     def subdirectory_fragment(self):
         # type: () -> Optional[str]
-        match = self._subdirectory_fragment_re.search(self.url)
+        match = self._subdirectory_fragment_re.search(self._url)
         if not match:
             return None
         return match.group(1)
@@ -125,7 +165,7 @@ def subdirectory_fragment(self):
     @property
     def hash(self):
         # type: () -> Optional[str]
-        match = self._hash_re.search(self.url)
+        match = self._hash_re.search(self._url)
         if match:
             return match.group(2)
         return None
@@ -133,7 +173,7 @@ def hash(self):
     @property
     def hash_name(self):
         # type: () -> Optional[str]
-        match = self._hash_re.search(self.url)
+        match = self._hash_re.search(self._url)
         if match:
             return match.group(1)
         return None
@@ -141,7 +181,16 @@ def hash_name(self):
     @property
     def show_url(self):
         # type: () -> Optional[str]
-        return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
+        return posixpath.basename(self._url.split('#', 1)[0].split('?', 1)[0])
+
+    @property
+    def 
is_file(self): + # type: () -> bool + return self.scheme == 'file' + + def is_existing_dir(self): + # type: () -> bool + return self.is_file and os.path.isdir(self.file_path) @property def is_wheel(self): @@ -149,15 +198,30 @@ def is_wheel(self): return self.ext == WHEEL_EXTENSION @property - def is_artifact(self): + def is_vcs(self): # type: () -> bool - """ - Determines if this points to an actual artifact (e.g. a tarball) or if - it points to an "abstract" thing like a path or a VCS location. - """ from pipenv.patched.notpip._internal.vcs import vcs - if self.scheme in vcs.all_schemes: + return self.scheme in vcs.all_schemes + + @property + def is_yanked(self): + # type: () -> bool + return self.yanked_reason is not None + + @property + def has_hash(self): + return self.hash_name is not None + + def is_hash_allowed(self, hashes): + # type: (Optional[Hashes]) -> bool + """ + Return True if the link has a hash and it is allowed. + """ + if hashes is None or not self.has_hash: return False + # Assert non-None so mypy knows self.hash_name and self.hash are str. + assert self.hash_name is not None + assert self.hash is not None - return True + return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash) diff --git a/pipenv/patched/notpip/_internal/models/search_scope.py b/pipenv/patched/notpip/_internal/models/search_scope.py new file mode 100644 index 0000000000..9e82ccb36f --- /dev/null +++ b/pipenv/patched/notpip/_internal/models/search_scope.py @@ -0,0 +1,116 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + +import itertools +import logging +import os +import posixpath + +from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name +from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse + +from pipenv.patched.notpip._internal.models.index import PyPI +from pipenv.patched.notpip._internal.utils.compat import HAS_TLS +from pipenv.patched.notpip._internal.utils.misc import normalize_path, redact_auth_from_url +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import List + + +logger = logging.getLogger(__name__) + + +class SearchScope(object): + + """ + Encapsulates the locations that pip is configured to search. + """ + + @classmethod + def create( + cls, + find_links, # type: List[str] + index_urls, # type: List[str] + ): + # type: (...) -> SearchScope + """ + Create a SearchScope object after normalizing the `find_links`. + """ + # Build find_links. If an argument starts with ~, it may be + # a local file relative to a home directory. So try normalizing + # it and if it exists, use the normalized version. + # This is deliberately conservative - it might be fine just to + # blindly normalize anything starting with a ~... + built_find_links = [] # type: List[str] + for link in find_links: + if link.startswith('~'): + new_link = normalize_path(link) + if os.path.exists(new_link): + link = new_link + built_find_links.append(link) + + # If we don't have TLS enabled, then WARN if anyplace we're looking + # relies on TLS. + if not HAS_TLS: + for link in itertools.chain(index_urls, built_find_links): + parsed = urllib_parse.urlparse(link) + if parsed.scheme == 'https': + logger.warning( + 'pip is configured with locations that require ' + 'TLS/SSL, however the ssl module in Python is not ' + 'available.' 
+ ) + break + + return cls( + find_links=built_find_links, + index_urls=index_urls, + ) + + def __init__( + self, + find_links, # type: List[str] + index_urls, # type: List[str] + ): + # type: (...) -> None + self.find_links = find_links + self.index_urls = index_urls + + def get_formatted_locations(self): + # type: () -> str + lines = [] + if self.index_urls and self.index_urls != [PyPI.simple_url]: + lines.append( + 'Looking in indexes: {}'.format(', '.join( + redact_auth_from_url(url) for url in self.index_urls)) + ) + if self.find_links: + lines.append( + 'Looking in links: {}'.format(', '.join( + redact_auth_from_url(url) for url in self.find_links)) + ) + return '\n'.join(lines) + + def get_index_urls_locations(self, project_name): + # type: (str) -> List[str] + """Returns the locations found via self.index_urls + + Checks the url_name on the main (first in the list) index and + use this url_name to produce all locations + """ + + def mkurl_pypi_url(url): + loc = posixpath.join( + url, + urllib_parse.quote(canonicalize_name(project_name))) + # For maximum compatibility with easy_install, ensure the path + # ends in a trailing slash. Although this isn't in the spec + # (and PyPI can handle it without the slash) some other index + # implementations might break if they relied on easy_install's + # behavior. + if not loc.endswith('/'): + loc = loc + '/' + return loc + + return [mkurl_pypi_url(url) for url in self.index_urls] diff --git a/pipenv/patched/notpip/_internal/models/selection_prefs.py b/pipenv/patched/notpip/_internal/models/selection_prefs.py new file mode 100644 index 0000000000..256e903aa2 --- /dev/null +++ b/pipenv/patched/notpip/_internal/models/selection_prefs.py @@ -0,0 +1,47 @@ +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Optional + from pipenv.patched.notpip._internal.models.format_control import FormatControl + + +class SelectionPreferences(object): + + """ + Encapsulates the candidate selection preferences for downloading + and installing files. + """ + + # Don't include an allow_yanked default value to make sure each call + # site considers whether yanked releases are allowed. This also causes + # that decision to be made explicit in the calling code, which helps + # people when reading the code. + def __init__( + self, + allow_yanked, # type: bool + allow_all_prereleases=False, # type: bool + format_control=None, # type: Optional[FormatControl] + prefer_binary=False, # type: bool + ignore_requires_python=None, # type: Optional[bool] + ): + # type: (...) -> None + """Create a SelectionPreferences object. + + :param allow_yanked: Whether files marked as yanked (in the sense + of PEP 592) are permitted to be candidates for install. + :param format_control: A FormatControl object or None. Used to control + the selection of source packages / binary packages when consulting + the index and links. + :param prefer_binary: Whether to prefer an old, but valid, binary + dist over a new source dist. + :param ignore_requires_python: Whether to ignore incompatible + "Requires-Python" values in links. Defaults to False. 
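+
+        A typical construction might look like (a sketch only)::
+
+            SelectionPreferences(allow_yanked=True, prefer_binary=True)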
+ """ + if ignore_requires_python is None: + ignore_requires_python = False + + self.allow_yanked = allow_yanked + self.allow_all_prereleases = allow_all_prereleases + self.format_control = format_control + self.prefer_binary = prefer_binary + self.ignore_requires_python = ignore_requires_python diff --git a/pipenv/patched/notpip/_internal/models/target_python.py b/pipenv/patched/notpip/_internal/models/target_python.py new file mode 100644 index 0000000000..c815b743cc --- /dev/null +++ b/pipenv/patched/notpip/_internal/models/target_python.py @@ -0,0 +1,106 @@ +import sys + +from pipenv.patched.notpip._internal.pep425tags import get_supported, version_info_to_nodot +from pipenv.patched.notpip._internal.utils.misc import normalize_version_info +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import List, Optional, Tuple + from pipenv.patched.notpip._internal.pep425tags import Pep425Tag + + +class TargetPython(object): + + """ + Encapsulates the properties of a Python interpreter one is targeting + for a package install, download, etc. + """ + + def __init__( + self, + platform=None, # type: Optional[str] + py_version_info=None, # type: Optional[Tuple[int, ...]] + abi=None, # type: Optional[str] + implementation=None, # type: Optional[str] + ): + # type: (...) -> None + """ + :param platform: A string or None. If None, searches for packages + that are supported by the current system. Otherwise, will find + packages that can be built on the platform passed in. These + packages will only be downloaded for distribution: they will + not be built locally. + :param py_version_info: An optional tuple of ints representing the + Python version information to use (e.g. `sys.version_info[:3]`). + This can have length 1, 2, or 3 when provided. + :param abi: A string or None. This is passed to pep425tags.py's + get_supported() function as is. + :param implementation: A string or None. This is passed to + pep425tags.py's get_supported() function as is. + """ + # Store the given py_version_info for when we call get_supported(). + self._given_py_version_info = py_version_info + + if py_version_info is None: + py_version_info = sys.version_info[:3] + else: + py_version_info = normalize_version_info(py_version_info) + + py_version = '.'.join(map(str, py_version_info[:2])) + + self.abi = abi + self.implementation = implementation + self.platform = platform + self.py_version = py_version + self.py_version_info = py_version_info + + # This is used to cache the return value of get_tags(). + self._valid_tags = None # type: Optional[List[Pep425Tag]] + + def format_given(self): + # type: () -> str + """ + Format the given, non-None attributes for display. + """ + display_version = None + if self._given_py_version_info is not None: + display_version = '.'.join( + str(part) for part in self._given_py_version_info + ) + + key_values = [ + ('platform', self.platform), + ('version_info', display_version), + ('abi', self.abi), + ('implementation', self.implementation), + ] + return ' '.join( + '{}={!r}'.format(key, value) for key, value in key_values + if value is not None + ) + + def get_tags(self): + # type: () -> List[Pep425Tag] + """ + Return the supported PEP 425 tags to check wheel candidates against. + + The tags are returned in order of preference (most preferred first). + """ + if self._valid_tags is None: + # Pass versions=None if no py_version_info was given since + # versions=None uses special default logic. 
+ py_version_info = self._given_py_version_info + if py_version_info is None: + versions = None + else: + versions = [version_info_to_nodot(py_version_info)] + + tags = get_supported( + versions=versions, + platform=self.platform, + abi=self.abi, + impl=self.implementation, + ) + self._valid_tags = tags + + return self._valid_tags diff --git a/pipenv/patched/notpip/_internal/network/__init__.py b/pipenv/patched/notpip/_internal/network/__init__.py new file mode 100644 index 0000000000..b51bde91b2 --- /dev/null +++ b/pipenv/patched/notpip/_internal/network/__init__.py @@ -0,0 +1,2 @@ +"""Contains purely network-related utilities. +""" diff --git a/pipenv/patched/notpip/_internal/network/auth.py b/pipenv/patched/notpip/_internal/network/auth.py new file mode 100644 index 0000000000..943c48bc63 --- /dev/null +++ b/pipenv/patched/notpip/_internal/network/auth.py @@ -0,0 +1,298 @@ +"""Network Authentication Helpers + +Contains interface (MultiDomainBasicAuth) and associated glue code for +providing credentials in the context of network requests. +""" + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + +import logging + +from pipenv.patched.notpip._vendor.requests.auth import AuthBase, HTTPBasicAuth +from pipenv.patched.notpip._vendor.requests.utils import get_netrc_auth +from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse + +from pipenv.patched.notpip._internal.utils.misc import ( + ask, + ask_input, + ask_password, + remove_auth_from_url, + split_auth_netloc_from_url, +) +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from optparse import Values + from typing import Dict, Optional, Tuple + + from pipenv.patched.notpip._internal.vcs.versioncontrol import AuthInfo + + Credentials = Tuple[str, str, str] + +logger = logging.getLogger(__name__) + +try: + import keyring # noqa +except ImportError: + keyring = None +except Exception as exc: + logger.warning( + "Keyring is skipped due to an exception: %s", str(exc), + ) + keyring = None + + +def get_keyring_auth(url, username): + """Return the tuple auth for a given url from keyring.""" + if not url or not keyring: + return None + + try: + try: + get_credential = keyring.get_credential + except AttributeError: + pass + else: + logger.debug("Getting credentials from keyring for %s", url) + cred = get_credential(url, username) + if cred is not None: + return cred.username, cred.password + return None + + if username: + logger.debug("Getting password from keyring for %s", url) + password = keyring.get_password(url, username) + if password: + return username, password + + except Exception as exc: + logger.warning( + "Keyring is skipped due to an exception: %s", str(exc), + ) + + +class MultiDomainBasicAuth(AuthBase): + + def __init__(self, prompting=True, index_urls=None): + # type: (bool, Optional[Values]) -> None + self.prompting = prompting + self.index_urls = index_urls + self.passwords = {} # type: Dict[str, AuthInfo] + # When the user is prompted to enter credentials and keyring is + # available, we will offer to save them. If the user accepts, + # this value is set to the credentials they entered. After the + # request authenticates, the caller should call + # ``save_credentials`` to save these. + self._credentials_to_save = None # type: Optional[Credentials] + + def _get_index_url(self, url): + """Return the original index URL matching the requested URL. 
+ + Cached or dynamically generated credentials may work against + the original index URL rather than just the netloc. + + The provided url should have had its username and password + removed already. If the original index url had credentials then + they will be included in the return value. + + Returns None if no matching index was found, or if --no-index + was specified by the user. + """ + if not url or not self.index_urls: + return None + + for u in self.index_urls: + prefix = remove_auth_from_url(u).rstrip("/") + "/" + if url.startswith(prefix): + return u + + def _get_new_credentials(self, original_url, allow_netrc=True, + allow_keyring=True): + """Find and return credentials for the specified URL.""" + # Split the credentials and netloc from the url. + url, netloc, url_user_password = split_auth_netloc_from_url( + original_url, + ) + + # Start with the credentials embedded in the url + username, password = url_user_password + if username is not None and password is not None: + logger.debug("Found credentials in url for %s", netloc) + return url_user_password + + # Find a matching index url for this request + index_url = self._get_index_url(url) + if index_url: + # Split the credentials from the url. + index_info = split_auth_netloc_from_url(index_url) + if index_info: + index_url, _, index_url_user_password = index_info + logger.debug("Found index url %s", index_url) + + # If an index URL was found, try its embedded credentials + if index_url and index_url_user_password[0] is not None: + username, password = index_url_user_password + if username is not None and password is not None: + logger.debug("Found credentials in index url for %s", netloc) + return index_url_user_password + + # Get creds from netrc if we still don't have them + if allow_netrc: + netrc_auth = get_netrc_auth(original_url) + if netrc_auth: + logger.debug("Found credentials in netrc for %s", netloc) + return netrc_auth + + # If we don't have a password and keyring is available, use it. + if allow_keyring: + # The index url is more specific than the netloc, so try it first + kr_auth = ( + get_keyring_auth(index_url, username) or + get_keyring_auth(netloc, username) + ) + if kr_auth: + logger.debug("Found credentials in keyring for %s", netloc) + return kr_auth + + return username, password + + def _get_url_and_credentials(self, original_url): + """Return the credentials to use for the provided URL. + + If allowed, netrc and keyring may be used to obtain the + correct credentials. + + Returns (url_without_credentials, username, password). Note + that even if the original URL contains credentials, this + function may return a different username and password. + """ + url, netloc, _ = split_auth_netloc_from_url(original_url) + + # Use any stored credentials that we have for this netloc + username, password = self.passwords.get(netloc, (None, None)) + + if username is None and password is None: + # No stored credentials. Acquire new credentials without prompting + # the user. (e.g. from netrc, keyring, or the URL itself) + username, password = self._get_new_credentials(original_url) + + if username is not None or password is not None: + # Convert the username and password if they're None, so that + # this netloc will show up as "cached" in the conditional above. + # Further, HTTPBasicAuth doesn't accept None, so it makes sense to + # cache the value that is going to be used. + username = username or "" + password = password or "" + + # Store any acquired credentials. 
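+            # Later lookups for this netloc then hit the cached-credentials
+            # branch above instead of consulting netrc or keyring again.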
+            self.passwords[netloc] = (username, password)
+
+        assert (
+            # Credentials were found
+            (username is not None and password is not None) or
+            # Credentials were not found
+            (username is None and password is None)
+        ), "Could not load credentials from url: {}".format(original_url)
+
+        return url, username, password
+
+    def __call__(self, req):
+        # Get credentials for this request
+        url, username, password = self._get_url_and_credentials(req.url)
+
+        # Set the url of the request to the url without any credentials
+        req.url = url
+
+        if username is not None and password is not None:
+            # Send the basic auth with this request
+            req = HTTPBasicAuth(username, password)(req)
+
+        # Attach a hook to handle 401 responses
+        req.register_hook("response", self.handle_401)
+
+        return req
+
+    # Factored out to allow for easy patching in tests
+    def _prompt_for_password(self, netloc):
+        username = ask_input("User for %s: " % netloc)
+        if not username:
+            # Return the same three-element shape that handle_401 unpacks;
+            # a bare two-tuple here would raise a ValueError there.
+            return None, None, False
+        auth = get_keyring_auth(netloc, username)
+        if auth:
+            return auth[0], auth[1], False
+        password = ask_password("Password: ")
+        return username, password, True
+
+    # Factored out to allow for easy patching in tests
+    def _should_save_password_to_keyring(self):
+        if not keyring:
+            return False
+        return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"
+
+    def handle_401(self, resp, **kwargs):
+        # We only care about 401 responses, anything else we want to just
+        # pass through the actual response
+        if resp.status_code != 401:
+            return resp
+
+        # We are not able to prompt the user so simply return the response
+        if not self.prompting:
+            return resp
+
+        parsed = urllib_parse.urlparse(resp.url)
+
+        # Prompt the user for a new username and password
+        username, password, save = self._prompt_for_password(parsed.netloc)
+
+        # Store the new username and password to use for future requests
+        self._credentials_to_save = None
+        if username is not None and password is not None:
+            self.passwords[parsed.netloc] = (username, password)
+
+            # Prompt to save the password to keyring
+            if save and self._should_save_password_to_keyring():
+                self._credentials_to_save = (parsed.netloc, username, password)
+
+        # Consume content and release the original connection to allow our new
+        # request to reuse the same one.
+        resp.content
+        resp.raw.release_conn()
+
+        # Add our new username and password to the request
+        req = HTTPBasicAuth(username or "", password or "")(resp.request)
+        req.register_hook("response", self.warn_on_401)
+
+        # On successful request, save the credentials that were used to
+        # keyring. (Note that if the user responded "no" above, this member
+        # is not set and nothing will be saved.)
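+        # save_credentials only persists when the retried request comes back
+        # with a status below 400, so a password that fails again is never
+        # written to the keyring.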
+ if self._credentials_to_save: + req.register_hook("response", self.save_credentials) + + # Send our new request + new_resp = resp.connection.send(req, **kwargs) + new_resp.history.append(resp) + + return new_resp + + def warn_on_401(self, resp, **kwargs): + """Response callback to warn about incorrect credentials.""" + if resp.status_code == 401: + logger.warning( + '401 Error, Credentials not correct for %s', resp.request.url, + ) + + def save_credentials(self, resp, **kwargs): + """Response callback to save credentials on success.""" + assert keyring is not None, "should never reach here without keyring" + if not keyring: + return + + creds = self._credentials_to_save + self._credentials_to_save = None + if creds and resp.status_code < 400: + try: + logger.info('Saving credentials to keyring') + keyring.set_password(*creds) + except Exception: + logger.exception('Failed to save credentials') diff --git a/pipenv/patched/notpip/_internal/network/cache.py b/pipenv/patched/notpip/_internal/network/cache.py new file mode 100644 index 0000000000..9954009c85 --- /dev/null +++ b/pipenv/patched/notpip/_internal/network/cache.py @@ -0,0 +1,75 @@ +"""HTTP cache implementation. +""" + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + +import os +from contextlib import contextmanager + +from pipenv.patched.notpip._vendor.cachecontrol.cache import BaseCache +from pipenv.patched.notpip._vendor.cachecontrol.caches import FileCache + +from pipenv.patched.notpip._internal.utils.filesystem import adjacent_tmp_file, replace +from pipenv.patched.notpip._internal.utils.misc import ensure_dir +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Optional + + +@contextmanager +def suppressed_cache_errors(): + """If we can't access the cache then we can just skip caching and process + requests as if caching wasn't enabled. + """ + try: + yield + except (OSError, IOError): + pass + + +class SafeFileCache(BaseCache): + """ + A file based cache which is safe to use even when the target directory may + not be accessible or writable. + """ + + def __init__(self, directory): + # type: (str) -> None + assert directory is not None, "Cache directory must not be None." + super(SafeFileCache, self).__init__() + self.directory = directory + + def _get_cache_path(self, name): + # type: (str) -> str + # From cachecontrol.caches.file_cache.FileCache._fn, brought into our + # class for backwards-compatibility and to avoid using a non-public + # method. 
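+        # FileCache.encode hashes the key; a digest beginning with "abcde"
+        # lands at <directory>/a/b/c/d/e/<digest>, fanning entries out
+        # across small subdirectories.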
+ hashed = FileCache.encode(name) + parts = list(hashed[:5]) + [hashed] + return os.path.join(self.directory, *parts) + + def get(self, key): + # type: (str) -> Optional[bytes] + path = self._get_cache_path(key) + with suppressed_cache_errors(): + with open(path, 'rb') as f: + return f.read() + + def set(self, key, value): + # type: (str, bytes) -> None + path = self._get_cache_path(key) + with suppressed_cache_errors(): + ensure_dir(os.path.dirname(path)) + + with adjacent_tmp_file(path) as f: + f.write(value) + + replace(f.name, path) + + def delete(self, key): + # type: (str) -> None + path = self._get_cache_path(key) + with suppressed_cache_errors(): + os.remove(path) diff --git a/pipenv/patched/notpip/_internal/network/session.py b/pipenv/patched/notpip/_internal/network/session.py new file mode 100644 index 0000000000..178c045764 --- /dev/null +++ b/pipenv/patched/notpip/_internal/network/session.py @@ -0,0 +1,426 @@ +"""PipSession and supporting code, containing all pip-specific +network request configuration and behavior. +""" + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + +import email.utils +import json +import logging +import mimetypes +import os +import platform +import sys +import warnings + +from pipenv.patched.notpip._vendor import requests, six, urllib3 +from pipenv.patched.notpip._vendor.cachecontrol import CacheControlAdapter +from pipenv.patched.notpip._vendor.requests.adapters import BaseAdapter, HTTPAdapter +from pipenv.patched.notpip._vendor.requests.models import Response +from pipenv.patched.notpip._vendor.requests.structures import CaseInsensitiveDict +from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse +from pipenv.patched.notpip._vendor.urllib3.exceptions import InsecureRequestWarning + +from pipenv.patched.notpip import __version__ +from pipenv.patched.notpip._internal.network.auth import MultiDomainBasicAuth +from pipenv.patched.notpip._internal.network.cache import SafeFileCache +# Import ssl from compat so the initial import occurs in only one place. +from pipenv.patched.notpip._internal.utils.compat import HAS_TLS, ipaddress, ssl +from pipenv.patched.notpip._internal.utils.filesystem import check_path_owner +from pipenv.patched.notpip._internal.utils.glibc import libc_ver +from pipenv.patched.notpip._internal.utils.misc import ( + build_url_from_netloc, + get_installed_version, + parse_netloc, +) +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.urls import url_to_path + +if MYPY_CHECK_RUNNING: + from typing import ( + Iterator, List, Optional, Tuple, Union, + ) + + from pipenv.patched.notpip._internal.models.link import Link + + SecureOrigin = Tuple[str, str, Optional[Union[int, str]]] + + +logger = logging.getLogger(__name__) + + +# Ignore warning raised when using --trusted-host. +warnings.filterwarnings("ignore", category=InsecureRequestWarning) + + +SECURE_ORIGINS = [ + # protocol, hostname, port + # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) + ("https", "*", "*"), + ("*", "localhost", "*"), + ("*", "127.0.0.0/8", "*"), + ("*", "::1/128", "*"), + ("file", "*", None), + # ssh is always secure. + ("ssh", "*", "*"), +] # type: List[SecureOrigin] + + +# These are environment variables present when running under various +# CI systems. For each variable, some CI systems that use the variable +# are indicated. 
The collection was chosen so that for each of a number +# of popular systems, at least one of the environment variables is used. +# This list is used to provide some indication of and lower bound for +# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive. +# For more background, see: https://github.com/pypa/pip/issues/5499 +CI_ENVIRONMENT_VARIABLES = ( + # Azure Pipelines + 'BUILD_BUILDID', + # Jenkins + 'BUILD_ID', + # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI + 'CI', + # Explicit environment variable. + 'PIP_IS_CI', +) + + +def looks_like_ci(): + # type: () -> bool + """ + Return whether it looks like pip is running under CI. + """ + # We don't use the method of checking for a tty (e.g. using isatty()) + # because some CI systems mimic a tty (e.g. Travis CI). Thus that + # method doesn't provide definitive information in either direction. + return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES) + + +def user_agent(): + """ + Return a string representing the user agent. + """ + data = { + "installer": {"name": "pip", "version": __version__}, + "python": platform.python_version(), + "implementation": { + "name": platform.python_implementation(), + }, + } + + if data["implementation"]["name"] == 'CPython': + data["implementation"]["version"] = platform.python_version() + elif data["implementation"]["name"] == 'PyPy': + if sys.pypy_version_info.releaselevel == 'final': + pypy_version_info = sys.pypy_version_info[:3] + else: + pypy_version_info = sys.pypy_version_info + data["implementation"]["version"] = ".".join( + [str(x) for x in pypy_version_info] + ) + elif data["implementation"]["name"] == 'Jython': + # Complete Guess + data["implementation"]["version"] = platform.python_version() + elif data["implementation"]["name"] == 'IronPython': + # Complete Guess + data["implementation"]["version"] = platform.python_version() + + if sys.platform.startswith("linux"): + from pipenv.patched.notpip._vendor import distro + distro_infos = dict(filter( + lambda x: x[1], + zip(["name", "version", "id"], distro.linux_distribution()), + )) + libc = dict(filter( + lambda x: x[1], + zip(["lib", "version"], libc_ver()), + )) + if libc: + distro_infos["libc"] = libc + if distro_infos: + data["distro"] = distro_infos + + if sys.platform.startswith("darwin") and platform.mac_ver()[0]: + data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]} + + if platform.system(): + data.setdefault("system", {})["name"] = platform.system() + + if platform.release(): + data.setdefault("system", {})["release"] = platform.release() + + if platform.machine(): + data["cpu"] = platform.machine() + + if HAS_TLS: + data["openssl_version"] = ssl.OPENSSL_VERSION + + setuptools_version = get_installed_version("setuptools") + if setuptools_version is not None: + data["setuptools_version"] = setuptools_version + + # Use None rather than False so as not to give the impression that + # pip knows it is not being run under CI. Rather, it is a null or + # inconclusive result. Also, we include some value rather than no + # value to make it easier to know that the check has been run. 
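+    # The serialized blob below therefore always carries a "ci" key:
+    # true when running under CI, null otherwise.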
+ data["ci"] = True if looks_like_ci() else None + + user_data = os.environ.get("PIP_USER_AGENT_USER_DATA") + if user_data is not None: + data["user_data"] = user_data + + return "{data[installer][name]}/{data[installer][version]} {json}".format( + data=data, + json=json.dumps(data, separators=(",", ":"), sort_keys=True), + ) + + +class LocalFSAdapter(BaseAdapter): + + def send(self, request, stream=None, timeout=None, verify=None, cert=None, + proxies=None): + pathname = url_to_path(request.url) + + resp = Response() + resp.status_code = 200 + resp.url = request.url + + try: + stats = os.stat(pathname) + except OSError as exc: + resp.status_code = 404 + resp.raw = exc + else: + modified = email.utils.formatdate(stats.st_mtime, usegmt=True) + content_type = mimetypes.guess_type(pathname)[0] or "text/plain" + resp.headers = CaseInsensitiveDict({ + "Content-Type": content_type, + "Content-Length": stats.st_size, + "Last-Modified": modified, + }) + + resp.raw = open(pathname, "rb") + resp.close = resp.raw.close + + return resp + + def close(self): + pass + + +class InsecureHTTPAdapter(HTTPAdapter): + + def cert_verify(self, conn, url, verify, cert): + conn.cert_reqs = 'CERT_NONE' + conn.ca_certs = None + + +class PipSession(requests.Session): + + timeout = None # type: Optional[int] + + def __init__(self, *args, **kwargs): + """ + :param trusted_hosts: Domains not to emit warnings for when not using + HTTPS. + """ + retries = kwargs.pop("retries", 0) + cache = kwargs.pop("cache", None) + trusted_hosts = kwargs.pop("trusted_hosts", []) # type: List[str] + index_urls = kwargs.pop("index_urls", None) + + super(PipSession, self).__init__(*args, **kwargs) + + # Namespace the attribute with "pip_" just in case to prevent + # possible conflicts with the base class. + self.pip_trusted_origins = [] # type: List[Tuple[str, Optional[int]]] + + # Attach our User Agent to the request + self.headers["User-Agent"] = user_agent() + + # Attach our Authentication handler to the session + self.auth = MultiDomainBasicAuth(index_urls=index_urls) + + # Create our urllib3.Retry instance which will allow us to customize + # how we handle retries. + retries = urllib3.Retry( + # Set the total number of retries that a particular request can + # have. + total=retries, + + # A 503 error from PyPI typically means that the Fastly -> Origin + # connection got interrupted in some way. A 503 error in general + # is typically considered a transient error so we'll go ahead and + # retry it. + # A 500 may indicate transient error in Amazon S3 + # A 520 or 527 - may indicate transient error in CloudFlare + status_forcelist=[500, 503, 520, 527], + + # Add a small amount of back off between failed requests in + # order to prevent hammering the service. + backoff_factor=0.25, + ) + + # Check to ensure that the directory containing our cache directory + # is owned by the user current executing pip. If it does not exist + # we will check the parent directory until we find one that does exist. + if cache and not check_path_owner(cache): + logger.warning( + "The directory '%s' or its parent directory is not owned by " + "the current user and the cache has been disabled. Please " + "check the permissions and owner of that directory. If " + "executing pip with sudo, you may want sudo's -H flag.", + cache, + ) + cache = None + + # We want to _only_ cache responses on securely fetched origins. 
We do + # this because we can't validate the response of an insecurely fetched + # origin, and we don't want someone to be able to poison the cache and + # require manual eviction from the cache to fix it. + if cache: + secure_adapter = CacheControlAdapter( + cache=SafeFileCache(cache), + max_retries=retries, + ) + else: + secure_adapter = HTTPAdapter(max_retries=retries) + + # Our Insecure HTTPAdapter disables HTTPS validation. It does not + # support caching (see above) so we'll use it for all http:// URLs as + # well as any https:// host that we've marked as ignoring TLS errors + # for. + insecure_adapter = InsecureHTTPAdapter(max_retries=retries) + # Save this for later use in add_insecure_host(). + self._insecure_adapter = insecure_adapter + + self.mount("https://", secure_adapter) + self.mount("http://", insecure_adapter) + + # Enable file:// urls + self.mount("file://", LocalFSAdapter()) + + for host in trusted_hosts: + self.add_trusted_host(host, suppress_logging=True) + + def add_trusted_host(self, host, source=None, suppress_logging=False): + # type: (str, Optional[str], bool) -> None + """ + :param host: It is okay to provide a host that has previously been + added. + :param source: An optional source string, for logging where the host + string came from. + """ + if not suppress_logging: + msg = 'adding trusted host: {!r}'.format(host) + if source is not None: + msg += ' (from {})'.format(source) + logger.info(msg) + + host_port = parse_netloc(host) + if host_port not in self.pip_trusted_origins: + self.pip_trusted_origins.append(host_port) + + self.mount(build_url_from_netloc(host) + '/', self._insecure_adapter) + if not host_port[1]: + # Mount wildcard ports for the same host. + self.mount( + build_url_from_netloc(host) + ':', + self._insecure_adapter + ) + + def iter_secure_origins(self): + # type: () -> Iterator[SecureOrigin] + for secure_origin in SECURE_ORIGINS: + yield secure_origin + for host, port in self.pip_trusted_origins: + yield ('*', host, '*' if port is None else port) + + def is_secure_origin(self, location): + # type: (Link) -> bool + # Determine if this url used a secure transport mechanism + parsed = urllib_parse.urlparse(str(location)) + origin_protocol, origin_host, origin_port = ( + parsed.scheme, parsed.hostname, parsed.port, + ) + + # The protocol to use to see if the protocol matches. + # Don't count the repository type as part of the protocol: in + # cases such as "git+ssh", only use "ssh". (I.e., Only verify against + # the last scheme.) + origin_protocol = origin_protocol.rsplit('+', 1)[-1] + + # Determine if our origin is a secure origin by looking through our + # hardcoded list of secure origins, as well as any additional ones + # configured on this PackageFinder instance. + for secure_origin in self.iter_secure_origins(): + secure_protocol, secure_host, secure_port = secure_origin + if origin_protocol != secure_protocol and secure_protocol != "*": + continue + + try: + # We need to do this decode dance to ensure that we have a + # unicode object, even on Python 2.x. 
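+                # e.g. a bytes value like b"127.0.0.1" is decoded to text
+                # first; the ipaddress backport treats raw byte strings as
+                # packed binary addresses rather than textual ones.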
+ addr = ipaddress.ip_address( + origin_host + if ( + isinstance(origin_host, six.text_type) or + origin_host is None + ) + else origin_host.decode("utf8") + ) + network = ipaddress.ip_network( + secure_host + if isinstance(secure_host, six.text_type) + # setting secure_host to proper Union[bytes, str] + # creates problems in other places + else secure_host.decode("utf8") # type: ignore + ) + except ValueError: + # We don't have both a valid address or a valid network, so + # we'll check this origin against hostnames. + if ( + origin_host and + origin_host.lower() != secure_host.lower() and + secure_host != "*" + ): + continue + else: + # We have a valid address and network, so see if the address + # is contained within the network. + if addr not in network: + continue + + # Check to see if the port matches. + if ( + origin_port != secure_port and + secure_port != "*" and + secure_port is not None + ): + continue + + # If we've gotten here, then this origin matches the current + # secure origin and we should return True + return True + + # If we've gotten to this point, then the origin isn't secure and we + # will not accept it as a valid location to search. We will however + # log a warning that we are ignoring it. + logger.warning( + "The repository located at %s is not a trusted or secure host and " + "is being ignored. If this repository is available via HTTPS we " + "recommend you use HTTPS instead, otherwise you may silence " + "this warning and allow it anyway with '--trusted-host %s'.", + origin_host, + origin_host, + ) + + return False + + def request(self, method, url, *args, **kwargs): + # Allow setting a default timeout on a session + kwargs.setdefault("timeout", self.timeout) + + # Dispatch the actual request + return super(PipSession, self).request(method, url, *args, **kwargs) diff --git a/pipenv/patched/notpip/_internal/network/xmlrpc.py b/pipenv/patched/notpip/_internal/network/xmlrpc.py new file mode 100644 index 0000000000..a519d74499 --- /dev/null +++ b/pipenv/patched/notpip/_internal/network/xmlrpc.py @@ -0,0 +1,44 @@ +"""xmlrpclib.Transport implementation +""" + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + +import logging + +from pipenv.patched.notpip._vendor import requests +# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is +# why we ignore the type on this import +from pipenv.patched.notpip._vendor.six.moves import xmlrpc_client # type: ignore +from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse + +logger = logging.getLogger(__name__) + + +class PipXmlrpcTransport(xmlrpc_client.Transport): + """Provide a `xmlrpclib.Transport` implementation via a `PipSession` + object. 
+ """ + + def __init__(self, index_url, session, use_datetime=False): + xmlrpc_client.Transport.__init__(self, use_datetime) + index_parts = urllib_parse.urlparse(index_url) + self._scheme = index_parts.scheme + self._session = session + + def request(self, host, handler, request_body, verbose=False): + parts = (self._scheme, host, handler, None, None, None) + url = urllib_parse.urlunparse(parts) + try: + headers = {'Content-Type': 'text/xml'} + response = self._session.post(url, data=request_body, + headers=headers, stream=True) + response.raise_for_status() + self.verbose = verbose + return self.parse_response(response.raw) + except requests.HTTPError as exc: + logger.critical( + "HTTP error %s while getting %s", + exc.response.status_code, url, + ) + raise diff --git a/pipenv/patched/notpip/_internal/operations/check.py b/pipenv/patched/notpip/_internal/operations/check.py index a73611d4d7..9f2fb18717 100644 --- a/pipenv/patched/notpip/_internal/operations/check.py +++ b/pipenv/patched/notpip/_internal/operations/check.py @@ -1,21 +1,27 @@ """Validation of dependencies of packages """ +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + import logging from collections import namedtuple from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._vendor.pkg_resources import RequirementParseError -from pipenv.patched.notpip._internal.operations.prepare import make_abstract_dist +from pipenv.patched.notpip._internal.distributions import ( + make_distribution_for_install_requirement, +) from pipenv.patched.notpip._internal.utils.misc import get_installed_distributions from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING logger = logging.getLogger(__name__) if MYPY_CHECK_RUNNING: - from pipenv.patched.notpip._internal.req.req_install import InstallRequirement # noqa: F401 - from typing import ( # noqa: F401 + from pipenv.patched.notpip._internal.req.req_install import InstallRequirement + from typing import ( Any, Callable, Dict, Optional, Set, Tuple, List ) @@ -63,8 +69,8 @@ def check_package_set(package_set, should_ignore=None): def should_ignore(name): return False - missing = dict() - conflicting = dict() + missing = {} + conflicting = {} for package_name in package_set: # Info about dependencies of package_name @@ -130,7 +136,9 @@ def _simulate_installation_of(to_install, package_set): # Modify it as installing requirement_set would (assuming no errors) for inst_req in to_install: - dist = make_abstract_dist(inst_req).dist() + abstract_dist = make_distribution_for_install_requirement(inst_req) + dist = abstract_dist.get_pkg_resources_distribution() + name = canonicalize_name(dist.key) package_set[name] = PackageDetails(dist.version, dist.requires()) diff --git a/pipenv/patched/notpip/_internal/operations/freeze.py b/pipenv/patched/notpip/_internal/operations/freeze.py index 8fd755e880..0fe5399f96 100644 --- a/pipenv/patched/notpip/_internal/operations/freeze.py +++ b/pipenv/patched/notpip/_internal/operations/freeze.py @@ -1,3 +1,7 @@ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import collections @@ -11,20 +15,22 @@ from pipenv.patched.notpip._internal.exceptions import BadCommand, InstallationError from pipenv.patched.notpip._internal.req.constructors import ( - install_req_from_editable, install_req_from_line, + install_req_from_editable, + install_req_from_line, ) from pipenv.patched.notpip._internal.req.req_file import COMMENT_RE from pipenv.patched.notpip._internal.utils.misc import ( - dist_is_editable, get_installed_distributions, + dist_is_editable, + get_installed_distributions, ) from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 + from typing import ( Iterator, Optional, List, Container, Set, Dict, Tuple, Iterable, Union ) - from pipenv.patched.notpip._internal.cache import WheelCache # noqa: F401 - from pipenv.patched.notpip._vendor.pkg_resources import ( # noqa: F401 + from pipenv.patched.notpip._internal.cache import WheelCache + from pipenv.patched.notpip._vendor.pkg_resources import ( Distribution, Requirement ) @@ -39,6 +45,7 @@ def freeze( find_links=None, # type: Optional[List[str]] local_only=None, # type: Optional[bool] user_only=None, # type: Optional[bool] + paths=None, # type: Optional[List[str]] skip_regex=None, # type: Optional[str] isolated=False, # type: bool wheel_cache=None, # type: Optional[WheelCache] @@ -57,13 +64,18 @@ def freeze( installations = {} # type: Dict[str, FrozenRequirement] for dist in get_installed_distributions(local_only=local_only, skip=(), - user_only=user_only): + user_only=user_only, + paths=paths): try: req = FrozenRequirement.from_dist(dist) - except RequirementParseError: + except RequirementParseError as exc: + # We include dist rather than dist.project_name because the + # dist string includes more information, like the version and + # location. We also include the exception message to aid + # troubleshooting. 
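+            # The warning below names the full dist (project, version,
+            # location) together with the parse error, so the package with
+            # broken metadata can be located on disk.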
logger.warning( - "Could not parse requirement: %s", - dist.project_name + 'Could not generate requirement for distribution %r: %s', + dist, exc ) continue if exclude_editable and req.editable: @@ -173,12 +185,12 @@ def get_requirement_info(dist): location = os.path.normcase(os.path.abspath(dist.location)) from pipenv.patched.notpip._internal.vcs import vcs, RemoteNotFoundError - vc_type = vcs.get_backend_type(location) + vcs_backend = vcs.get_backend_for_dir(location) - if not vc_type: + if vcs_backend is None: req = dist.as_requirement() logger.debug( - 'No VCS found for editable requirement {!r} in: {!r}', req, + 'No VCS found for editable requirement "%s" in: %r', req, location, ) comments = [ @@ -187,12 +199,12 @@ def get_requirement_info(dist): return (location, True, comments) try: - req = vc_type.get_src_requirement(location, dist.project_name) + req = vcs_backend.get_src_requirement(location, dist.project_name) except RemoteNotFoundError: req = dist.as_requirement() comments = [ '# Editable {} install with no remote ({})'.format( - vc_type.__name__, req, + type(vcs_backend).__name__, req, ) ] return (location, True, comments) @@ -202,7 +214,7 @@ def get_requirement_info(dist): 'cannot determine version of editable source in %s ' '(%s command not found in path)', location, - vc_type.name, + vcs_backend.name, ) return (None, True, []) diff --git a/pipenv/patched/notpip/_internal/operations/generate_metadata.py b/pipenv/patched/notpip/_internal/operations/generate_metadata.py new file mode 100644 index 0000000000..dd30f5536a --- /dev/null +++ b/pipenv/patched/notpip/_internal/operations/generate_metadata.py @@ -0,0 +1,136 @@ +"""Metadata generation logic for source distributions. +""" + +import logging +import os + +from pipenv.patched.notpip._internal.exceptions import InstallationError +from pipenv.patched.notpip._internal.utils.misc import ensure_dir +from pipenv.patched.notpip._internal.utils.setuptools_build import make_setuptools_shim_args +from pipenv.patched.notpip._internal.utils.subprocess import call_subprocess +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.vcs import vcs + +if MYPY_CHECK_RUNNING: + from typing import Callable, List + from pipenv.patched.notpip._internal.req.req_install import InstallRequirement + +logger = logging.getLogger(__name__) + + +def get_metadata_generator(install_req): + # type: (InstallRequirement) -> Callable[[InstallRequirement], str] + """Return a callable metadata generator for this InstallRequirement. + + A metadata generator takes an InstallRequirement (install_req) as an input, + generates metadata via the appropriate process for that install_req and + returns the generated metadata directory. + """ + if not install_req.use_pep517: + return _generate_metadata_legacy + + return _generate_metadata + + +def _find_egg_info(source_directory, is_editable): + # type: (str, bool) -> str + """Find an .egg-info in `source_directory`, based on `is_editable`. + """ + + def looks_like_virtual_env(path): + # type: (str) -> bool + return ( + os.path.lexists(os.path.join(path, 'bin', 'python')) or + os.path.exists(os.path.join(path, 'Scripts', 'Python.exe')) + ) + + def locate_editable_egg_info(base): + # type: (str) -> List[str] + candidates = [] # type: List[str] + for root, dirs, files in os.walk(base): + for dir_ in vcs.dirnames: + if dir_ in dirs: + dirs.remove(dir_) + # Iterate over a copy of ``dirs``, since mutating + # a list while iterating over it can cause trouble. 
+ # (See https://github.com/pypa/pip/pull/462.) + for dir_ in list(dirs): + if looks_like_virtual_env(os.path.join(root, dir_)): + dirs.remove(dir_) + # Also don't search through tests + elif dir_ == 'test' or dir_ == 'tests': + dirs.remove(dir_) + candidates.extend(os.path.join(root, dir_) for dir_ in dirs) + return [f for f in candidates if f.endswith('.egg-info')] + + def depth_of_directory(dir_): + # type: (str) -> int + return ( + dir_.count(os.path.sep) + + (os.path.altsep and dir_.count(os.path.altsep) or 0) + ) + + base = source_directory + if is_editable: + filenames = locate_editable_egg_info(base) + else: + base = os.path.join(base, 'pip-egg-info') + filenames = os.listdir(base) + + if not filenames: + raise InstallationError( + "Files/directories not found in %s" % base + ) + + # If we have more than one match, we pick the toplevel one. This + # can easily be the case if there is a dist folder which contains + # an extracted tarball for testing purposes. + if len(filenames) > 1: + filenames.sort(key=depth_of_directory) + + return os.path.join(base, filenames[0]) + + +def _generate_metadata_legacy(install_req): + # type: (InstallRequirement) -> str + req_details_str = install_req.name or "from {}".format(install_req.link) + logger.debug( + 'Running setup.py (path:%s) egg_info for package %s', + install_req.setup_py_path, req_details_str, + ) + + # Compose arguments for subprocess call + base_cmd = make_setuptools_shim_args(install_req.setup_py_path) + if install_req.isolated: + base_cmd += ["--no-user-cfg"] + + # For non-editable installs, don't put the .egg-info files at the root, + # to avoid confusion due to the source code being considered an installed + # egg. + egg_base_option = [] # type: List[str] + if not install_req.editable: + egg_info_dir = os.path.join( + install_req.unpacked_source_directory, 'pip-egg-info', + ) + egg_base_option = ['--egg-base', egg_info_dir] + + # setuptools complains if the target directory does not exist. + ensure_dir(egg_info_dir) + + with install_req.build_env: + call_subprocess( + base_cmd + ["egg_info"] + egg_base_option, + cwd=install_req.unpacked_source_directory, + command_desc='python setup.py egg_info', + ) + + # Return the .egg-info directory. + return _find_egg_info( + install_req.unpacked_source_directory, + install_req.editable, + ) + + +def _generate_metadata(install_req): + # type: (InstallRequirement) -> str + return install_req.prepare_pep517_metadata() diff --git a/pipenv/patched/notpip/_internal/operations/prepare.py b/pipenv/patched/notpip/_internal/operations/prepare.py index 018fca9740..6128f9b965 100644 --- a/pipenv/patched/notpip/_internal/operations/prepare.py +++ b/pipenv/patched/notpip/_internal/operations/prepare.py @@ -1,173 +1,53 @@ """Prepares a distribution for installation """ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + import logging import os -from pipenv.patched.notpip._vendor import pkg_resources, requests +from pipenv.patched.notpip._vendor import requests -from pipenv.patched.notpip._internal.build_env import BuildEnvironment -from pipenv.patched.notpip._internal.download import ( - is_dir_url, is_file_url, is_vcs_url, unpack_url, url_to_path, +from pipenv.patched.notpip._internal.distributions import ( + make_distribution_for_install_requirement, ) +from pipenv.patched.notpip._internal.distributions.installed import InstalledDistribution +from pipenv.patched.notpip._internal.download import unpack_url from pipenv.patched.notpip._internal.exceptions import ( - DirectoryUrlHashUnsupported, HashUnpinned, InstallationError, - PreviousBuildDirError, VcsHashUnsupported, + DirectoryUrlHashUnsupported, + HashUnpinned, + InstallationError, + PreviousBuildDirError, + VcsHashUnsupported, ) from pipenv.patched.notpip._internal.utils.compat import expanduser from pipenv.patched.notpip._internal.utils.hashes import MissingHashes from pipenv.patched.notpip._internal.utils.logging import indent_log -from pipenv.patched.notpip._internal.utils.misc import display_path, normalize_path, rmtree +from pipenv.patched.notpip._internal.utils.marker_files import write_delete_marker_file +from pipenv.patched.notpip._internal.utils.misc import display_path, normalize_path from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING -from pipenv.patched.notpip._internal.vcs import vcs if MYPY_CHECK_RUNNING: - from typing import Any, Optional # noqa: F401 - from pipenv.patched.notpip._internal.req.req_install import InstallRequirement # noqa: F401 - from pipenv.patched.notpip._internal.index import PackageFinder # noqa: F401 - from pipenv.patched.notpip._internal.download import PipSession # noqa: F401 - from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker # noqa: F401 - -logger = logging.getLogger(__name__) + from typing import Optional + from pipenv.patched.notpip._internal.distributions import AbstractDistribution + from pipenv.patched.notpip._internal.index import PackageFinder + from pipenv.patched.notpip._internal.network.session import PipSession + from pipenv.patched.notpip._internal.req.req_install import InstallRequirement + from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker -def make_abstract_dist(req): - # type: (InstallRequirement) -> DistAbstraction - """Factory to make an abstract dist object. +logger = logging.getLogger(__name__) - Preconditions: Either an editable req with a source_dir, or satisfied_by or - a wheel link, or a non-editable req with a source_dir. - :return: A concrete DistAbstraction. +def _get_prepared_distribution(req, req_tracker, finder, build_isolation): + """Prepare a distribution for installation. """ - if req.editable: - return IsSDist(req) - elif req.link and req.link.is_wheel: - return IsWheel(req) - else: - return IsSDist(req) - - -class DistAbstraction(object): - """Abstracts out the wheel vs non-wheel Resolver.resolve() logic. - - The requirements for anything installable are as follows: - - we must be able to determine the requirement name - (or we can't correctly handle the non-upgrade case). 
- - we must be able to generate a list of run-time dependencies - without installing any additional packages (or we would - have to either burn time by doing temporary isolated installs - or alternatively violate pips 'don't start installing unless - all requirements are available' rule - neither of which are - desirable). - - for packages with setup requirements, we must also be able - to determine their requirements without installing additional - packages (for the same reason as run-time dependencies) - - we must be able to create a Distribution object exposing the - above metadata. - """ - - def __init__(self, req): - # type: (InstallRequirement) -> None - self.req = req # type: InstallRequirement - - def dist(self): - # type: () -> Any - """Return a setuptools Dist object.""" - raise NotImplementedError - - def prep_for_dist(self, finder, build_isolation): - # type: (PackageFinder, bool) -> Any - """Ensure that we can get a Dist for this requirement.""" - raise NotImplementedError - - -class IsWheel(DistAbstraction): - - def dist(self): - # type: () -> pkg_resources.Distribution - return list(pkg_resources.find_distributions( - self.req.source_dir))[0] - - def prep_for_dist(self, finder, build_isolation): - # type: (PackageFinder, bool) -> Any - # FIXME:https://github.com/pypa/pip/issues/1112 - pass - - -class IsSDist(DistAbstraction): - - def dist(self): - return self.req.get_dist() - - def prep_for_dist(self, finder, build_isolation): - # type: (PackageFinder, bool) -> None - # Prepare for building. We need to: - # 1. Load pyproject.toml (if it exists) - # 2. Set up the build environment - - self.req.load_pyproject_toml() - should_isolate = self.req.use_pep517 and build_isolation - - def _raise_conflicts(conflicting_with, conflicting_reqs): - raise InstallationError( - "Some build dependencies for %s conflict with %s: %s." % ( - self.req, conflicting_with, ', '.join( - '%s is incompatible with %s' % (installed, wanted) - for installed, wanted in sorted(conflicting)))) - - if should_isolate: - # Isolate in a BuildEnvironment and install the build-time - # requirements. - self.req.build_env = BuildEnvironment() - self.req.build_env.install_requirements( - finder, self.req.pyproject_requires, 'overlay', - "Installing build dependencies" - ) - conflicting, missing = self.req.build_env.check_requirements( - self.req.requirements_to_check - ) - if conflicting: - _raise_conflicts("PEP 517/518 supported requirements", - conflicting) - if missing: - logger.warning( - "Missing build requirements in pyproject.toml for %s.", - self.req, - ) - logger.warning( - "The project does not specify a build backend, and " - "pip cannot fall back to setuptools without %s.", - " and ".join(map(repr, sorted(missing))) - ) - # Install any extra build dependencies that the backend requests. - # This must be done in a second pass, as the pyproject.toml - # dependencies must be installed before we can call the backend. - with self.req.build_env: - # We need to have the env active when calling the hook. 
- self.req.spin_message = "Getting requirements to build wheel" - reqs = self.req.pep517_backend.get_requires_for_build_wheel() - conflicting, missing = self.req.build_env.check_requirements(reqs) - if conflicting: - _raise_conflicts("the backend dependencies", conflicting) - self.req.build_env.install_requirements( - finder, missing, 'normal', - "Installing backend dependencies" - ) - - self.req.prepare_metadata() - self.req.assert_source_matches_version() - - -class Installed(DistAbstraction): - - def dist(self): - # type: () -> pkg_resources.Distribution - return self.req.satisfied_by - - def prep_for_dist(self, finder, build_isolation): - # type: (PackageFinder, bool) -> Any - pass + abstract_dist = make_distribution_for_install_requirement(req) + with req_tracker.track(req): + abstract_dist.prepare_distribution_metadata(finder, build_isolation) + return abstract_dist class RequirementPreparer(object): @@ -191,8 +71,10 @@ def __init__( self.build_dir = build_dir self.req_tracker = req_tracker - # Where still packed archives should be written to. If None, they are + # Where still-packed archives should be written to. If None, they are # not saved, and are deleted immediately after unpacking. + if download_dir: + download_dir = expanduser(download_dir) self.download_dir = download_dir # Where still-packed .whl files should be written to. If None, they are @@ -215,35 +97,36 @@ def __init__( @property def _download_should_save(self): # type: () -> bool - # TODO: Modify to reduce indentation needed - if self.download_dir: - self.download_dir = expanduser(self.download_dir) - if os.path.exists(self.download_dir): - return True - else: - logger.critical('Could not find download directory') - raise InstallationError( - "Could not find or access download directory '%s'" - % display_path(self.download_dir)) - return False + if not self.download_dir: + return False + + if os.path.exists(self.download_dir): + return True + + logger.critical('Could not find download directory') + raise InstallationError( + "Could not find or access download directory '%s'" + % display_path(self.download_dir)) def prepare_linked_requirement( self, req, # type: InstallRequirement session, # type: PipSession finder, # type: PackageFinder - upgrade_allowed, # type: bool - require_hashes # type: bool + require_hashes, # type: bool ): - # type: (...) -> DistAbstraction + # type: (...) -> AbstractDistribution """Prepare a requirement that would be obtained from req.link """ + assert req.link + link = req.link + # TODO: Breakup into smaller functions - if req.link and req.link.scheme == 'file': - path = url_to_path(req.link.url) + if link.scheme == 'file': + path = link.file_path logger.info('Processing %s', display_path(path)) else: - logger.info('Collecting %s', req) + logger.info('Collecting %s', req.req or req) with indent_log(): # @@ if filesystem packages are not marked @@ -256,20 +139,8 @@ def prepare_linked_requirement( # installation. # FIXME: this won't upgrade when there's an existing # package unpacked in `req.source_dir` - # package unpacked in `req.source_dir` if os.path.exists(os.path.join(req.source_dir, 'setup.py')): rmtree(req.source_dir) - req.populate_link(finder, upgrade_allowed, require_hashes) - - # We can't hit this spot and have populate_link return None. - # req.satisfied_by is None here (because we're - # guarded) and upgrade has no impact except when satisfied_by - # is not None. 
- # Then inside find_requirement existing_applicable -> False - # If no new versions are found, DistributionNotFound is raised, - # otherwise a result is guaranteed. - assert req.link - link = req.link # Now that we have the real link, we can tell what kind of # requirements we have and raise some more informative errors @@ -281,9 +152,9 @@ def prepare_linked_requirement( # we would report less-useful error messages for # unhashable requirements, complaining that there's no # hash provided. - if is_vcs_url(link): + if link.is_vcs: raise VcsHashUnsupported() - elif is_file_url(link) and is_dir_url(link): + elif link.is_existing_dir(): raise DirectoryUrlHashUnsupported() if not req.original_link and not req.is_pinned: # Unpinned packages are asking for trouble when a new @@ -303,26 +174,15 @@ def prepare_linked_requirement( # showing the user what the hash should be. hashes = MissingHashes() + download_dir = self.download_dir + if link.is_wheel and self.wheel_download_dir: + # when doing 'pip wheel` we download wheels to a + # dedicated dir. + download_dir = self.wheel_download_dir + try: - download_dir = self.download_dir - # We always delete unpacked sdists after pip ran. - autodelete_unpacked = True - if req.link.is_wheel and self.wheel_download_dir: - # when doing 'pip wheel` we download wheels to a - # dedicated dir. - download_dir = self.wheel_download_dir - if req.link.is_wheel: - if download_dir: - # When downloading, we only unpack wheels to get - # metadata. - autodelete_unpacked = True - else: - # When installing a wheel, we use the unpacked - # wheel. - autodelete_unpacked = False unpack_url( - req.link, req.source_dir, - download_dir, autodelete_unpacked, + link, req.source_dir, download_dir, session=session, hashes=hashes, progress_bar=self.progress_bar ) @@ -335,14 +195,31 @@ def prepare_linked_requirement( raise InstallationError( 'Could not install requirement %s because of HTTP ' 'error %s for URL %s' % - (req, exc, req.link) + (req, exc, link) ) - abstract_dist = make_abstract_dist(req) - with self.req_tracker.track(req): - abstract_dist.prep_for_dist(finder, self.build_isolation) + + if link.is_wheel: + if download_dir: + # When downloading, we only unpack wheels to get + # metadata. + autodelete_unpacked = True + else: + # When installing a wheel, we use the unpacked + # wheel. + autodelete_unpacked = False + else: + # We always delete unpacked sdists after pip runs. + autodelete_unpacked = True + if autodelete_unpacked: + write_delete_marker_file(req.source_dir) + + abstract_dist = _get_prepared_distribution( + req, self.req_tracker, finder, self.build_isolation, + ) + if self._download_should_save: # Make a .zip of the source_dir we already created. - if req.link.scheme in vcs.all_schemes: + if link.is_vcs: req.archive(self.download_dir) return abstract_dist @@ -353,7 +230,7 @@ def prepare_editable_requirement( use_user_site, # type: bool finder # type: PackageFinder ): - # type: (...) -> DistAbstraction + # type: (...) 
-> AbstractDistribution """Prepare an editable requirement """ assert req.editable, "cannot prepare a non-editable req as editable" @@ -370,9 +247,9 @@ def prepare_editable_requirement( req.ensure_has_source_dir(self.src_dir) req.update_editable(not self._download_should_save) - abstract_dist = make_abstract_dist(req) - with self.req_tracker.track(req): - abstract_dist.prep_for_dist(finder, self.build_isolation) + abstract_dist = _get_prepared_distribution( + req, self.req_tracker, finder, self.build_isolation, + ) if self._download_should_save: req.archive(self.download_dir) @@ -380,8 +257,13 @@ def prepare_editable_requirement( return abstract_dist - def prepare_installed_requirement(self, req, require_hashes, skip_reason): - # type: (InstallRequirement, bool, Optional[str]) -> DistAbstraction + def prepare_installed_requirement( + self, + req, # type: InstallRequirement + require_hashes, # type: bool + skip_reason # type: str + ): + # type: (...) -> AbstractDistribution """Prepare an already-installed requirement """ assert req.satisfied_by, "req should have been satisfied but isn't" @@ -401,6 +283,6 @@ def prepare_installed_requirement(self, req, require_hashes, skip_reason): 'completely repeatable environment, install into an ' 'empty virtualenv.' ) - abstract_dist = Installed(req) + abstract_dist = InstalledDistribution(req) return abstract_dist diff --git a/pipenv/patched/notpip/_internal/pep425tags.py b/pipenv/patched/notpip/_internal/pep425tags.py index 4b6eb2bcaf..c2a1e346bc 100644 --- a/pipenv/patched/notpip/_internal/pep425tags.py +++ b/pipenv/patched/notpip/_internal/pep425tags.py @@ -10,16 +10,13 @@ import warnings from collections import OrderedDict -try: - import pipenv.patched.notpip._internal.utils.glibc -except ImportError: - import pipenv.patched.notpip.utils.glibc +import pipenv.patched.notpip._internal.utils.glibc from pipenv.patched.notpip._internal.utils.compat import get_extension_suffixes from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Tuple, Callable, List, Optional, Union, Dict + from typing import ( + Tuple, Callable, List, Optional, Union, Dict, Set ) Pep425Tag = Tuple[str, str, str] @@ -52,6 +49,12 @@ def get_abbr_impl(): return pyimpl +def version_info_to_nodot(version_info): + # type: (Tuple[int, ...]) -> str + # Only use up to the first two numbers. 
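+    # e.g. version_info_to_nodot((3, 7, 4)) == '37'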
+ return ''.join(map(str, version_info[:2])) + + def get_impl_ver(): # type: () -> str """Return implementation version.""" @@ -102,32 +105,30 @@ def get_abi_tag(): (CPython 2, PyPy).""" soabi = get_config_var('SOABI') impl = get_abbr_impl() + abi = None # type: Optional[str] + if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'): d = '' m = '' u = '' - if get_flag('Py_DEBUG', - lambda: hasattr(sys, 'gettotalrefcount'), - warn=(impl == 'cp')): + is_cpython = (impl == 'cp') + if get_flag( + 'Py_DEBUG', lambda: hasattr(sys, 'gettotalrefcount'), + warn=is_cpython): d = 'd' - if get_flag('WITH_PYMALLOC', - lambda: impl == 'cp', - warn=(impl == 'cp')): + if sys.version_info < (3, 8) and get_flag( + 'WITH_PYMALLOC', lambda: is_cpython, warn=is_cpython): m = 'm' - if get_flag('Py_UNICODE_SIZE', - lambda: sys.maxunicode == 0x10ffff, - expected=4, - warn=(impl == 'cp' and - sys.version_info < (3, 3))) \ - and sys.version_info < (3, 3): + if sys.version_info < (3, 3) and get_flag( + 'Py_UNICODE_SIZE', lambda: sys.maxunicode == 0x10ffff, + expected=4, warn=is_cpython): u = 'u' abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) elif soabi and soabi.startswith('cpython-'): abi = 'cp' + soabi.split('-')[1] elif soabi: abi = soabi.replace('.', '_').replace('-', '_') - else: - abi = None + return abi @@ -163,6 +164,33 @@ def get_platform(): return result +def is_linux_armhf(): + # type: () -> bool + if get_platform() != "linux_armv7l": + return False + # hard-float ABI can be detected from the ELF header of the running + # process + sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) + try: + with open(sys_executable, 'rb') as f: + elf_header_raw = f.read(40) # read 40 first bytes of ELF header + except (IOError, OSError, TypeError): + return False + if elf_header_raw is None or len(elf_header_raw) < 40: + return False + if isinstance(elf_header_raw, str): + elf_header = [ord(c) for c in elf_header_raw] + else: + elf_header = [b for b in elf_header_raw] + result = elf_header[0:4] == [0x7f, 0x45, 0x4c, 0x46] # ELF magic number + result &= elf_header[4:5] == [1] # 32-bit ELF + result &= elf_header[5:6] == [1] # little-endian + result &= elf_header[18:20] == [0x28, 0] # ARM machine + result &= elf_header[39:40] == [5] # ARM EABIv5 + result &= (elf_header[37:38][0] & 4) == 4 # EF_ARM_ABI_FLOAT_HARD + return result + + def is_manylinux1_compatible(): # type: () -> bool # Only Linux, and only x86-64 / i686 @@ -199,6 +227,32 @@ def is_manylinux2010_compatible(): return pipenv.patched.notpip._internal.utils.glibc.have_compatible_glibc(2, 12) +def is_manylinux2014_compatible(): + # type: () -> bool + # Only Linux, and only supported architectures + platform = get_platform() + if platform not in {"linux_x86_64", "linux_i686", "linux_aarch64", + "linux_armv7l", "linux_ppc64", "linux_ppc64le", + "linux_s390x"}: + return False + + # check for hard-float ABI in case we're running linux_armv7l not to + # install hard-float ABI wheel in a soft-float ABI environment + if platform == "linux_armv7l" and not is_linux_armhf(): + return False + + # Check for presence of _manylinux module + try: + import _manylinux + return bool(_manylinux.manylinux2014_compatible) + except (ImportError, AttributeError): + # Fall through to heuristic check below + pass + + # Check glibc version. CentOS 7 uses glibc 2.17. 
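+    # (PEP 599 defines the manylinux2014 platform in terms of CentOS 7,
+    # hence the 2.17 floor.)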
+ return pipenv.patched.notpip._internal.utils.glibc.have_compatible_glibc(2, 17) + + def get_darwin_arches(major, minor, machine): # type: (int, int, str) -> List[str] """Return a list of supported arches (including group arches) for @@ -307,7 +361,7 @@ def get_supported( if abi: abis[0:0] = [abi] - abi3s = set() + abi3s = set() # type: Set[str] for suffix in get_extension_suffixes(): if suffix.startswith('.abi'): abi3s.add(suffix.split('.', 2)[1]) @@ -332,6 +386,16 @@ def get_supported( else: # arch pattern didn't match (?!) arches = [arch] + elif arch_prefix == 'manylinux2014': + arches = [arch] + # manylinux1/manylinux2010 wheels run on most manylinux2014 systems + # with the exception of wheels depending on ncurses. PEP 599 states + # manylinux1/manylinux2010 wheels should be considered + # manylinux2014 wheels: + # https://www.python.org/dev/peps/pep-0599/#backwards-compatibility-with-manylinux2010-wheels + if arch_suffix in {'i686', 'x86_64'}: + arches.append('manylinux2010' + arch_sep + arch_suffix) + arches.append('manylinux1' + arch_sep + arch_suffix) elif arch_prefix == 'manylinux2010': # manylinux1 wheels run on most manylinux2010 systems with the # exception of wheels depending on ncurses. PEP 571 states @@ -340,6 +404,8 @@ def get_supported( arches = [arch, 'manylinux1' + arch_sep + arch_suffix] elif platform is None: arches = [] + if is_manylinux2014_compatible(): + arches.append('manylinux2014' + arch_sep + arch_suffix) if is_manylinux2010_compatible(): arches.append('manylinux2010' + arch_sep + arch_suffix) if is_manylinux1_compatible(): diff --git a/pipenv/patched/notpip/_internal/pyproject.py b/pipenv/patched/notpip/_internal/pyproject.py index 8845b2dc9a..bef9c37889 100644 --- a/pipenv/patched/notpip/_internal/pyproject.py +++ b/pipenv/patched/notpip/_internal/pyproject.py @@ -10,7 +10,7 @@ from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import Any, Tuple, Optional, List # noqa: F401 + from typing import Any, Tuple, Optional, List def _is_list_of_str(obj): @@ -21,9 +21,9 @@ def _is_list_of_str(obj): ) -def make_pyproject_path(setup_py_dir): +def make_pyproject_path(unpacked_source_directory): # type: (str) -> str - path = os.path.join(setup_py_dir, 'pyproject.toml') + path = os.path.join(unpacked_source_directory, 'pyproject.toml') # Python2 __file__ should not be unicode if six.PY2 and isinstance(path, six.text_type): diff --git a/pipenv/patched/notpip/_internal/req/__init__.py b/pipenv/patched/notpip/_internal/req/__init__.py index 51606fec89..998be6a26a 100644 --- a/pipenv/patched/notpip/_internal/req/__init__.py +++ b/pipenv/patched/notpip/_internal/req/__init__.py @@ -1,15 +1,19 @@ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False
+
 from __future__ import absolute_import
 
 import logging
 
-from .req_install import InstallRequirement
-from .req_set import RequirementSet
-from .req_file import parse_requirements
 from pipenv.patched.notpip._internal.utils.logging import indent_log
 from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
 
+from .req_file import parse_requirements
+from .req_install import InstallRequirement
+from .req_set import RequirementSet
+
 if MYPY_CHECK_RUNNING:
-    from typing import List, Sequence  # noqa: F401
+    from typing import Any, List, Sequence
 
 __all__ = [
     "RequirementSet", "InstallRequirement",
@@ -23,7 +27,8 @@ def install_given_reqs(
     to_install,  # type: List[InstallRequirement]
     install_options,  # type: List[str]
     global_options=(),  # type: Sequence[str]
-    *args, **kwargs
+    *args,  # type: Any
+    **kwargs  # type: Any
 ):
     # type: (...) -> List[InstallRequirement]
     """
diff --git a/pipenv/patched/notpip/_internal/req/constructors.py b/pipenv/patched/notpip/_internal/req/constructors.py
index a9d8221a43..b1a2abe723 100644
--- a/pipenv/patched/notpip/_internal/req/constructors.py
+++ b/pipenv/patched/notpip/_internal/req/constructors.py
@@ -8,6 +8,10 @@ InstallRequirement.
 """
 
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+# mypy: disallow-untyped-defs=False
+
 import logging
 import os
 import re
@@ -17,24 +21,23 @@
 from pipenv.patched.notpip._vendor.packaging.specifiers import Specifier
 from pipenv.patched.notpip._vendor.pkg_resources import RequirementParseError, parse_requirements
 
-from pipenv.patched.notpip._internal.download import (
-    is_archive_file, is_url, path_to_url, url_to_path,
-)
 from pipenv.patched.notpip._internal.exceptions import InstallationError
 from pipenv.patched.notpip._internal.models.index import PyPI, TestPyPI
 from pipenv.patched.notpip._internal.models.link import Link
 from pipenv.patched.notpip._internal.pyproject import make_pyproject_path
 from pipenv.patched.notpip._internal.req.req_install import InstallRequirement
-from pipenv.patched.notpip._internal.utils.misc import is_installable_dir
+from pipenv.patched.notpip._internal.utils.filetypes import ARCHIVE_EXTENSIONS
+from pipenv.patched.notpip._internal.utils.misc import is_installable_dir, splitext
 from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
-from pipenv.patched.notpip._internal.vcs import vcs
+from pipenv.patched.notpip._internal.utils.urls import path_to_url
+from pipenv.patched.notpip._internal.vcs import is_url, vcs
 from pipenv.patched.notpip._internal.wheel import Wheel
 
 if MYPY_CHECK_RUNNING:
-    from typing import (  # noqa: F401
-        Optional, Tuple, Set, Any, Union, Text, Dict,
+    from typing import (
+        Any, Dict, Optional, Set, Tuple, Union,
     )
-    from pipenv.patched.notpip._internal.cache import WheelCache  # noqa: F401
+    from pipenv.patched.notpip._internal.cache import WheelCache
 
 
 __all__ = [
@@ -46,6 +49,15 @@
 operators = Specifier._operators.keys()
 
 
+def is_archive_file(name):
+    # type: (str) -> bool
+    """Return True if `name` is considered an archive file."""
+    ext = splitext(name)[1].lower()
+    if ext in ARCHIVE_EXTENSIONS:
+        return True
+    return False
+
+
 def _strip_extras(path):
     # type: (str) -> Tuple[str, Optional[str]]
     m = re.match(r'^(.+)(\[[^\]]+\])$', path)
@@ -59,6 +71,13 @@ def _strip_extras(path):
     return path_no_extras, extras
 
 
+def convert_extras(extras):
+    # type: (Optional[str]) -> Set[str]
+    if not extras:
+        return set()
+    return Requirement("placeholder" + extras.lower()).extras
+
+
 def parse_editable(editable_req):
     # type: (str) -> Tuple[Optional[str], str, Optional[Set[str]]]
     """Parses an editable requirement into:
@@ -111,9 +130,9 @@ def parse_editable(editable_req):
 
     if '+' not in url:
         raise InstallationError(
-            '%s should either be a path to a local project or a VCS url '
-            'beginning with svn+, git+, hg+, or bzr+' %
-            editable_req
+            '{} is not a valid editable requirement. '
+            'It should either be a path to a local project or a VCS URL '
+            '(beginning with svn+, git+, hg+, or bzr+).'.format(editable_req)
        )
 
     vc_type = url.split('+', 1)[0].lower()
@@ -161,6 +180,37 @@ def deduce_helpful_msg(req):
     return msg
 
 
+class RequirementParts(object):
+    def __init__(
+            self,
+            requirement,  # type: Optional[Requirement]
+            link,  # type: Optional[Link]
+            markers,  # type: Optional[Marker]
+            extras,  # type: Set[str]
+    ):
+        self.requirement = requirement
+        self.link = link
+        self.markers = markers
+        self.extras = extras
+
+
+def parse_req_from_editable(editable_req):
+    # type: (str) -> RequirementParts
+    name, url, extras_override = parse_editable(editable_req)
+
+    if name is not None:
+        try:
+            req = Requirement(name)
+        except InvalidRequirement:
+            raise InstallationError("Invalid requirement: '%s'" % name)
+    else:
+        req = None
+
+    link = Link(url)
+
+    return RequirementParts(req, link, None, extras_override)
+
+
 # ---- The actual constructors follow ----
 
 
@@ -174,45 +224,80 @@ def install_req_from_editable(
     constraint=False  # type: bool
 ):
     # type: (...) -> InstallRequirement
-    name, url, extras_override = parse_editable(editable_req)
-    if url.startswith('file:'):
-        source_dir = url_to_path(url)
-    else:
-        source_dir = None
-
-    if name is not None:
-        try:
-            req = Requirement(name)
-        except InvalidRequirement:
-            raise InstallationError("Invalid requirement: '%s'" % name)
-    else:
-        req = None
+    parts = parse_req_from_editable(editable_req)
+
+    source_dir = parts.link.file_path if parts.link.scheme == 'file' else None
+
     return InstallRequirement(
-        req, comes_from, source_dir=source_dir,
+        parts.requirement, comes_from, source_dir=source_dir,
         editable=True,
-        link=Link(url),
+        link=parts.link,
         constraint=constraint,
         use_pep517=use_pep517,
         isolated=isolated,
         options=options if options else {},
         wheel_cache=wheel_cache,
-        extras=extras_override or (),
+        extras=parts.extras,
     )
 
 
-def install_req_from_line(
-    name,  # type: str
-    comes_from=None,  # type: Optional[Union[str, InstallRequirement]]
-    use_pep517=None,  # type: Optional[bool]
-    isolated=False,  # type: bool
-    options=None,  # type: Optional[Dict[str, Any]]
-    wheel_cache=None,  # type: Optional[WheelCache]
-    constraint=False  # type: bool
-):
-    # type: (...) -> InstallRequirement
-    """Creates an InstallRequirement from a name, which might be a
-    requirement, directory containing 'setup.py', filename, or URL.
+def _looks_like_path(name):
+    # type: (str) -> bool
+    """Checks whether the string "looks like" a path on the filesystem.
+
+    This does not check whether the target actually exists; it judges only
+    from the appearance.
+
+    Returns True if any of the following conditions hold:
+    * a path separator is found (either os.path.sep or os.path.altsep);
+    * a dot is found (which represents the current directory).
+    """
+    if os.path.sep in name:
+        return True
+    if os.path.altsep is not None and os.path.altsep in name:
+        return True
+    if name.startswith("."):
+        return True
+    return False
+
+
+def _get_url_from_path(path, name):
+    # type: (str, str) -> Optional[str]
+    """
+    First, check whether the provided path is an installable directory
+    (e.g. it has a setup.py). If it is, return its URL; if it looks like
+    a directory but is not installable, raise an error.
+
+    Otherwise, check whether the path names an archive file (such as a
+    .whl). If it does not, return None. If the archive exists, return its
+    URL. If it does not exist but the name contains an '@' whose prefix
+    does not look like a path, return None so the name can be treated as
+    a PEP 440 URL requirement; anything else is assumed to be a missing
+    local file, and its URL is returned with a warning.
+    """
+    if _looks_like_path(name) and os.path.isdir(path):
+        if is_installable_dir(path):
+            return path_to_url(path)
+        raise InstallationError(
+            "Directory %r is not installable. Neither 'setup.py' "
+            "nor 'pyproject.toml' found." % name
+        )
+    if not is_archive_file(path):
+        return None
+    if os.path.isfile(path):
+        return path_to_url(path)
+    urlreq_parts = name.split('@', 1)
+    if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):
+        # If the path contains '@' and the part before it does not look
+        # like a path, try to treat it as a PEP 440 URL req instead.
+        return None
+    logger.warning(
+        'Requirement %r looks like a filename, but the '
+        'file does not exist',
+        name
+    )
+    return path_to_url(path)
+
+
+def parse_req_from_line(name, line_source):
+    # type: (str, Optional[str]) -> RequirementParts
     if is_url(name):
         marker_sep = '; '
     else:
@@ -236,26 +321,9 @@ def install_req_from_line(
         link = Link(name)
     else:
         p, extras_as_string = _strip_extras(path)
-        looks_like_dir = os.path.isdir(p) and (
-            os.path.sep in name or
-            (os.path.altsep is not None and os.path.altsep in name) or
-            name.startswith('.')
-        )
-        if looks_like_dir:
-            if not is_installable_dir(p):
-                raise InstallationError(
-                    "Directory %r is not installable. Neither 'setup.py' "
-                    "nor 'pyproject.toml' found." % name
-                )
-            link = Link(path_to_url(p))
-        elif is_archive_file(p):
-            if not os.path.isfile(p):
-                logger.warning(
-                    'Requirement %r looks like a filename, but the '
-                    'file does not exist',
-                    name
-                )
-            link = Link(path_to_url(p))
+        url = _get_url_from_path(p, name)
+        if url is not None:
+            link = Link(url)
 
     # it's a local file, dir, or url
     if link:
@@ -276,10 +344,13 @@ def install_req_from_line(
     else:
         req_as_string = name
 
-    if extras_as_string:
-        extras = Requirement("placeholder" + extras_as_string.lower()).extras
-    else:
-        extras = ()
+    extras = convert_extras(extras_as_string)
+
+    def with_source(text):
+        if not line_source:
+            return text
+        return '{} (from {})'.format(text, line_source)
+
     if req_as_string is not None:
         try:
             req = Requirement(req_as_string)
@@ -291,20 +362,45 @@ def install_req_from_line(
                     not any(op in req_as_string for op in operators)):
                 add_msg = "= is not a valid operator. Did you mean == ?"
else: - add_msg = "" - raise InstallationError( - "Invalid requirement: '%s'\n%s" % (req_as_string, add_msg) + add_msg = '' + msg = with_source( + 'Invalid requirement: {!r}'.format(req_as_string) ) + if add_msg: + msg += '\nHint: {}'.format(add_msg) + raise InstallationError(msg) else: req = None + return RequirementParts(req, link, markers, extras) + + +def install_req_from_line( + name, # type: str + comes_from=None, # type: Optional[Union[str, InstallRequirement]] + use_pep517=None, # type: Optional[bool] + isolated=False, # type: bool + options=None, # type: Optional[Dict[str, Any]] + wheel_cache=None, # type: Optional[WheelCache] + constraint=False, # type: bool + line_source=None, # type: Optional[str] +): + # type: (...) -> InstallRequirement + """Creates an InstallRequirement from a name, which might be a + requirement, directory containing 'setup.py', filename, or URL. + + :param line_source: An optional string describing where the line is from, + for logging purposes in case of an error. + """ + parts = parse_req_from_line(name, line_source) + return InstallRequirement( - req, comes_from, link=link, markers=markers, + parts.requirement, comes_from, link=parts.link, markers=parts.markers, use_pep517=use_pep517, isolated=isolated, options=options if options else {}, wheel_cache=wheel_cache, constraint=constraint, - extras=extras, + extras=parts.extras, ) @@ -319,13 +415,14 @@ def install_req_from_req_string( try: req = Requirement(req_string) except InvalidRequirement: - raise InstallationError("Invalid requirement: '%s'" % req) + raise InstallationError("Invalid requirement: '%s'" % req_string) domains_not_allowed = [ PyPI.file_storage_domain, TestPyPI.file_storage_domain, ] - if req.url and comes_from.link.netloc in domains_not_allowed: + if (req.url and comes_from and comes_from.link and + comes_from.link.netloc in domains_not_allowed): # Explicitly disallow pypi packages that depend on external urls raise InstallationError( "Packages installed from PyPI cannot depend on packages " diff --git a/pipenv/patched/notpip/_internal/req/req_file.py b/pipenv/patched/notpip/_internal/req/req_file.py index 30391cb5cf..ece5498662 100644 --- a/pipenv/patched/notpip/_internal/req/req_file.py +++ b/pipenv/patched/notpip/_internal/req/req_file.py @@ -2,6 +2,9 @@ Requirements file parsing """ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False + from __future__ import absolute_import import optparse @@ -16,26 +19,28 @@ from pipenv.patched.notpip._internal.cli import cmdoptions from pipenv.patched.notpip._internal.download import get_file_content from pipenv.patched.notpip._internal.exceptions import RequirementsFileParseError +from pipenv.patched.notpip._internal.models.search_scope import SearchScope from pipenv.patched.notpip._internal.req.constructors import ( - install_req_from_editable, install_req_from_line, + install_req_from_editable, + install_req_from_line, ) from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Iterator, Tuple, Optional, List, Callable, Text + from typing import ( + Any, Callable, Iterator, List, NoReturn, Optional, Text, Tuple, ) - from pipenv.patched.notpip._internal.req import InstallRequirement # noqa: F401 - from pipenv.patched.notpip._internal.cache import WheelCache # noqa: F401 - from pipenv.patched.notpip._internal.index import PackageFinder # noqa: F401 - from pipenv.patched.notpip._internal.download import PipSession # noqa: F401 + from pipenv.patched.notpip._internal.req import InstallRequirement + from pipenv.patched.notpip._internal.cache import WheelCache + from pipenv.patched.notpip._internal.index import PackageFinder + from pipenv.patched.notpip._internal.network.session import PipSession ReqFileLines = Iterator[Tuple[int, Text]] __all__ = ['parse_requirements'] SCHEME_RE = re.compile(r'^(http|https|file):', re.I) -COMMENT_RE = re.compile(r'(^|\s)+#.*$') +COMMENT_RE = re.compile(r'(^|\s+)#.*$') # Matches environment variable-style values in '${MY_VARIABLE_1}' with the # variable name consisting of only uppercase letters, digits or the '_' @@ -138,7 +143,7 @@ def process_line( session=None, # type: Optional[PipSession] wheel_cache=None, # type: Optional[WheelCache] use_pep517=None, # type: Optional[bool] - constraint=False # type: bool + constraint=False, # type: bool ): # type: (...) 
-> Iterator[InstallRequirement] """Process a single requirements line; This can result in creating/yielding @@ -187,10 +192,16 @@ def process_line( for dest in SUPPORTED_OPTIONS_REQ_DEST: if dest in opts.__dict__ and opts.__dict__[dest]: req_options[dest] = opts.__dict__[dest] + line_source = 'line {} of {}'.format(line_number, filename) yield install_req_from_line( - args_str, line_comes_from, constraint=constraint, + args_str, + comes_from=line_comes_from, use_pep517=use_pep517, - isolated=isolated, options=req_options, wheel_cache=wheel_cache + isolated=isolated, + options=req_options, + wheel_cache=wheel_cache, + constraint=constraint, + line_source=line_source, ) # yield an editable requirement @@ -232,12 +243,14 @@ def process_line( # set finder options elif finder: + find_links = finder.find_links + index_urls = finder.index_urls if opts.index_url: - finder.index_urls = [opts.index_url] + index_urls = [opts.index_url] if opts.no_index is True: - finder.index_urls = [] + index_urls = [] if opts.extra_index_urls: - finder.index_urls.extend(opts.extra_index_urls) + index_urls.extend(opts.extra_index_urls) if opts.find_links: # FIXME: it would be nice to keep track of the source # of the find_links: support a find-links local path @@ -247,12 +260,19 @@ def process_line( relative_to_reqs_file = os.path.join(req_dir, value) if os.path.exists(relative_to_reqs_file): value = relative_to_reqs_file - finder.find_links.append(value) + find_links.append(value) + + search_scope = SearchScope( + find_links=find_links, + index_urls=index_urls, + ) + finder.search_scope = search_scope + if opts.pre: - finder.allow_all_prereleases = True - if opts.trusted_hosts: - finder.secure_origins.extend( - ("*", host, "*") for host in opts.trusted_hosts) + finder.set_allow_all_prereleases() + for host in opts.trusted_hosts or []: + source = 'line {} of {}'.format(line_number, filename) + session.add_trusted_host(host, source=source) def break_args_options(line): @@ -288,6 +308,7 @@ def build_parser(line): # By default optparse sys.exits on parsing errors. We want to wrap # that in our own exception. def parser_exit(self, msg): + # type: (Any, str) -> NoReturn # add offending line msg = 'Invalid requirement: %s\n%s' % (line, msg) raise RequirementsFileParseError(msg) @@ -364,7 +385,7 @@ def expand_env_variables(lines_enum): 1. Strings that contain a `$` aren't accidentally (partially) expanded. 2. Ensure consistency across platforms for requirement files. - These points are the result of a discusssion on the `github pull + These points are the result of a discussion on the `github pull request #3514 `_. Valid characters in variable names follow the `POSIX standard diff --git a/pipenv/patched/notpip/_internal/req/req_install.py b/pipenv/patched/notpip/_internal/req/req_install.py index fd3cead652..2da04659cc 100644 --- a/pipenv/patched/notpip/_internal/req/req_install.py +++ b/pipenv/patched/notpip/_internal/req/req_install.py @@ -1,5 +1,10 @@ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import +import atexit import logging import os import shutil @@ -15,41 +20,54 @@ from pipenv.patched.notpip._vendor.packaging.version import parse as parse_version from pipenv.patched.notpip._vendor.pep517.wrappers import Pep517HookCaller -from pipenv.patched.notpip._internal import wheel +from pipenv.patched.notpip._internal import pep425tags, wheel from pipenv.patched.notpip._internal.build_env import NoOpBuildEnvironment from pipenv.patched.notpip._internal.exceptions import InstallationError -from pipenv.patched.notpip._internal.locations import ( - PIP_DELETE_MARKER_FILENAME, running_under_virtualenv, -) from pipenv.patched.notpip._internal.models.link import Link +from pipenv.patched.notpip._internal.operations.generate_metadata import get_metadata_generator from pipenv.patched.notpip._internal.pyproject import load_pyproject_toml, make_pyproject_path from pipenv.patched.notpip._internal.req.req_uninstall import UninstallPathSet from pipenv.patched.notpip._internal.utils.compat import native_str from pipenv.patched.notpip._internal.utils.hashes import Hashes from pipenv.patched.notpip._internal.utils.logging import indent_log +from pipenv.patched.notpip._internal.utils.marker_files import ( + PIP_DELETE_MARKER_FILENAME, + has_delete_marker_file, +) from pipenv.patched.notpip._internal.utils.misc import ( - _make_build_dir, ask_path_exists, backup_dir, call_subprocess, - display_path, dist_in_site_packages, dist_in_usersite, ensure_dir, - get_installed_version, redact_password_from_url, rmtree, + _make_build_dir, + ask_path_exists, + backup_dir, + display_path, + dist_in_site_packages, + dist_in_usersite, + ensure_dir, + get_installed_version, + hide_url, + redact_auth_from_url, + rmtree, ) from pipenv.patched.notpip._internal.utils.packaging import get_metadata -from pipenv.patched.notpip._internal.utils.setuptools_build import SETUPTOOLS_SHIM +from pipenv.patched.notpip._internal.utils.setuptools_build import make_setuptools_shim_args +from pipenv.patched.notpip._internal.utils.subprocess import ( + call_subprocess, + runner_with_spinner_message, +) from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING -from pipenv.patched.notpip._internal.utils.ui import open_spinner +from pipenv.patched.notpip._internal.utils.virtualenv import running_under_virtualenv from pipenv.patched.notpip._internal.vcs import vcs -from pipenv.patched.notpip._internal.wheel import move_wheel_files if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Optional, Iterable, List, Union, Any, Text, Sequence, Dict + from typing import ( + Any, Dict, Iterable, List, Optional, Sequence, Union, ) - from pipenv.patched.notpip._internal.build_env import BuildEnvironment # noqa: F401 - from pipenv.patched.notpip._internal.cache import WheelCache # noqa: F401 - from pipenv.patched.notpip._internal.index import PackageFinder # noqa: F401 - from pipenv.patched.notpip._vendor.pkg_resources import Distribution # noqa: F401 - from pipenv.patched.notpip._vendor.packaging.specifiers import SpecifierSet # noqa: F401 - from pipenv.patched.notpip._vendor.packaging.markers import Marker # noqa: F401 + from pipenv.patched.notpip._internal.build_env import BuildEnvironment + from pipenv.patched.notpip._internal.cache import WheelCache + from pipenv.patched.notpip._internal.index import PackageFinder + from 
pipenv.patched.notpip._vendor.pkg_resources import Distribution + from pipenv.patched.notpip._vendor.packaging.specifiers import SpecifierSet + from pipenv.patched.notpip._vendor.packaging.markers import Marker logger = logging.getLogger(__name__) @@ -58,7 +76,7 @@ class InstallRequirement(object): """ Represents something that may be installed later on, may have information - about where to fetch the relavant requirement and also contains logic for + about where to fetch the relevant requirement and also contains logic for installing the said requirement. """ @@ -69,7 +87,6 @@ def __init__( source_dir=None, # type: Optional[str] editable=False, # type: bool link=None, # type: Optional[Link] - update=True, # type: bool markers=None, # type: Optional[Marker] use_pep517=None, # type: Optional[bool] isolated=False, # type: bool @@ -83,10 +100,10 @@ def __init__( self.req = req self.comes_from = comes_from self.constraint = constraint - if source_dir is not None: - self.source_dir = os.path.normpath(os.path.abspath(source_dir)) + if source_dir is None: + self.source_dir = None # type: Optional[str] else: - self.source_dir = None + self.source_dir = os.path.normpath(os.path.abspath(source_dir)) self.editable = editable self._wheel_cache = wheel_cache @@ -107,7 +124,6 @@ def __init__( markers = req.marker self.markers = markers - self._egg_info_path = None # type: Optional[str] # This holds the pkg_resources.Distribution object if this requirement # is already available: self.satisfied_by = None @@ -115,16 +131,12 @@ def __init__( # conflicts with another installed distribution: self.conflicts_with = None # Temporary build location - self._temp_build_dir = TempDirectory(kind="req-build") + self._temp_build_dir = None # type: Optional[TempDirectory] # Used to store the global directory where the _temp_build_dir should - # have been created. Cf _correct_build_location method. + # have been created. Cf move_to_correct_build_directory method. self._ideal_build_dir = None # type: Optional[str] - # True if the editable should be updated: - self.update = update # Set to True after successful installation self.install_succeeded = None # type: Optional[bool] - # UninstallPathSet of uninstalled distribution (for possible rollback) - self.uninstalled_pathset = None self.options = options if options else {} # Set to True after successful preparation of this requirement self.prepared = False @@ -156,19 +168,20 @@ def __init__( self.use_pep517 = use_pep517 def __str__(self): + # type: () -> str if self.req: s = str(self.req) if self.link: - s += ' from %s' % redact_password_from_url(self.link.url) + s += ' from %s' % redact_auth_from_url(self.link.url) elif self.link: - s = redact_password_from_url(self.link.url) + s = redact_auth_from_url(self.link.url) else: s = '' if self.satisfied_by is not None: s += ' in %s' % display_path(self.satisfied_by.location) if self.comes_from: if isinstance(self.comes_from, six.string_types): - comes_from = self.comes_from + comes_from = self.comes_from # type: Optional[str] else: comes_from = self.comes_from.from_path() if comes_from: @@ -176,9 +189,25 @@ def __str__(self): return s def __repr__(self): + # type: () -> str return '<%s object: %s editable=%r>' % ( self.__class__.__name__, str(self), self.editable) + def format_debug(self): + # type: () -> str + """An un-tested helper for getting state, for debugging. 
+ """ + attributes = vars(self) + names = sorted(attributes) + + state = ( + "{}={!r}".format(attr, attributes[attr]) for attr in sorted(names) + ) + return '<{name} object: {{{state}}}>'.format( + name=self.__class__.__name__, + state=", ".join(state), + ) + def populate_link(self, finder, upgrade, require_hashes): # type: (PackageFinder, bool, bool) -> None """Ensure that if a link can be found for this, that it is found. @@ -196,7 +225,12 @@ def populate_link(self, finder, upgrade, require_hashes): self.link = finder.find_requirement(self, upgrade) if self._wheel_cache is not None and not require_hashes: old_link = self.link - self.link = self._wheel_cache.get(self.link, self.name) + supported_tags = pep425tags.get_supported() + self.link = self._wheel_cache.get( + link=self.link, + package_name=self.name, + supported_tags=supported_tags, + ) if old_link != self.link: logger.debug('Using cached wheel link: %s', self.link) @@ -226,6 +260,7 @@ def is_pinned(self): @property def installed_version(self): + # type: () -> Optional[str] return get_installed_version(self.name) def match_markers(self, extras_requested=None): @@ -290,20 +325,21 @@ def from_path(self): s += '->' + comes_from return s - def build_location(self, build_dir): - # type: (str) -> Optional[str] + def ensure_build_location(self, build_dir): + # type: (str) -> str assert build_dir is not None - if self._temp_build_dir.path is not None: + if self._temp_build_dir is not None: + assert self._temp_build_dir.path return self._temp_build_dir.path if self.req is None: # for requirement via a path to a directory: the name of the # package is not available yet so we create a temp directory - # Once run_egg_info will have run, we'll be able - # to fix it via _correct_build_location + # Once run_egg_info will have run, we'll be able to fix it via + # move_to_correct_build_directory(). # Some systems have /tmp as a symlink which confuses custom # builds (such as numpy). Thus, we ensure that the real path # is returned. - self._temp_build_dir.create() + self._temp_build_dir = TempDirectory(kind="req-build") self._ideal_build_dir = build_dir return self._temp_build_dir.path @@ -318,59 +354,73 @@ def build_location(self, build_dir): _make_build_dir(build_dir) return os.path.join(build_dir, name) - def _correct_build_location(self): + def move_to_correct_build_directory(self): # type: () -> None - """Move self._temp_build_dir to self._ideal_build_dir/self.req.name + """Move self._temp_build_dir to "self._ideal_build_dir/self.req.name" For some requirements (e.g. a path to a directory), the name of the package is not available until we run egg_info, so the build_location will return a temporary directory and store the _ideal_build_dir. - This is only called by self.run_egg_info to fix the temporary build - directory. + This is only called to "fix" the build directory after generating + metadata. """ if self.source_dir is not None: return assert self.req is not None - assert self._temp_build_dir.path - assert (self._ideal_build_dir is not None and - self._ideal_build_dir.path) # type: ignore - old_location = self._temp_build_dir.path - self._temp_build_dir.path = None + assert self._temp_build_dir + assert ( + self._ideal_build_dir is not None and + self._ideal_build_dir.path # type: ignore + ) + old_location = self._temp_build_dir + self._temp_build_dir = None # checked inside ensure_build_location - new_location = self.build_location(self._ideal_build_dir) + # Figure out the correct place to put the files. 
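+        # (self.req is known by now, so ensure_build_location() resolves to
+        # "<ideal_build_dir>/<req.name>" instead of creating a new temp dir.)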
+ new_location = self.ensure_build_location(self._ideal_build_dir) if os.path.exists(new_location): raise InstallationError( 'A package already exists in %s; please remove it to continue' - % display_path(new_location)) + % display_path(new_location) + ) + + # Move the files to the correct location. logger.debug( 'Moving package %s from %s to new location %s', - self, display_path(old_location), display_path(new_location), + self, display_path(old_location.path), display_path(new_location), ) - shutil.move(old_location, new_location) - self._temp_build_dir.path = new_location - self._ideal_build_dir = None + shutil.move(old_location.path, new_location) + + # Update directory-tracking variables, to be in line with new_location self.source_dir = os.path.normpath(os.path.abspath(new_location)) - self._egg_info_path = None + self._temp_build_dir = TempDirectory( + path=new_location, kind="req-install", + ) # Correct the metadata directory, if it exists if self.metadata_directory: old_meta = self.metadata_directory - rel = os.path.relpath(old_meta, start=old_location) + rel = os.path.relpath(old_meta, start=old_location.path) new_meta = os.path.join(new_location, rel) new_meta = os.path.normpath(os.path.abspath(new_meta)) self.metadata_directory = new_meta + # Done with any "move built files" work, since have moved files to the + # "ideal" build location. Setting to None allows to clearly flag that + # no more moves are needed. + self._ideal_build_dir = None + def remove_temporary_source(self): # type: () -> None """Remove the source files from this requirement, if they are marked for deletion""" - if self.source_dir and os.path.exists( - os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)): + if self.source_dir and has_delete_marker_file(self.source_dir): logger.debug('Removing source in %s', self.source_dir) rmtree(self.source_dir) self.source_dir = None - self._temp_build_dir.cleanup() + if self._temp_build_dir: + self._temp_build_dir.cleanup() + self._temp_build_dir = None self.build_env.cleanup() def check_if_exists(self, use_user_site): @@ -434,7 +484,7 @@ def move_wheel_files( pycompile=True # type: bool ): # type: (...) -> None - move_wheel_files( + wheel.move_wheel_files( self.name, self.req, wheeldir, user=use_user_site, home=home, @@ -447,18 +497,17 @@ def move_wheel_files( # Things valid for sdists @property - def setup_py_dir(self): + def unpacked_source_directory(self): # type: () -> str return os.path.join( self.source_dir, self.link and self.link.subdirectory_fragment or '') @property - def setup_py(self): + def setup_py_path(self): # type: () -> str assert self.source_dir, "No source dir for %s" % self - - setup_py = os.path.join(self.setup_py_dir, 'setup.py') + setup_py = os.path.join(self.unpacked_source_directory, 'setup.py') # Python2 __file__ should not be unicode if six.PY2 and isinstance(setup_py, six.text_type): @@ -467,11 +516,10 @@ def setup_py(self): return setup_py @property - def pyproject_toml(self): + def pyproject_toml_path(self): # type: () -> str assert self.source_dir, "No source dir for %s" % self - - return make_pyproject_path(self.setup_py_dir) + return make_pyproject_path(self.unpacked_source_directory) def load_pyproject_toml(self): # type: () -> None @@ -482,37 +530,24 @@ def load_pyproject_toml(self): use_pep517 attribute can be used to determine whether we should follow the PEP 517 or legacy (setup.py) code path. 
""" - pep517_data = load_pyproject_toml( + pyproject_toml_data = load_pyproject_toml( self.use_pep517, - self.pyproject_toml, - self.setup_py, + self.pyproject_toml_path, + self.setup_py_path, str(self) ) - if pep517_data is None: + if pyproject_toml_data is None: self.use_pep517 = False - else: - self.use_pep517 = True - requires, backend, check = pep517_data - self.requirements_to_check = check - self.pyproject_requires = requires - self.pep517_backend = Pep517HookCaller(self.setup_py_dir, backend) - - # Use a custom function to call subprocesses - self.spin_message = "" - - def runner(cmd, cwd=None, extra_environ=None): - with open_spinner(self.spin_message) as spinner: - call_subprocess( - cmd, - cwd=cwd, - extra_environ=extra_environ, - show_stdout=False, - spinner=spinner - ) - self.spin_message = "" + return - self.pep517_backend._subprocess_runner = runner + self.use_pep517 = True + requires, backend, check = pyproject_toml_data + self.requirements_to_check = check + self.pyproject_requires = requires + self.pep517_backend = Pep517HookCaller( + self.unpacked_source_directory, backend + ) def prepare_metadata(self): # type: () -> None @@ -523,11 +558,9 @@ def prepare_metadata(self): """ assert self.source_dir + metadata_generator = get_metadata_generator(self) with indent_log(): - if self.use_pep517: - self.prepare_pep517_metadata() - else: - self.run_egg_info() + self.metadata_directory = metadata_generator(self) if not self.req: if isinstance(parse_version(self.metadata["Version"]), Version): @@ -541,7 +574,7 @@ def prepare_metadata(self): self.metadata["Version"], ]) ) - self._correct_build_location() + self.move_to_correct_build_directory() else: metadata_name = canonicalize_name(self.metadata["Name"]) if canonicalize_name(self.req.name) != metadata_name: @@ -554,116 +587,31 @@ def prepare_metadata(self): self.req = Requirement(metadata_name) def prepare_pep517_metadata(self): - # type: () -> None + # type: () -> str assert self.pep517_backend is not None - metadata_dir = os.path.join( - self.setup_py_dir, - 'pip-wheel-metadata' - ) - ensure_dir(metadata_dir) + # NOTE: This needs to be refactored to stop using atexit + metadata_tmpdir = TempDirectory(kind="modern-metadata") + atexit.register(metadata_tmpdir.cleanup) + + metadata_dir = metadata_tmpdir.path with self.build_env: # Note that Pep517HookCaller implements a fallback for # prepare_metadata_for_build_wheel, so we don't have to # consider the possibility that this hook doesn't exist. 
+ runner = runner_with_spinner_message("Preparing wheel metadata") backend = self.pep517_backend - self.spin_message = "Preparing wheel metadata" - distinfo_dir = backend.prepare_metadata_for_build_wheel( - metadata_dir - ) - - self.metadata_directory = os.path.join(metadata_dir, distinfo_dir) - - def run_egg_info(self): - # type: () -> None - if self.name: - logger.debug( - 'Running setup.py (path:%s) egg_info for package %s', - self.setup_py, self.name, - ) - else: - logger.debug( - 'Running setup.py (path:%s) egg_info for package from %s', - self.setup_py, self.link, - ) - script = SETUPTOOLS_SHIM % self.setup_py - sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) - base_cmd = [sys_executable, '-c', script] - if self.isolated: - base_cmd += ["--no-user-cfg"] - egg_info_cmd = base_cmd + ['egg_info'] - # We can't put the .egg-info files at the root, because then the - # source code will be mistaken for an installed egg, causing - # problems - if self.editable: - egg_base_option = [] # type: List[str] - else: - egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info') - ensure_dir(egg_info_dir) - egg_base_option = ['--egg-base', 'pip-egg-info'] - with self.build_env: - call_subprocess( - egg_info_cmd + egg_base_option, - cwd=self.setup_py_dir, - show_stdout=False, - command_desc='python setup.py egg_info') - - @property - def egg_info_path(self): - # type: () -> str - if self._egg_info_path is None: - if self.editable: - base = self.source_dir - else: - base = os.path.join(self.setup_py_dir, 'pip-egg-info') - filenames = os.listdir(base) - if self.editable: - filenames = [] - for root, dirs, files in os.walk(base): - for dir in vcs.dirnames: - if dir in dirs: - dirs.remove(dir) - # Iterate over a copy of ``dirs``, since mutating - # a list while iterating over it can cause trouble. - # (See https://github.com/pypa/pip/pull/462.) - for dir in list(dirs): - # Don't search in anything that looks like a virtualenv - # environment - if ( - os.path.lexists( - os.path.join(root, dir, 'bin', 'python') - ) or - os.path.exists( - os.path.join( - root, dir, 'Scripts', 'Python.exe' - ) - )): - dirs.remove(dir) - # Also don't search through tests - elif dir == 'test' or dir == 'tests': - dirs.remove(dir) - filenames.extend([os.path.join(root, dir) - for dir in dirs]) - filenames = [f for f in filenames if f.endswith('.egg-info')] - - if not filenames: - raise InstallationError( - "Files/directories not found in %s" % base - ) - # if we have more than one match, we pick the toplevel one. This - # can easily be the case if there is a dist folder which contains - # an extracted tarball for testing purposes. 
-            if len(filenames) > 1:
-                filenames.sort(
-                    key=lambda x: x.count(os.path.sep) +
-                    (os.path.altsep and x.count(os.path.altsep) or 0)
+            with backend.subprocess_runner(runner):
+                distinfo_dir = backend.prepare_metadata_for_build_wheel(
+                    metadata_dir
                 )
-            self._egg_info_path = os.path.join(base, filenames[0])
-        return self._egg_info_path
+
+        return os.path.join(metadata_dir, distinfo_dir)
 
     @property
     def metadata(self):
+        # type: () -> Any
         if not hasattr(self, '_metadata'):
             self._metadata = get_metadata(self.get_dist())
 
@@ -672,22 +620,21 @@ def metadata(self):
     def get_dist(self):
         # type: () -> Distribution
         """Return a pkg_resources.Distribution for this requirement"""
-        if self.metadata_directory:
-            base_dir, distinfo = os.path.split(self.metadata_directory)
-            metadata = pkg_resources.PathMetadata(
-                base_dir, self.metadata_directory
-            )
-            dist_name = os.path.splitext(distinfo)[0]
-            typ = pkg_resources.DistInfoDistribution
+        dist_dir = self.metadata_directory.rstrip(os.sep)
+
+        # Determine the correct Distribution object type.
+        if dist_dir.endswith(".egg-info"):
+            dist_cls = pkg_resources.Distribution
         else:
-            egg_info = self.egg_info_path.rstrip(os.path.sep)
-            base_dir = os.path.dirname(egg_info)
-            metadata = pkg_resources.PathMetadata(base_dir, egg_info)
-            dist_name = os.path.splitext(os.path.basename(egg_info))[0]
-            # https://github.com/python/mypy/issues/1174
-            typ = pkg_resources.Distribution  # type: ignore
-
-        return typ(
+            assert dist_dir.endswith(".dist-info")
+            dist_cls = pkg_resources.DistInfoDistribution
+
+        # Build a PathMetadata object, from path to metadata. :wink:
+        base_dir, dist_dir_name = os.path.split(dist_dir)
+        dist_name = os.path.splitext(dist_dir_name)[0]
+        metadata = pkg_resources.PathMetadata(base_dir, dist_dir)
+
+        return dist_cls(
             base_dir,
             project_name=dist_name,
             metadata=metadata,
@@ -713,7 +660,7 @@ def assert_source_matches_version(self):
 
     # For both source distributions and editables
     def ensure_has_source_dir(self, parent_dir):
-        # type: (str) -> str
+        # type: (str) -> None
         """Ensure that a source_dir is set.
 
         This will create a temporary build dir if the name of the requirement
         isn't known yet.
 
         :param parent_dir: The ideal pip parent_dir for the source_dir.
             Generally src_dir for editables and build_dir for sdists.
-        :return: self.source_dir
         """
         if self.source_dir is None:
-            self.source_dir = self.build_location(parent_dir)
-        return self.source_dir
+            self.source_dir = self.ensure_build_location(parent_dir)
 
     # For editable installations
     def install_editable(
         self,
         install_options,  # type: List[str]
         global_options=(),  # type: Sequence[str]
         prefix=None  # type: Optional[str]
     ):
         # type: (...) -> None
         logger.info('Running setup.py develop for %s', self.name)
 
-        if self.isolated:
-            global_options = list(global_options) + ["--no-user-cfg"]
-
         if prefix:
             prefix_param = ['--prefix={}'.format(prefix)]
             install_options = list(install_options) + prefix_param
-
+        base_cmd = make_setuptools_shim_args(
+            self.setup_py_path,
+            global_options=global_options,
+            no_user_config=self.isolated
+        )
         with indent_log():
-            # FIXME: should we do --install-headers here too?
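+            # (base_cmd, built above via make_setuptools_shim_args(), is
+            # roughly [python, '-c', <setuptools shim>] plus any global
+            # options, so the command below amounts to running
+            # "setup.py develop --no-deps" with the install options
+            # appended.)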
with self.build_env: - sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) call_subprocess( - [ - sys_executable, - '-c', - SETUPTOOLS_SHIM % self.setup_py - ] + - list(global_options) + + base_cmd + ['develop', '--no-deps'] + list(install_options), - - cwd=self.setup_py_dir, - show_stdout=False, + cwd=self.unpacked_source_directory, ) self.install_succeeded = True @@ -779,16 +717,14 @@ def update_editable(self, obtain=True): # Static paths don't get updated return assert '+' in self.link.url, "bad url: %r" % self.link.url - if not self.update: - return vc_type, url = self.link.url.split('+', 1) - backend = vcs.get_backend(vc_type) - if backend: - vcs_backend = backend(self.link.url) + vcs_backend = vcs.get_backend(vc_type) + if vcs_backend: + hidden_url = hide_url(self.link.url) if obtain: - vcs_backend.obtain(self.source_dir) + vcs_backend.obtain(self.source_dir, url=hidden_url) else: - vcs_backend.export(self.source_dir) + vcs_backend.export(self.source_dir, url=hidden_url) else: assert 0, ( 'Unexpected version control type (in %s): %s' @@ -820,6 +756,7 @@ def uninstall(self, auto_confirm=False, verbose=False, return uninstalled_pathset def _clean_zip_name(self, name, prefix): # only used by archive. + # type: (str, str) -> str assert name.startswith(prefix + os.path.sep), ( "name %r doesn't start with prefix %r" % (name, prefix) ) @@ -833,14 +770,18 @@ def _get_archive_name(self, path, parentdir, rootdir): name = self._clean_zip_name(path, rootdir) return self.name + '/' + name - # TODO: Investigate if this should be kept in InstallRequirement - # Seems to be used only when VCS + downloads def archive(self, build_dir): # type: (str) -> None + """Saves archive to provided build_dir. + + Used for saving downloaded VCS requirements as part of `pip download`. + """ assert self.source_dir + create_archive = True archive_name = '%s-%s.zip' % (self.name, self.metadata["version"]) archive_path = os.path.join(build_dir, archive_name) + if os.path.exists(archive_path): response = ask_path_exists( 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup, (a)bort ' % @@ -860,32 +801,37 @@ def archive(self, build_dir): shutil.move(archive_path, dest_file) elif response == 'a': sys.exit(-1) - if create_archive: - zip = zipfile.ZipFile( - archive_path, 'w', zipfile.ZIP_DEFLATED, - allowZip64=True + + if not create_archive: + return + + zip_output = zipfile.ZipFile( + archive_path, 'w', zipfile.ZIP_DEFLATED, allowZip64=True, + ) + with zip_output: + dir = os.path.normcase( + os.path.abspath(self.unpacked_source_directory) ) - dir = os.path.normcase(os.path.abspath(self.setup_py_dir)) for dirpath, dirnames, filenames in os.walk(dir): if 'pip-egg-info' in dirnames: dirnames.remove('pip-egg-info') for dirname in dirnames: - dir_arcname = self._get_archive_name(dirname, - parentdir=dirpath, - rootdir=dir) + dir_arcname = self._get_archive_name( + dirname, parentdir=dirpath, rootdir=dir, + ) zipdir = zipfile.ZipInfo(dir_arcname + '/') zipdir.external_attr = 0x1ED << 16 # 0o755 - zip.writestr(zipdir, '') + zip_output.writestr(zipdir, '') for filename in filenames: if filename == PIP_DELETE_MARKER_FILENAME: continue - file_arcname = self._get_archive_name(filename, - parentdir=dirpath, - rootdir=dir) + file_arcname = self._get_archive_name( + filename, parentdir=dirpath, rootdir=dir, + ) filename = os.path.join(dirpath, filename) - zip.write(filename, file_arcname) - zip.close() - logger.info('Saved %s', display_path(archive_path)) + zip_output.write(filename, file_arcname) + + logger.info('Saved %s', display_path(archive_path)) def install( self, @@ -927,25 +873,20 @@ def install( install_options = list(install_options) + \ self.options.get('install_options', []) - if self.isolated: - # https://github.com/python/mypy/issues/1174 - global_options = global_options + ["--no-user-cfg"] # type: ignore - with TempDirectory(kind="record") as temp_dir: record_filename = os.path.join(temp_dir.path, 'install-record.txt') install_args = self.get_install_args( global_options, record_filename, root, prefix, pycompile, ) - msg = 'Running setup.py install for %s' % (self.name,) - with open_spinner(msg) as spinner: - with indent_log(): - with self.build_env: - call_subprocess( - install_args + install_options, - cwd=self.setup_py_dir, - show_stdout=False, - spinner=spinner, - ) + + runner = runner_with_spinner_message( + "Running setup.py install for {}".format(self.name) + ) + with indent_log(), self.build_env: + runner( + cmd=install_args + install_options, + cwd=self.unpacked_source_directory, + ) if not os.path.exists(record_filename): logger.debug('Record file %s not found', record_filename) @@ -953,6 +894,7 @@ def install( self.install_succeeded = True def prepend_root(path): + # type: (str) -> str if root is None or not os.path.isabs(path): return path else: @@ -971,7 +913,6 @@ def prepend_root(path): self, ) # FIXME: put the record somewhere - # FIXME: should this be an error? return new_lines = [] with open(record_filename) as f: @@ -997,12 +938,13 @@ def get_install_args( pycompile # type: bool ): # type: (...) 
-> List[str]
-        sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable)
-        install_args = [sys_executable, "-u"]
-        install_args.append('-c')
-        install_args.append(SETUPTOOLS_SHIM % self.setup_py)
-        install_args += list(global_options) + \
-            ['install', '--record', record_filename]
+        install_args = make_setuptools_shim_args(
+            self.setup_py_path,
+            global_options=global_options,
+            no_user_config=self.isolated,
+            unbuffered_output=True
+        )
+        install_args += ['install', '--record', record_filename]
         install_args += ['--single-version-externally-managed']
 
         if root is not None:
diff --git a/pipenv/patched/notpip/_internal/req/req_set.py b/pipenv/patched/notpip/_internal/req/req_set.py
index e7da5b7122..d99dc43497 100644
--- a/pipenv/patched/notpip/_internal/req/req_set.py
+++ b/pipenv/patched/notpip/_internal/req/req_set.py
@@ -1,16 +1,22 @@
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
 from __future__ import absolute_import
 
 import logging
 from collections import OrderedDict
 
+from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name
+
+from pipenv.patched.notpip._internal import pep425tags
 from pipenv.patched.notpip._internal.exceptions import InstallationError
 from pipenv.patched.notpip._internal.utils.logging import indent_log
 from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
 from pipenv.patched.notpip._internal.wheel import Wheel
 
 if MYPY_CHECK_RUNNING:
-    from typing import Optional, List, Tuple, Dict, Iterable  # noqa: F401
-    from pipenv.patched.notpip._internal.req.req_install import InstallRequirement  # noqa: F401
+    from typing import Dict, Iterable, List, Optional, Tuple
+    from pipenv.patched.notpip._internal.req.req_install import InstallRequirement
 
 
 logger = logging.getLogger(__name__)
 
@@ -19,35 +25,54 @@ class RequirementSet(object):
 
-    def __init__(self, require_hashes=False, check_supported_wheels=True):
-        # type: (bool, bool) -> None
+    def __init__(self, require_hashes=False, check_supported_wheels=True,
+                 ignore_compatibility=True):
+        # type: (bool, bool, bool) -> None
         """Create a RequirementSet.
""" self.requirements = OrderedDict() # type: Dict[str, InstallRequirement] # noqa: E501 self.require_hashes = require_hashes self.check_supported_wheels = check_supported_wheels - if ignore_compatibility: - self.check_supported_wheels = False - self.ignore_compatibility = (check_supported_wheels is False or ignore_compatibility is True) - # Mapping of alias: real_name - self.requirement_aliases = {} # type: Dict[str, str] self.unnamed_requirements = [] # type: List[InstallRequirement] self.successfully_downloaded = [] # type: List[InstallRequirement] self.reqs_to_cleanup = [] # type: List[InstallRequirement] + if ignore_compatibility: + self.check_supported_wheels = False + self.ignore_compatibility = (check_supported_wheels is False or ignore_compatibility is True) def __str__(self): - reqs = [req for req in self.requirements.values() - if not req.comes_from] - reqs.sort(key=lambda req: req.name.lower()) - return ' '.join([str(req.req) for req in reqs]) + # type: () -> str + requirements = sorted( + (req for req in self.requirements.values() if not req.comes_from), + key=lambda req: canonicalize_name(req.name), + ) + return ' '.join(str(req.req) for req in requirements) def __repr__(self): - reqs = [req for req in self.requirements.values()] - reqs.sort(key=lambda req: req.name.lower()) - reqs_str = ', '.join([str(req.req) for req in reqs]) - return ('<%s object; %d requirement(s): %s>' - % (self.__class__.__name__, len(reqs), reqs_str)) + # type: () -> str + requirements = sorted( + self.requirements.values(), + key=lambda req: canonicalize_name(req.name), + ) + + format_string = '<{classname} object; {count} requirement(s): {reqs}>' + return format_string.format( + classname=self.__class__.__name__, + count=len(requirements), + reqs=', '.join(str(req.req) for req in requirements), + ) + + def add_unnamed_requirement(self, install_req): + # type: (InstallRequirement) -> None + assert not install_req.name + self.unnamed_requirements.append(install_req) + + def add_named_requirement(self, install_req): + # type: (InstallRequirement) -> None + assert install_req.name + + project_name = canonicalize_name(install_req.name) + self.requirements[project_name] = install_req def add_requirement( self, @@ -70,13 +95,11 @@ def add_requirement( the requirement is not applicable, or [install_req] if the requirement is applicable and has just been added. """ - name = install_req.name - # If the markers do not match, ignore this requirement. if not install_req.match_markers(extras_requested): logger.info( "Ignoring %s: markers '%s' don't match your environment", - name, install_req.markers, + install_req.name, install_req.markers, ) return [], None @@ -86,7 +109,8 @@ def add_requirement( # single requirements file. if install_req.link and install_req.link.is_wheel: wheel = Wheel(install_req.link.filename) - if self.check_supported_wheels and not wheel.supported(): + tags = pep425tags.get_supported() + if (self.check_supported_wheels and not wheel.supported(tags)): raise InstallationError( "%s is not a supported wheel on this platform." % wheel.filename @@ -100,13 +124,12 @@ def add_requirement( # Unnamed requirements are scanned again and the requirement won't be # added as a dependency until after scanning. 
- if not name: - # url or path requirement w/o an egg fragment - self.unnamed_requirements.append(install_req) + if not install_req.name: + self.add_unnamed_requirement(install_req) return [install_req], None try: - existing_req = self.get_requirement(name) + existing_req = self.get_requirement(install_req.name) except KeyError: existing_req = None @@ -120,17 +143,14 @@ def add_requirement( if has_conflicting_requirement: raise InstallationError( "Double requirement given: %s (already in %s, name=%r)" - % (install_req, existing_req, name) + % (install_req, existing_req, install_req.name) ) # When no existing requirement exists, add the requirement as a # dependency and it will be scanned again after. if not existing_req: - self.requirements[name] = install_req - # FIXME: what about other normalizations? E.g., _ vs. -? - if name.lower() != name: - self.requirement_aliases[name.lower()] = name - # We'd want to rescan this requirements later + self.add_named_requirement(install_req) + # We'd want to rescan this requirement later return [install_req], install_req # Assume there's no need to scan, and that we've already @@ -150,7 +170,7 @@ def add_requirement( raise InstallationError( "Could not satisfy constraints for '%s': " "installation from path or url cannot be " - "constrained to a version" % name, + "constrained to a version" % install_req.name, ) # If we're now installing a constraint, mark the existing # object for real installation. @@ -166,29 +186,22 @@ def add_requirement( # scanning again. return [existing_req], existing_req - def has_requirement(self, project_name): + def has_requirement(self, name): # type: (str) -> bool - name = project_name.lower() - if (name in self.requirements and - not self.requirements[name].constraint or - name in self.requirement_aliases and - not self.requirements[self.requirement_aliases[name]].constraint): - return True - return False - - @property - def has_requirements(self): - # type: () -> List[InstallRequirement] - return list(req for req in self.requirements.values() if not - req.constraint) or self.unnamed_requirements - - def get_requirement(self, project_name): + project_name = canonicalize_name(name) + + return ( + project_name in self.requirements and + not self.requirements[project_name].constraint + ) + + def get_requirement(self, name): # type: (str) -> InstallRequirement - for name in project_name, project_name.lower(): - if name in self.requirements: - return self.requirements[name] - if name in self.requirement_aliases: - return self.requirements[self.requirement_aliases[name]] + project_name = canonicalize_name(name) + + if project_name in self.requirements: + return self.requirements[project_name] + pass def cleanup_files(self): diff --git a/pipenv/patched/notpip/_internal/req/req_tracker.py b/pipenv/patched/notpip/_internal/req/req_tracker.py index d17a4187da..1fa4fe7e10 100644 --- a/pipenv/patched/notpip/_internal/req/req_tracker.py +++ b/pipenv/patched/notpip/_internal/req/req_tracker.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False + from __future__ import absolute_import import contextlib @@ -10,9 +13,10 @@ from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import Set, Iterator # noqa: F401 - from pipenv.patched.notpip._internal.req.req_install import InstallRequirement # noqa: F401 - from pipenv.patched.notpip._internal.models.link import Link # noqa: F401 + from types import TracebackType + from typing import Iterator, Optional, Set, Type + from pipenv.patched.notpip._internal.req.req_install import InstallRequirement + from pipenv.patched.notpip._internal.models.link import Link logger = logging.getLogger(__name__) @@ -24,7 +28,6 @@ def __init__(self): self._root = os.environ.get('PIP_REQ_TRACKER') if self._root is None: self._temp_dir = TempDirectory(delete=False, kind='req-tracker') - self._temp_dir.create() self._root = os.environ['PIP_REQ_TRACKER'] = self._temp_dir.path logger.debug('Created requirements tracker %r', self._root) else: @@ -33,9 +36,16 @@ def __init__(self): self._entries = set() # type: Set[InstallRequirement] def __enter__(self): + # type: () -> RequirementTracker return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__( + self, + exc_type, # type: Optional[Type[BaseException]] + exc_val, # type: Optional[BaseException] + exc_tb # type: Optional[TracebackType] + ): + # type: (...) -> None self.cleanup() def _entry_path(self, link): diff --git a/pipenv/patched/notpip/_internal/req/req_uninstall.py b/pipenv/patched/notpip/_internal/req/req_uninstall.py index ce80e6dc91..add3418c0a 100644 --- a/pipenv/patched/notpip/_internal/req/req_uninstall.py +++ b/pipenv/patched/notpip/_internal/req/req_uninstall.py @@ -14,15 +14,30 @@ from pipenv.patched.notpip._internal.utils.compat import WINDOWS, cache_from_source, uses_pycache from pipenv.patched.notpip._internal.utils.logging import indent_log from pipenv.patched.notpip._internal.utils.misc import ( - FakeFile, ask, dist_in_usersite, dist_is_local, egg_link_path, is_local, - normalize_path, renames, rmtree, + FakeFile, + ask, + dist_in_usersite, + dist_is_local, + egg_link_path, + is_local, + normalize_path, + renames, + rmtree, ) from pipenv.patched.notpip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import ( + Any, Callable, Dict, Iterable, Iterator, List, Optional, Set, Tuple, + ) + from pipenv.patched.notpip._vendor.pkg_resources import Distribution logger = logging.getLogger(__name__) def _script_names(dist, script_name, is_gui): + # type: (Distribution, str, bool) -> List[str] """Create the fully qualified name of the files created by {console,gui}_scripts for the given ``dist``. 
     Returns the list of file names
@@ -44,9 +59,11 @@ def _script_names(dist, script_name, is_gui):
 
 
 def _unique(fn):
+    # type: (Callable) -> Callable[..., Iterator[Any]]
     @functools.wraps(fn)
     def unique(*args, **kw):
-        seen = set()
+        # type: (Any, Any) -> Iterator[Any]
+        seen = set()  # type: Set[Any]
         for item in fn(*args, **kw):
             if item not in seen:
                 seen.add(item)
@@ -56,6 +73,7 @@ def unique(*args, **kw):
 
 @_unique
 def uninstallation_paths(dist):
+    # type: (Distribution) -> Iterator[str]
     """
     Yield all the uninstallation paths for dist based on RECORD-without-.py[co]
 
@@ -78,13 +96,14 @@ def uninstallation_paths(dist):
 
 
 def compact(paths):
+    # type: (Iterable[str]) -> Set[str]
     """Compact a path set to contain the minimal number of paths
     necessary to contain all paths in the set. If /a/path/ and
     /a/path/to/a/file.txt are both in the set, leave only the
     shorter path."""
 
     sep = os.path.sep
-    short_paths = set()
+    short_paths = set()  # type: Set[str]
     for path in sorted(paths, key=len):
         should_skip = any(
             path.startswith(shortpath.rstrip("*")) and
@@ -97,6 +116,7 @@ def compact(paths):
 
 
 def compress_for_rename(paths):
+    # type: (Iterable[str]) -> Set[str]
     """Returns a set containing the paths that need to be renamed.
 
     This set may include directories when the original sequence of paths
@@ -106,9 +126,10 @@ def compress_for_rename(paths):
     remaining = set(case_map)
     unchecked = sorted(set(os.path.split(p)[0]
                            for p in case_map.values()), key=len)
-    wildcards = set()
+    wildcards = set()  # type: Set[str]
 
     def norm_join(*a):
+        # type: (str) -> str
         return os.path.normcase(os.path.join(*a))
 
     for root in unchecked:
@@ -117,8 +138,8 @@ def norm_join(*a):
             # This directory has already been handled.
             continue
 
-        all_files = set()
-        all_subdirs = set()
+        all_files = set()  # type: Set[str]
+        all_subdirs = set()  # type: Set[str]
         for dirname, subdirs, files in os.walk(root):
             all_subdirs.update(norm_join(root, dirname, d)
                                for d in subdirs)
@@ -135,6 +156,7 @@ def norm_join(*a):
 
 
 def compress_for_output_listing(paths):
+    # type: (Iterable[str]) -> Tuple[Set[str], Set[str]]
     """Returns a tuple of 2 sets of which paths to display to user
 
     The first set contains paths that would be deleted. Files of a package
@@ -145,7 +167,7 @@ def compress_for_output_listing(paths):
     folders.
     """
 
-    will_remove = list(paths)
+    will_remove = set(paths)
     will_skip = set()
 
     # Determine folders and files
@@ -158,7 +180,8 @@ def compress_for_output_listing(paths):
             folders.add(os.path.dirname(path))
         files.add(path)
 
-    _normcased_files = set(map(os.path.normcase, files))
+    # probably this one https://github.com/python/mypy/issues/390
+    _normcased_files = set(map(os.path.normcase, files))  # type: ignore
 
     folders = compact(folders)
 
@@ -187,30 +210,31 @@ class StashedUninstallPathSet(object):
     """A set of file rename operations to stash files while
     tentatively uninstalling them."""
     def __init__(self):
+        # type: () -> None
         # Mapping from source file root to [Adjacent]TempDirectory
         # for files under that directory.
-        self._save_dirs = {}
+        self._save_dirs = {}  # type: Dict[str, TempDirectory]
         # (old path, new path) tuples for each move that may need
         # to be undone.
-        self._moves = []
+        self._moves = []  # type: List[Tuple[str, str]]
 
     def _get_directory_stash(self, path):
+        # type: (str) -> str
        """Stashes a directory.
 
         Directories are stashed adjacent to their original location if
         possible, or else moved/copied into the user's temp dir."""
 
         try:
-            save_dir = AdjacentTempDirectory(path)
-            save_dir.create()
+            save_dir = AdjacentTempDirectory(path)  # type: TempDirectory
         except OSError:
             save_dir = TempDirectory(kind="uninstall")
-            save_dir.create()
         self._save_dirs[os.path.normcase(path)] = save_dir
 
         return save_dir.path
 
     def _get_file_stash(self, path):
+        # type: (str) -> str
         """Stashes a file.
 
         If no root has been provided, one will be created for the directory
@@ -230,7 +254,6 @@ def _get_file_stash(self, path):
             # Did not find any suitable root
             head = os.path.dirname(path)
             save_dir = TempDirectory(kind='uninstall')
-            save_dir.create()
             self._save_dirs[head] = save_dir
 
         relpath = os.path.relpath(path, head)
@@ -239,15 +262,18 @@ def _get_file_stash(self, path):
         return save_dir.path
 
     def stash(self, path):
+        # type: (str) -> str
         """Stashes the directory or file and returns its new location.
+        Handle symlinks as files to avoid modifying the symlink targets.
         """
-        if os.path.isdir(path):
+        path_is_dir = os.path.isdir(path) and not os.path.islink(path)
+        if path_is_dir:
             new_path = self._get_directory_stash(path)
         else:
             new_path = self._get_file_stash(path)
 
         self._moves.append((path, new_path))
-        if os.path.isdir(path) and os.path.isdir(new_path):
+        if (path_is_dir and os.path.isdir(new_path)):
             # If we're moving a directory, we need to
             # remove the destination first or else it will be
             # moved to inside the existing directory.
@@ -258,6 +284,7 @@ def stash(self, path):
         return new_path
 
     def commit(self):
+        # type: () -> None
         """Commits the uninstall by removing stashed files."""
         for _, save_dir in self._save_dirs.items():
             save_dir.cleanup()
@@ -265,6 +292,7 @@ def commit(self):
         self._save_dirs = {}
 
     def rollback(self):
+        # type: () -> None
         """Undoes the uninstall by moving stashed files back."""
         for p in self._moves:
             logging.info("Moving to %s\n from %s", *p)
@@ -272,7 +300,7 @@ def rollback(self):
         for new_path, path in self._moves:
             try:
                 logger.debug('Replacing %s from %s', new_path, path)
-                if os.path.isfile(new_path):
+                if os.path.isfile(new_path) or os.path.islink(new_path):
                     os.unlink(new_path)
                 elif os.path.isdir(new_path):
                     rmtree(new_path)
@@ -285,6 +313,7 @@ def rollback(self):
 
     @property
     def can_rollback(self):
+        # type: () -> bool
         return bool(self._moves)
 
 
@@ -292,13 +321,15 @@ class UninstallPathSet(object):
     """A set of file paths to be removed in the uninstallation of a
     requirement."""
     def __init__(self, dist):
-        self.paths = set()
-        self._refuse = set()
-        self.pth = {}
+        # type: (Distribution) -> None
+        self.paths = set()  # type: Set[str]
+        self._refuse = set()  # type: Set[str]
+        self.pth = {}  # type: Dict[str, UninstallPthEntries]
         self.dist = dist
         self._moved_paths = StashedUninstallPathSet()
 
     def _permitted(self, path):
+        # type: (str) -> bool
         """
         Return True if the given path is one we are permitted to
         remove/modify, False otherwise.
@@ -307,6 +338,7 @@ def _permitted(self, path):
         return is_local(path)
 
     def add(self, path):
+        # type: (str) -> None
         head, tail = os.path.split(path)
 
         # we normalize the head to resolve parent directory symlinks, but not
@@ -326,6 +358,7 @@ def add(self, path):
             self.add(cache_from_source(path))
 
     def add_pth(self, pth_file, entry):
+        # type: (str, str) -> None
         pth_file = normalize_path(pth_file)
         if self._permitted(pth_file):
             if pth_file not in self.pth:
@@ -335,6 +368,7 @@ def add_pth(self, pth_file, entry):
             self._refuse.add(pth_file)
 
     def remove(self, auto_confirm=False, verbose=False):
+        # type: (bool, bool) -> None
         """Remove paths in ``self.paths`` with confirmation (unless
         ``auto_confirm`` is True)."""
 
@@ -366,10 +400,12 @@ def remove(self, auto_confirm=False, verbose=False):
             logger.info('Successfully uninstalled %s', dist_name_version)
 
     def _allowed_to_proceed(self, verbose):
+        # type: (bool) -> bool
         """Display which files would be deleted and prompt for confirmation
         """
 
         def _display(msg, paths):
+            # type: (str, Iterable[str]) -> None
             if not paths:
                 return
 
@@ -383,7 +419,7 @@ def _display(msg, paths):
         else:
             # In verbose mode, display all the files that are going to be
             # deleted.
-            will_remove = list(self.paths)
+            will_remove = set(self.paths)
             will_skip = set()
 
         _display('Would remove:', will_remove)
@@ -395,24 +431,27 @@ def _display(msg, paths):
         return ask('Proceed (y/n)? ', ('y', 'n')) == 'y'
 
     def rollback(self):
+        # type: () -> None
         """Rollback the changes previously made by remove()."""
         if not self._moved_paths.can_rollback:
             logger.error(
                 "Can't roll back %s; was not uninstalled",
                 self.dist.project_name,
             )
-            return False
+            return
         logger.info('Rolling back uninstall of %s', self.dist.project_name)
         self._moved_paths.rollback()
         for pth in self.pth.values():
             pth.rollback()
 
     def commit(self):
+        # type: () -> None
         """Remove temporary save dir: rollback will no longer be possible."""
         self._moved_paths.commit()
 
     @classmethod
     def from_dist(cls, dist):
+        # type: (Distribution) -> UninstallPathSet
         dist_path = normalize_path(dist.location)
         if not dist_is_local(dist):
             logger.info(
@@ -544,25 +583,33 @@ def from_dist(cls, dist):
 
 class UninstallPthEntries(object):
     def __init__(self, pth_file):
+        # type: (str) -> None
         if not os.path.isfile(pth_file):
             raise UninstallationError(
                 "Cannot remove entries from nonexistent file %s" % pth_file
             )
         self.file = pth_file
-        self.entries = set()
-        self._saved_lines = None
+        self.entries = set()  # type: Set[str]
+        self._saved_lines = None  # type: Optional[List[bytes]]
 
     def add(self, entry):
+        # type: (str) -> None
         entry = os.path.normcase(entry)
         # On Windows, os.path.normcase converts the entry to use
         # backslashes.  This is correct for entries that describe absolute
         # paths outside of site-packages, but all the others use forward
         # slashes.
+        # os.path.splitdrive is used instead of os.path.isabs because isabs
+        # treats non-absolute paths with drive letter markings like c:foo\bar
+        # as absolute paths. It also does not recognize UNC paths if they don't
+        # have more than "\\sever\share". Valid examples: "\\server\share\" or
+        # "\\server\share\folder". Python 2.7.8+ support UNC in splitdrive.
         if WINDOWS and not os.path.splitdrive(entry)[0]:
             entry = entry.replace('\\', '/')
         self.entries.add(entry)
 
     def remove(self):
+        # type: () -> None
         logger.debug('Removing pth entries from %s:', self.file)
         with open(self.file, 'rb') as fh:
             # windows uses '\r\n' with py3k, but uses '\n' with py2.x
@@ -585,6 +632,7 @@ def remove(self):
             fh.writelines(lines)
 
     def rollback(self):
+        # type: () -> bool
         if self._saved_lines is None:
             logger.error(
                 'Cannot roll back changes to %s, none were made', self.file
diff --git a/pipenv/patched/notpip/_internal/self_outdated_check.py b/pipenv/patched/notpip/_internal/self_outdated_check.py
new file mode 100644
index 0000000000..8bf5e9f719
--- /dev/null
+++ b/pipenv/patched/notpip/_internal/self_outdated_check.py
@@ -0,0 +1,244 @@
+# The following comment should be removed at some point in the future.
+# mypy: disallow-untyped-defs=False
+
+from __future__ import absolute_import
+
+import datetime
+import hashlib
+import json
+import logging
+import os.path
+import sys
+
+from pipenv.patched.notpip._vendor import pkg_resources
+from pipenv.patched.notpip._vendor.packaging import version as packaging_version
+from pipenv.patched.notpip._vendor.six import ensure_binary
+
+from pipenv.patched.notpip._internal.collector import LinkCollector
+from pipenv.patched.notpip._internal.index import PackageFinder
+from pipenv.patched.notpip._internal.models.search_scope import SearchScope
+from pipenv.patched.notpip._internal.models.selection_prefs import SelectionPreferences
+from pipenv.patched.notpip._internal.utils.compat import WINDOWS
+from pipenv.patched.notpip._internal.utils.filesystem import (
+    adjacent_tmp_file,
+    check_path_owner,
+    replace,
+)
+from pipenv.patched.notpip._internal.utils.misc import (
+    ensure_dir,
+    get_installed_version,
+    redact_auth_from_url,
+)
+from pipenv.patched.notpip._internal.utils.packaging import get_installer
+from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    import optparse
+    from optparse import Values
+    from typing import Any, Dict, Text, Union
+
+    from pipenv.patched.notpip._internal.network.session import PipSession
+
+
+SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
+
+
+logger = logging.getLogger(__name__)
+
+
+def make_link_collector(
+    session,  # type: PipSession
+    options,  # type: Values
+    suppress_no_index=False,  # type: bool
+):
+    # type: (...) -> LinkCollector
+    """
+    :param session: The Session to use to make requests.
+    :param suppress_no_index: Whether to ignore the --no-index option
+        when constructing the SearchScope object.
+    """
+    index_urls = [options.index_url] + options.extra_index_urls
+    if options.no_index and not suppress_no_index:
+        logger.debug(
+            'Ignoring indexes: %s',
+            ','.join(redact_auth_from_url(url) for url in index_urls),
+        )
+        index_urls = []
+
+    # Make sure find_links is a list before passing to create().
+    find_links = options.find_links or []
+
+    search_scope = SearchScope.create(
+        find_links=find_links, index_urls=index_urls,
+    )
+
+    link_collector = LinkCollector(session=session, search_scope=search_scope)
+
+    return link_collector
+
+
+def _get_statefile_name(key):
+    # type: (Union[str, Text]) -> str
+    key_bytes = ensure_binary(key)
+    name = hashlib.sha224(key_bytes).hexdigest()
+    return name
+
+
+class SelfCheckState(object):
+    def __init__(self, cache_dir):
+        # type: (str) -> None
+        self.state = {}  # type: Dict[str, Any]
+        self.statefile_path = None
+
+        # Try to load the existing state
+        if cache_dir:
+            self.statefile_path = os.path.join(
+                cache_dir, "selfcheck", _get_statefile_name(self.key)
+            )
+            try:
+                with open(self.statefile_path) as statefile:
+                    self.state = json.load(statefile)
+            except (IOError, ValueError, KeyError):
+                # Explicitly suppressing exceptions, since we don't want to
+                # error out if the cache file is invalid.
+                pass
+
+    @property
+    def key(self):
+        return sys.prefix
+
+    def save(self, pypi_version, current_time):
+        # type: (str, datetime.datetime) -> None
+        # If we do not have a path to cache in, don't bother saving.
+        if not self.statefile_path:
+            return
+
+        # Check to make sure that we own the directory
+        if not check_path_owner(os.path.dirname(self.statefile_path)):
+            return
+
+        # Now that we've ensured the directory is owned by this user, we'll go
+        # ahead and make sure that all our directories are created.
+        ensure_dir(os.path.dirname(self.statefile_path))
+
+        state = {
+            # Include the key so it's easy to tell which pip wrote the
+            # file.
+            "key": self.key,
+            "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
+            "pypi_version": pypi_version,
+        }
+
+        text = json.dumps(state, sort_keys=True, separators=(",", ":"))
+
+        with adjacent_tmp_file(self.statefile_path) as f:
+            f.write(ensure_binary(text))
+
+        try:
+            # Since we have a prefix-specific state file, we can just
+            # overwrite whatever is there, no need to check.
+            replace(f.name, self.statefile_path)
+        except OSError:
+            # Best effort.
+            pass
+
+
+def was_installed_by_pip(pkg):
+    # type: (str) -> bool
+    """Checks whether pkg was installed by pip
+
+    This is used not to display the upgrade message when pip is in fact
+    installed by system package manager, such as dnf on Fedora.
+    """
+    try:
+        dist = pkg_resources.get_distribution(pkg)
+        return "pip" == get_installer(dist)
+    except pkg_resources.DistributionNotFound:
+        return False
+
+
+def pip_self_version_check(session, options):
+    # type: (PipSession, optparse.Values) -> None
+    """Check for an update for pip.
+
+    Limit the frequency of checks to once per week. State is stored either in
+    the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
+    of the pip script path.
+ """ + installed_version = get_installed_version("pip") + if not installed_version: + return + + pip_version = packaging_version.parse(installed_version) + pypi_version = None + + try: + state = SelfCheckState(cache_dir=options.cache_dir) + + current_time = datetime.datetime.utcnow() + # Determine if we need to refresh the state + if "last_check" in state.state and "pypi_version" in state.state: + last_check = datetime.datetime.strptime( + state.state["last_check"], + SELFCHECK_DATE_FMT + ) + if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60: + pypi_version = state.state["pypi_version"] + + # Refresh the version if we need to or just see if we need to warn + if pypi_version is None: + # Lets use PackageFinder to see what the latest pip version is + link_collector = make_link_collector( + session, + options=options, + suppress_no_index=True, + ) + + # Pass allow_yanked=False so we don't suggest upgrading to a + # yanked version. + selection_prefs = SelectionPreferences( + allow_yanked=False, + allow_all_prereleases=False, # Explicitly set to False + ) + + finder = PackageFinder.create( + link_collector=link_collector, + selection_prefs=selection_prefs, + ) + best_candidate = finder.find_best_candidate("pip").best_candidate + if best_candidate is None: + return + pypi_version = str(best_candidate.version) + + # save that we've performed a check + state.save(pypi_version, current_time) + + remote_version = packaging_version.parse(pypi_version) + + local_version_is_older = ( + pip_version < remote_version and + pip_version.base_version != remote_version.base_version and + was_installed_by_pip('pip') + ) + + # Determine if our pypi_version is older + if not local_version_is_older: + return + + # Advise "python -m pip" on Windows to avoid issues + # with overwriting pip.exe. + if WINDOWS: + pip_cmd = "python -m pip" + else: + pip_cmd = "pip" + logger.warning( + "You are using pip version %s; however, version %s is " + "available.\nYou should consider upgrading via the " + "'%s install --upgrade pip' command.", + pip_version, pypi_version, pip_cmd + ) + except Exception: + logger.debug( + "There was an error checking the latest version of pip", + exc_info=True, + ) diff --git a/pipenv/patched/notpip/_internal/utils/appdirs.py b/pipenv/patched/notpip/_internal/utils/appdirs.py index 9ce3a1b340..c1ba02d214 100644 --- a/pipenv/patched/notpip/_internal/utils/appdirs.py +++ b/pipenv/patched/notpip/_internal/utils/appdirs.py @@ -2,6 +2,10 @@ This code was taken from https://github.com/ActiveState/appdirs and modified to suit our purposes. """ + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import os @@ -13,9 +17,7 @@ from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - List, Union - ) + from typing import List def user_cache_dir(appname): @@ -220,6 +222,8 @@ def _get_win_folder_from_registry(csidl_name): def _get_win_folder_with_ctypes(csidl_name): # type: (str) -> str + # On Python 2, ctypes.create_unicode_buffer().value returns "unicode", + # which isn't the same as str in the annotation above. 
     csidl_const = {
         "CSIDL_APPDATA": 26,
         "CSIDL_COMMON_APPDATA": 35,
@@ -227,7 +231,8 @@ def _get_win_folder_with_ctypes(csidl_name):
     }[csidl_name]
 
     buf = ctypes.create_unicode_buffer(1024)
-    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+    windll = ctypes.windll  # type: ignore
+    windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
 
     # Downgrade to short path name if have highbit chars. See
     # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
@@ -238,10 +243,11 @@ def _get_win_folder_with_ctypes(csidl_name):
             break
     if has_high_char:
         buf2 = ctypes.create_unicode_buffer(1024)
-        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+        if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
             buf = buf2
 
-    return buf.value
+    # The type: ignore is explained under the type annotation for this function
+    return buf.value  # type: ignore
 
 
 if WINDOWS:
diff --git a/pipenv/patched/notpip/_internal/utils/compat.py b/pipenv/patched/notpip/_internal/utils/compat.py
index 1dad56b01a..758aa0d3bc 100644
--- a/pipenv/patched/notpip/_internal/utils/compat.py
+++ b/pipenv/patched/notpip/_internal/utils/compat.py
@@ -1,5 +1,9 @@
 """Stuff that differs in different Python versions and platform
 distributions."""
+
+# The following comment should be removed at some point in the future.
+# mypy: disallow-untyped-defs=False
+
 from __future__ import absolute_import, division
 
 import codecs
@@ -9,12 +13,21 @@
 import shutil
 import sys
 
-from pipenv.patched.notpip._vendor.six import text_type
+from pipenv.patched.notpip._vendor.six import PY2, text_type
+from pipenv.patched.notpip._vendor.urllib3.util import IS_PYOPENSSL
 
 from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
 
 if MYPY_CHECK_RUNNING:
-    from typing import Tuple, Text  # noqa: F401
+    from typing import Optional, Text, Tuple, Union
+
+try:
+    import _ssl  # noqa
+except ImportError:
+    ssl = None
+else:
+    # This additional assignment was needed to prevent a mypy error.
+    ssl = _ssl
 
 try:
     import ipaddress
@@ -36,10 +49,9 @@
 
 logger = logging.getLogger(__name__)
 
-if sys.version_info >= (3, 4):
-    uses_pycache = True
-    from importlib.util import cache_from_source
-else:
+HAS_TLS = (ssl is not None) or IS_PYOPENSSL
+
+if PY2:
     import imp
 
     try:
@@ -49,41 +61,54 @@
         cache_from_source = None
 
     uses_pycache = cache_from_source is not None
+else:
+    uses_pycache = True
+    from importlib.util import cache_from_source
 
-if sys.version_info >= (3, 5):
-    backslashreplace_decode = "backslashreplace"
-else:
-    # In version 3.4 and older, backslashreplace exists
+
+if PY2:
+    # In Python 2.7, backslashreplace exists
     # but does not support use for decoding.
     # We implement our own replace handler for this
     # situation, so that we can consistently use
    # backslash replacement for all versions.
     def backslashreplace_decode_fn(err):
         raw_bytes = (err.object[i] for i in range(err.start, err.end))
-        if sys.version_info[0] == 2:
-            # Python 2 gave us characters - convert to numeric bytes
-            raw_bytes = (ord(b) for b in raw_bytes)
+        # Python 2 gave us characters - convert to numeric bytes
+        raw_bytes = (ord(b) for b in raw_bytes)
         return u"".join(u"\\x%x" % c for c in raw_bytes), err.end
     codecs.register_error(
         "backslashreplace_decode",
         backslashreplace_decode_fn,
     )
     backslashreplace_decode = "backslashreplace_decode"
+else:
+    backslashreplace_decode = "backslashreplace"
 
 
-def console_to_str(data):
-    # type: (bytes) -> Text
-    """Return a string, safe for output, of subprocess output.
+def str_to_display(data, desc=None):
+    # type: (Union[bytes, Text], Optional[str]) -> Text
+    """
+    For display or logging purposes, convert a bytes object (or text) to
+    text (e.g. unicode in Python 2) safe for output.
+
+    :param desc: An optional phrase describing the input data, for use in
+        the log message if a warning is logged. Defaults to "Bytes object".
 
-    We assume the data is in the locale preferred encoding.
-    If it won't decode properly, we warn the user but decode as
-    best we can.
+    This function should never error out and so can take a best effort
+    approach. It is okay to be lossy if needed since the return value is
+    just for display.
 
-    We also ensure that the output can be safely written to
-    standard output without encoding errors.
+    We assume the data is in the locale preferred encoding. If it won't
+    decode properly, we warn the user but decode as best we can.
+
+    We also ensure that the output can be safely written to standard output
+    without encoding errors.
     """
+    if isinstance(data, text_type):
+        return data
+
+    # Otherwise, data is a bytes object (str in Python 2).
     # First, get the encoding we assume. This is the preferred
     # encoding for the locale, unless that is not found, or
     # it is ASCII, in which case assume UTF-8
@@ -96,10 +121,10 @@ def console_to_str(data):
     try:
         decoded_data = data.decode(encoding)
     except UnicodeDecodeError:
-        logger.warning(
-            "Subprocess output does not appear to be encoded as %s",
-            encoding,
-        )
+        if desc is None:
+            desc = 'Bytes object'
+        msg_format = '{} does not appear to be encoded as %s'.format(desc)
+        logger.warning(msg_format, encoding)
         decoded_data = data.decode(encoding, errors=backslashreplace_decode)
 
     # Make sure we can print the output, by encoding it to the output
@@ -127,19 +152,26 @@ def console_to_str(data):
     return decoded_data
 
 
-if sys.version_info >= (3,):
+def console_to_str(data):
+    # type: (bytes) -> Text
+    """Return a string, safe for output, of subprocess output.
+    """
+    return str_to_display(data, desc='Subprocess output')
+
+
+if PY2:
     def native_str(s, replace=False):
         # type: (str, bool) -> str
-        if isinstance(s, bytes):
-            return s.decode('utf-8', 'replace' if replace else 'strict')
+        # Replace is ignored -- unicode to UTF-8 can't fail
+        if isinstance(s, text_type):
+            return s.encode('utf-8')
         return s
 
 else:
     def native_str(s, replace=False):
         # type: (str, bool) -> str
-        # Replace is ignored -- unicode to UTF-8 can't fail
-        if isinstance(s, text_type):
-            return s.encode('utf-8')
+        if isinstance(s, bytes):
+            return s.decode('utf-8', 'replace' if replace else 'strict')
         return s
 
 
@@ -173,16 +205,17 @@ def get_path_uid(path):
     return file_uid
 
 
-if sys.version_info >= (3, 4):
-    from importlib.machinery import EXTENSION_SUFFIXES
+if PY2:
+    from imp import get_suffixes
 
     def get_extension_suffixes():
-        return EXTENSION_SUFFIXES
+        return [suffix[0] for suffix in get_suffixes()]
+
 else:
-    from imp import get_suffixes
+    from importlib.machinery import EXTENSION_SUFFIXES
 
     def get_extension_suffixes():
-        return [suffix[0] for suffix in get_suffixes()]
+        return EXTENSION_SUFFIXES
 
 
 def expanduser(path):
diff --git a/pipenv/patched/notpip/_internal/utils/deprecation.py b/pipenv/patched/notpip/_internal/utils/deprecation.py
index 2e309ec2c9..dcb407bed8 100644
--- a/pipenv/patched/notpip/_internal/utils/deprecation.py
+++ b/pipenv/patched/notpip/_internal/utils/deprecation.py
@@ -1,6 +1,10 @@
 """
 A module that implements tooling to enable easy warnings about deprecations.
""" + +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging @@ -12,7 +16,10 @@ from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import Any, Optional # noqa: F401 + from typing import Any, Optional + + +DEPRECATION_MSG_PREFIX = "DEPRECATION: " class PipDeprecationWarning(Warning): @@ -75,16 +82,23 @@ def deprecated(reason, replacement, gone_in, issue=None): """ # Construct a nice message. - # This is purposely eagerly formatted as we want it to appear as if someone - # typed this entire message out. - message = "DEPRECATION: " + reason - if replacement is not None: - message += " A possible replacement is {}.".format(replacement) - if issue is not None: - url = "https://github.com/pypa/pip/issues/" + str(issue) - message += " You can find discussion regarding this at {}.".format(url) + # This is eagerly formatted as we want it to get logged as if someone + # typed this entire message out. + sentences = [ + (reason, DEPRECATION_MSG_PREFIX + "{}"), + (gone_in, "pip {} will remove support for this functionality."), + (replacement, "A possible replacement is {}."), + (issue, ( + "You can find discussion regarding this at " + "https://github.com/pypa/pip/issues/{}." + )), + ] + message = " ".join( + template.format(val) for val, template in sentences if val is not None + ) # Raise as an error if it has to be removed. if gone_in is not None and parse(current_version) >= parse(gone_in): raise PipDeprecationWarning(message) + warnings.warn(message, category=PipDeprecationWarning, stacklevel=2) diff --git a/pipenv/patched/notpip/_internal/utils/encoding.py b/pipenv/patched/notpip/_internal/utils/encoding.py index f03fc9013e..5abba6f900 100644 --- a/pipenv/patched/notpip/_internal/utils/encoding.py +++ b/pipenv/patched/notpip/_internal/utils/encoding.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False + import codecs import locale import re @@ -6,16 +9,16 @@ from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import List, Tuple, Text # noqa: F401 + from typing import List, Tuple, Text BOMS = [ - (codecs.BOM_UTF8, 'utf8'), - (codecs.BOM_UTF16, 'utf16'), - (codecs.BOM_UTF16_BE, 'utf16-be'), - (codecs.BOM_UTF16_LE, 'utf16-le'), - (codecs.BOM_UTF32, 'utf32'), - (codecs.BOM_UTF32_BE, 'utf32-be'), - (codecs.BOM_UTF32_LE, 'utf32-le'), + (codecs.BOM_UTF8, 'utf-8'), + (codecs.BOM_UTF16, 'utf-16'), + (codecs.BOM_UTF16_BE, 'utf-16-be'), + (codecs.BOM_UTF16_LE, 'utf-16-le'), + (codecs.BOM_UTF32, 'utf-32'), + (codecs.BOM_UTF32_BE, 'utf-32-be'), + (codecs.BOM_UTF32_LE, 'utf-32-le'), ] # type: List[Tuple[bytes, Text]] ENCODING_RE = re.compile(br'coding[:=]\s*([-\w.]+)') diff --git a/pipenv/patched/notpip/_internal/utils/filesystem.py b/pipenv/patched/notpip/_internal/utils/filesystem.py index d4aae97d54..c1e4507db6 100644 --- a/pipenv/patched/notpip/_internal/utils/filesystem.py +++ b/pipenv/patched/notpip/_internal/utils/filesystem.py @@ -1,7 +1,27 @@ import os import os.path +import shutil +import stat +from contextlib import contextmanager +from tempfile import NamedTemporaryFile + +# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is +# why we ignore the type on this import. 
+from pipenv.patched.notpip._vendor.retrying import retry  # type: ignore
+from pipenv.patched.notpip._vendor.six import PY2
 
 from pipenv.patched.notpip._internal.utils.compat import get_path_uid
+from pipenv.patched.notpip._internal.utils.misc import cast
+from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import BinaryIO, Iterator
+
+    class NamedTemporaryFileResult(BinaryIO):
+        @property
+        def file(self):
+            # type: () -> BinaryIO
+            pass
 
 
 def check_path_owner(path):
@@ -28,3 +48,68 @@ def check_path_owner(path):
     else:
         previous, path = path, os.path.dirname(path)
     return False  # assume we don't own the path
+
+
+def copy2_fixed(src, dest):
+    # type: (str, str) -> None
+    """Wrap shutil.copy2() but map errors copying socket files to
+    SpecialFileError as expected.
+
+    See also https://bugs.python.org/issue37700.
+    """
+    try:
+        shutil.copy2(src, dest)
+    except (OSError, IOError):
+        for f in [src, dest]:
+            try:
+                is_socket_file = is_socket(f)
+            except OSError:
+                # An error has already occurred. Another error here is not
+                # a problem and we can ignore it.
+                pass
+            else:
+                if is_socket_file:
+                    raise shutil.SpecialFileError("`%s` is a socket" % f)
+
+        raise
+
+
+def is_socket(path):
+    # type: (str) -> bool
+    return stat.S_ISSOCK(os.lstat(path).st_mode)
+
+
+@contextmanager
+def adjacent_tmp_file(path):
+    # type: (str) -> Iterator[NamedTemporaryFileResult]
+    """Given a path to a file, open a temp file next to it securely and ensure
+    it is written to disk after the context reaches its end.
+    """
+    with NamedTemporaryFile(
+        delete=False,
+        dir=os.path.dirname(path),
+        prefix=os.path.basename(path),
+        suffix='.tmp',
+    ) as f:
+        result = cast('NamedTemporaryFileResult', f)
+        try:
+            yield result
+        finally:
+            result.file.flush()
+            os.fsync(result.file.fileno())
+
+
+_replace_retry = retry(stop_max_delay=1000, wait_fixed=250)
+
+if PY2:
+    @_replace_retry
+    def replace(src, dest):
+        # type: (str, str) -> None
+        try:
+            os.rename(src, dest)
+        except OSError:
+            os.remove(dest)
+            os.rename(src, dest)
+
+else:
+    replace = _replace_retry(os.replace)
diff --git a/pipenv/patched/notpip/_internal/utils/filetypes.py b/pipenv/patched/notpip/_internal/utils/filetypes.py
new file mode 100644
index 0000000000..482925d1f9
--- /dev/null
+++ b/pipenv/patched/notpip/_internal/utils/filetypes.py
@@ -0,0 +1,16 @@
+"""Filetype information.
+"""
+from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Tuple
+
+WHEEL_EXTENSION = '.whl'
+BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')  # type: Tuple[str, ...]
+XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz',
+                 '.tar.lz', '.tar.lzma')  # type: Tuple[str, ...]
+ZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION)  # type: Tuple[str, ...]
+TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')  # type: Tuple[str, ...]
+ARCHIVE_EXTENSIONS = (
+    ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS
+)
diff --git a/pipenv/patched/notpip/_internal/utils/glibc.py b/pipenv/patched/notpip/_internal/utils/glibc.py
index e2b6d50558..5e3c6b1af2 100644
--- a/pipenv/patched/notpip/_internal/utils/glibc.py
+++ b/pipenv/patched/notpip/_internal/utils/glibc.py
@@ -1,18 +1,48 @@
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
 from __future__ import absolute_import
 
-import ctypes
+import os
 import re
 import warnings
 
 from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
 
 if MYPY_CHECK_RUNNING:
-    from typing import Optional, Tuple  # noqa: F401
+    from typing import Optional, Tuple
 
 
 def glibc_version_string():
     # type: () -> Optional[str]
     "Returns glibc version string, or None if not using glibc."
+    return glibc_version_string_confstr() or glibc_version_string_ctypes()
+
+
+def glibc_version_string_confstr():
+    # type: () -> Optional[str]
+    "Primary implementation of glibc_version_string using os.confstr."
+    # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
+    # to be broken or missing. This strategy is used in the standard library
+    # platform module:
+    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
+    try:
+        # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17":
+        _, version = os.confstr("CS_GNU_LIBC_VERSION").split()
+    except (AttributeError, OSError, ValueError):
+        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+        return None
+    return version
+
+
+def glibc_version_string_ctypes():
+    # type: () -> Optional[str]
+    "Fallback implementation of glibc_version_string using ctypes."
+
+    try:
+        import ctypes
+    except ImportError:
+        return None
 
     # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
     # manpage says, "If filename is NULL, then the returned handle is for the
@@ -56,7 +86,7 @@ def check_glibc_version(version_str, required_major, minimum_minor):
 
 def have_compatible_glibc(required_major, minimum_minor):
     # type: (int, int) -> bool
-    version_str = glibc_version_string()  # type: Optional[str]
+    version_str = glibc_version_string()
     if version_str is None:
         return False
     return check_glibc_version(version_str, required_major, minimum_minor)
diff --git a/pipenv/patched/notpip/_internal/utils/hashes.py b/pipenv/patched/notpip/_internal/utils/hashes.py
index 55cb84117e..ef81612bb7 100644
--- a/pipenv/patched/notpip/_internal/utils/hashes.py
+++ b/pipenv/patched/notpip/_internal/utils/hashes.py
@@ -1,3 +1,6 @@
+# The following comment should be removed at some point in the future.
+# mypy: disallow-untyped-defs=False
+
 from __future__ import absolute_import
 
 import hashlib
@@ -5,20 +8,22 @@
 from pipenv.patched.notpip._vendor.six import iteritems, iterkeys, itervalues
 
 from pipenv.patched.notpip._internal.exceptions import (
-    HashMismatch, HashMissing, InstallationError,
+    HashMismatch,
+    HashMissing,
+    InstallationError,
 )
 from pipenv.patched.notpip._internal.utils.misc import read_chunks
 from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
 
 if MYPY_CHECK_RUNNING:
-    from typing import (  # noqa: F401
+    from typing import (
         Dict, List, BinaryIO, NoReturn, Iterator
     )
     from pipenv.patched.notpip._vendor.six import PY3
     if PY3:
-        from hashlib import _Hash  # noqa: F401
+        from hashlib import _Hash
     else:
-        from hashlib import _hash as _Hash  # noqa: F401
+        from hashlib import _hash as _Hash
 
 
 # The recommended hash algo of the moment. Change this whenever the state of
@@ -44,6 +49,19 @@ def __init__(self, hashes=None):
         """
         self._allowed = {} if hashes is None else hashes
 
+    @property
+    def digest_count(self):
+        # type: () -> int
+        return sum(len(digests) for digests in self._allowed.values())
+
+    def is_hash_allowed(
+        self,
+        hash_name,   # type: str
+        hex_digest,  # type: str
+    ):
+        """Return whether the given hex digest is allowed."""
+        return hex_digest in self._allowed.get(hash_name, [])
+
     def check_against_chunks(self, chunks):
         # type: (Iterator[bytes]) -> None
         """Check good hashes against ones built from iterable of chunks of
diff --git a/pipenv/patched/notpip/_internal/utils/inject_securetransport.py b/pipenv/patched/notpip/_internal/utils/inject_securetransport.py
new file mode 100644
index 0000000000..f367b64ee7
--- /dev/null
+++ b/pipenv/patched/notpip/_internal/utils/inject_securetransport.py
@@ -0,0 +1,36 @@
+"""A helper module that injects SecureTransport, on import.
+
+The import should be done as early as possible, to ensure all requests and
+sessions (or whatever) are created after injecting SecureTransport.
+
+Note that we only do the injection on macOS, when the linked OpenSSL is too
+old to handle TLSv1.2.
+"""
+
+import sys
+
+
+def inject_securetransport():
+    # type: () -> None
+    # Only relevant on macOS
+    if sys.platform != "darwin":
+        return
+
+    try:
+        import ssl
+    except ImportError:
+        return
+
+    # Checks for OpenSSL 1.0.1
+    if ssl.OPENSSL_VERSION_NUMBER >= 0x1000100f:
+        return
+
+    try:
+        from pipenv.patched.notpip._vendor.urllib3.contrib import securetransport
+    except (ImportError, OSError):
+        return
+
+    securetransport.inject_into_urllib3()
+
+
+inject_securetransport()
diff --git a/pipenv/patched/notpip/_internal/utils/logging.py b/pipenv/patched/notpip/_internal/utils/logging.py
index 638c5ca939..d956b6a788 100644
--- a/pipenv/patched/notpip/_internal/utils/logging.py
+++ b/pipenv/patched/notpip/_internal/utils/logging.py
@@ -1,3 +1,6 @@
+# The following comment should be removed at some point in the future.
+# mypy: disallow-untyped-defs=False
+
 from __future__ import absolute_import
 
 import contextlib
@@ -6,10 +9,12 @@
 import logging.handlers
 import os
 import sys
+from logging import Filter, getLogger
 
 from pipenv.patched.notpip._vendor.six import PY2
 
 from pipenv.patched.notpip._internal.utils.compat import WINDOWS
+from pipenv.patched.notpip._internal.utils.deprecation import DEPRECATION_MSG_PREFIX
 from pipenv.patched.notpip._internal.utils.misc import ensure_dir
 
 try:
@@ -19,15 +24,36 @@
 try:
-    from pipenv.patched.notpip._vendor import colorama
+    # Use "import as" and set colorama in the else clause to avoid mypy
+    # errors and get the following correct revealed type for colorama:
+    # `Union[_importlib_modulespec.ModuleType, None]`
+    # Otherwise, we get an error like the following in the except block:
+    # > Incompatible types in assignment (expression has type "None",
+    #   variable has type Module)
+    # TODO: eliminate the need to use "import as" once mypy addresses some
+    # of its issues with conditional imports. Here is an umbrella issue:
+    # https://github.com/python/mypy/issues/1297
+    from pipenv.patched.notpip._vendor import colorama as _colorama
 # Lots of different errors can come from this, including SystemError and
 # ImportError.
 except Exception:
     colorama = None
+else:
+    # Import Fore explicitly rather than accessing below as colorama.Fore
+    # to avoid the following error running mypy:
+    # > Module has no attribute "Fore"
+    # TODO: eliminate the need to import Fore once mypy addresses some of its
+    # issues with conditional imports. This particular case could be an
+    # instance of the following issue (but also see the umbrella issue above):
+    # https://github.com/python/mypy/issues/3500
+    from pipenv.patched.notpip._vendor.colorama import Fore
+
+    colorama = _colorama
 
 
 _log_state = threading.local()
 _log_state.indentation = 0
+subprocess_logger = getLogger('pip.subprocessor')
 
 
 class BrokenStdoutLoggingError(Exception):
@@ -90,9 +116,10 @@ def get_indentation():
 
 
 class IndentingFormatter(logging.Formatter):
+
     def __init__(self, *args, **kwargs):
         """
-        A logging.Formatter obeying containing indent_log contexts.
+        A logging.Formatter that obeys the indent_log() context manager.
 
         :param add_timestamp: A bool indicating output lines should be prefixed
             with their record's timestamp.
@@ -100,15 +127,36 @@ def __init__(self, *args, **kwargs):
         self.add_timestamp = kwargs.pop("add_timestamp", False)
         super(IndentingFormatter, self).__init__(*args, **kwargs)
 
+    def get_message_start(self, formatted, levelno):
+        """
+        Return the start of the formatted log message (not counting the
+        prefix to add to each line).
+        """
+        if levelno < logging.WARNING:
+            return ''
+        if formatted.startswith(DEPRECATION_MSG_PREFIX):
+            # Then the message already has a prefix.  We don't want it to
+            # look like "WARNING: DEPRECATION: ...."
+            return ''
+        if levelno < logging.ERROR:
+            return 'WARNING: '
+
+        return 'ERROR: '
+
     def format(self, record):
         """
-        Calls the standard formatter, but will indent all of the log messages
-        by our current indentation level.
+        Calls the standard formatter, but will indent all of the log message
+        lines by our current indentation level.
         """
         formatted = super(IndentingFormatter, self).format(record)
+        message_start = self.get_message_start(formatted, record.levelno)
+        formatted = message_start + formatted
+
         prefix = ''
         if self.add_timestamp:
-            prefix = self.formatTime(record, "%Y-%m-%dT%H:%M:%S ")
+            # TODO: Use Formatter.default_time_format after dropping PY2.
+            t = self.formatTime(record, "%Y-%m-%dT%H:%M:%S")
+            prefix = '%s,%03d ' % (t, record.msecs)
         prefix += " " * get_indentation()
         formatted = "".join([
             prefix + line
@@ -129,8 +177,8 @@ class ColorizedStreamHandler(logging.StreamHandler):
     if colorama:
         COLORS = [
             # This needs to be in order from highest logging level to lowest.
-            (logging.ERROR, _color_wrap(colorama.Fore.RED)),
-            (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
+            (logging.ERROR, _color_wrap(Fore.RED)),
+            (logging.WARNING, _color_wrap(Fore.YELLOW)),
         ]
     else:
         COLORS = []
@@ -205,7 +253,7 @@ def _open(self):
         return logging.handlers.RotatingFileHandler._open(self)
 
 
-class MaxLevelFilter(logging.Filter):
+class MaxLevelFilter(Filter):
     def __init__(self, level):
         self.level = level
 
@@ -214,6 +262,18 @@ def filter(self, record):
         return record.levelno < self.level
 
 
+class ExcludeLoggerFilter(Filter):
+
+    """
+    A logging Filter that excludes records from a logger (or its children).
+    """
+
+    def filter(self, record):
+        # The base Filter class allows only records from a logger (or its
+        # children).
+        return not super(ExcludeLoggerFilter, self).filter(record)
+
+
 def setup_logging(verbosity, no_color, user_log_file):
     """Configures and sets up all of the logging
 
@@ -254,18 +314,29 @@ def setup_logging(verbosity, no_color, user_log_file):
         "stderr": "ext://sys.stderr",
     }
     handler_classes = {
-        "stream": "pip._internal.utils.logging.ColorizedStreamHandler",
-        "file": "pip._internal.utils.logging.BetterRotatingFileHandler",
+        "stream": "pipenv.patched.notpip._internal.utils.logging.ColorizedStreamHandler",
+        "file": "pipenv.patched.notpip._internal.utils.logging.BetterRotatingFileHandler",
     }
+    handlers = ["console", "console_errors", "console_subprocess"] + (
+        ["user_log"] if include_user_log else []
+    )
 
     logging.config.dictConfig({
         "version": 1,
         "disable_existing_loggers": False,
         "filters": {
             "exclude_warnings": {
-                "()": "pip._internal.utils.logging.MaxLevelFilter",
+                "()": "pipenv.patched.notpip._internal.utils.logging.MaxLevelFilter",
                 "level": logging.WARNING,
             },
+            "restrict_to_subprocess": {
+                "()": "logging.Filter",
+                "name": subprocess_logger.name,
+            },
+            "exclude_subprocess": {
+                "()": "pipenv.patched.notpip._internal.utils.logging.ExcludeLoggerFilter",
+                "name": subprocess_logger.name,
+            },
         },
         "formatters": {
             "indent": {
@@ -284,7 +355,7 @@ def setup_logging(verbosity, no_color, user_log_file):
                 "class": handler_classes["stream"],
                 "no_color": no_color,
                 "stream": log_streams["stdout"],
-                "filters": ["exclude_warnings"],
+                "filters": ["exclude_subprocess", "exclude_warnings"],
                 "formatter": "indent",
             },
             "console_errors": {
@@ -292,6 +363,17 @@ def setup_logging(verbosity, no_color, user_log_file):
                 "class": handler_classes["stream"],
                 "no_color": no_color,
                 "stream": log_streams["stderr"],
+                "filters": ["exclude_subprocess"],
                 "formatter": "indent",
             },
+            # A handler responsible for logging to the console messages
+            # from the "subprocessor" logger.
+            "console_subprocess": {
+                "level": level,
+                "class": handler_classes["stream"],
+                "no_color": no_color,
+                "stream": log_streams["stderr"],
+                "filters": ["restrict_to_subprocess"],
+                "formatter": "indent",
+            },
             "user_log": {
@@ -304,9 +386,7 @@ def setup_logging(verbosity, no_color, user_log_file):
         },
         "root": {
             "level": root_level,
-            "handlers": ["console", "console_errors"] + (
-                ["user_log"] if include_user_log else []
-            ),
+            "handlers": handlers,
         },
         "loggers": {
             "pip._vendor": {
diff --git a/pipenv/patched/notpip/_internal/utils/marker_files.py b/pipenv/patched/notpip/_internal/utils/marker_files.py
new file mode 100644
index 0000000000..734cba4c1d
--- /dev/null
+++ b/pipenv/patched/notpip/_internal/utils/marker_files.py
@@ -0,0 +1,27 @@
+# The following comment should be removed at some point in the future.
+# mypy: disallow-untyped-defs=False
+
+import os.path
+
+DELETE_MARKER_MESSAGE = '''\
+This file is placed here by pip to indicate the source was put
+here by pip.
+
+Once this package is successfully installed this source code will be
+deleted (unless you remove this file).
+'''
+PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
+
+
+def has_delete_marker_file(directory):
+    return os.path.exists(os.path.join(directory, PIP_DELETE_MARKER_FILENAME))
+
+
+def write_delete_marker_file(directory):
+    # type: (str) -> None
+    """
+    Write the pip delete marker file into this directory.
+ """ + filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME) + with open(filepath, 'w') as marker_fp: + marker_fp.write(DELETE_MARKER_MESSAGE) diff --git a/pipenv/patched/notpip/_internal/utils/misc.py b/pipenv/patched/notpip/_internal/utils/misc.py index 0a3237a2a7..87af02a4e7 100644 --- a/pipenv/patched/notpip/_internal/utils/misc.py +++ b/pipenv/patched/notpip/_internal/utils/misc.py @@ -1,41 +1,49 @@ +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import contextlib import errno +import getpass import io -import locale -# we have a submodule named 'logging' which would shadow this if we used the -# regular name: -import logging as std_logging +import logging import os import posixpath -import re import shutil import stat -import subprocess import sys -import tarfile -import zipfile from collections import deque from pipenv.patched.notpip._vendor import pkg_resources # NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is # why we ignore the type on this import. from pipenv.patched.notpip._vendor.retrying import retry # type: ignore -from pipenv.patched.notpip._vendor.six import PY2 +from pipenv.patched.notpip._vendor.six import PY2, text_type from pipenv.patched.notpip._vendor.six.moves import input from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse from pipenv.patched.notpip._vendor.six.moves.urllib.parse import unquote as urllib_unquote -from pipenv.patched.notpip._internal.exceptions import CommandError, InstallationError +from pipenv.patched.notpip import __version__ +from pipenv.patched.notpip._internal.exceptions import CommandError from pipenv.patched.notpip._internal.locations import ( - running_under_virtualenv, site_packages, user_site, virtualenv_no_global, - write_delete_marker_file, + get_major_minor_version, + site_packages, + user_site, ) from pipenv.patched.notpip._internal.utils.compat import ( - WINDOWS, console_to_str, expanduser, stdlib_pkgs, + WINDOWS, + expanduser, + stdlib_pkgs, + str_to_display, ) +from pipenv.patched.notpip._internal.utils.marker_files import write_delete_marker_file from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.virtualenv import ( + running_under_virtualenv, + virtualenv_no_global, +) if PY2: from io import BytesIO as StringIO @@ -43,51 +51,62 @@ from io import StringIO if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Optional, Tuple, Iterable, List, Match, Union, Any, Mapping, Text, - AnyStr, Container + from typing import ( + Any, AnyStr, Container, Iterable, List, Optional, Text, + Tuple, Union, cast, ) - from pipenv.patched.notpip._vendor.pkg_resources import Distribution # noqa: F401 - from pipenv.patched.notpip._internal.models.link import Link # noqa: F401 - from pipenv.patched.notpip._internal.utils.ui import SpinnerInterface # noqa: F401 + from pipenv.patched.notpip._vendor.pkg_resources import Distribution + + VersionInfo = Tuple[int, int, int] +else: + # typing's cast() is needed at runtime, but we don't want to import typing. + # Thus, we use a dummy no-op version, which we tell mypy to ignore. 
+    def cast(type_, value):  # type: ignore
+        return value
 
 
 __all__ = ['rmtree', 'display_path', 'backup_dir',
            'ask', 'splitext',
           'format_size', 'is_installable_dir',
-           'is_svn_page', 'file_contents',
-           'split_leading_dir', 'has_leading_dir',
           'normalize_path',
           'renames', 'get_prog',
-           'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
           'captured_stdout', 'ensure_dir',
-           'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'WHEEL_EXTENSION',
           'get_installed_version', 'remove_auth_from_url']
 
 
-logger = std_logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
-WHEEL_EXTENSION = '.whl'
-BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
-XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
-ZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION)
-TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
-ARCHIVE_EXTENSIONS = (
-    ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
-SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
 
-try:
-    import bz2  # noqa
-    SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
-except ImportError:
-    logger.debug('bz2 module is not available')
+def get_pip_version():
+    # type: () -> str
+    pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..")
+    pip_pkg_dir = os.path.abspath(pip_pkg_dir)
 
-try:
-    # Only for Python 3.3+
-    import lzma  # noqa
-    SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
-except ImportError:
-    logger.debug('lzma module is not available')
+    return (
+        'pip {} from {} (python {})'.format(
+            __version__, pip_pkg_dir, get_major_minor_version(),
+        )
+    )
+
+
+def normalize_version_info(py_version_info):
+    # type: (Tuple[int, ...]) -> Tuple[int, int, int]
+    """
+    Convert a tuple of ints representing a Python version to one of length
+    three.
+
+    :param py_version_info: a tuple of ints representing a Python version,
+        or None to specify no version. The tuple can have any length.
+
+    :return: a tuple of length three if `py_version_info` is non-None.
+        Otherwise, return `py_version_info` unchanged (i.e. None).
+    """
+    if len(py_version_info) < 3:
+        py_version_info += (3 - len(py_version_info)) * (0,)
+    elif len(py_version_info) > 3:
+        py_version_info = py_version_info[:3]
+
+    return cast('VersionInfo', py_version_info)
 
 
 def ensure_dir(path):
@@ -125,8 +144,13 @@ def rmtree_errorhandler(func, path, exc_info):
     """On Windows, the files in .svn are read-only, so when rmtree() tries to
     remove them, an exception is thrown.  We catch that here, remove the
     read-only attribute, and hopefully continue without problems."""
-    # if file type currently read only
-    if os.stat(path).st_mode & stat.S_IREAD:
+    try:
+        has_attr_readonly = not (os.stat(path).st_mode & stat.S_IWRITE)
+    except (IOError, OSError):
+        # it's equivalent to os.path.exists
+        return
+
+    if has_attr_readonly:
         # convert to read/write
         os.chmod(path, stat.S_IWRITE)
         # use the original function to repeat the operation
@@ -136,6 +160,40 @@ def rmtree_errorhandler(func, path, exc_info):
     raise
 
 
+def path_to_display(path):
+    # type: (Optional[Union[str, Text]]) -> Optional[Text]
+    """
+    Convert a bytes (or text) path to text (unicode in Python 2) for display
+    and logging purposes.
+
+    This function should never error out. Also, this function is mainly needed
+    for Python 2 since in Python 3 str paths are already text.
+    """
+    if path is None:
+        return None
+    if isinstance(path, text_type):
+        return path
+    # Otherwise, path is a bytes object (str in Python 2).
+    try:
+        display_path = path.decode(sys.getfilesystemencoding(), 'strict')
+    except UnicodeDecodeError:
+        # Include the full bytes to make troubleshooting easier, even though
+        # it may not be very human readable.
+        if PY2:
+            # Convert the bytes to a readable str representation using
+            # repr(), and then convert the str to unicode.
+            # Also, we add the prefix "b" to the repr() return value both
+            # to make the Python 2 output look like the Python 3 output, and
+            # to signal to the user that this is a bytes representation.
+            display_path = str_to_display('b{!r}'.format(path))
+        else:
+            # Silence the "F821 undefined name 'ascii'" flake8 error since
+            # in Python 3 ascii() is a built-in.
+            display_path = ascii(path)  # noqa: F821
+
+    return display_path
+
+
 def display_path(path):
     # type: (Union[str, Text]) -> str
     """Gives the display value for a given path, making it relative to cwd
@@ -169,15 +227,21 @@ def ask_path_exists(message, options):
     return ask(message, options)
 
 
+def _check_no_input(message):
+    # type: (str) -> None
+    """Raise an error if no input is allowed."""
+    if os.environ.get('PIP_NO_INPUT'):
+        raise Exception(
+            'No input was expected ($PIP_NO_INPUT set); question: %s' %
+            message
+        )
+
+
 def ask(message, options):
     # type: (str, Iterable[str]) -> str
     """Ask the message interactively, with the given possible responses"""
     while 1:
-        if os.environ.get('PIP_NO_INPUT'):
-            raise Exception(
-                'No input was expected ($PIP_NO_INPUT set); question: %s' %
-                message
-            )
+        _check_no_input(message)
         response = input(message)
         response = response.strip().lower()
         if response not in options:
@@ -189,6 +253,20 @@ def ask(message, options):
     return response
 
 
+def ask_input(message):
+    # type: (str) -> str
+    """Ask for input interactively."""
+    _check_no_input(message)
+    return input(message)
+
+
+def ask_password(message):
+    # type: (str) -> str
+    """Ask for a password interactively."""
+    _check_no_input(message)
+    return getpass.getpass(message)
+
+
 def format_size(bytes):
     # type: (float) -> str
     if bytes > 1000 * 1000:
@@ -216,21 +294,6 @@ def is_installable_dir(path):
     return False
 
 
-def is_svn_page(html):
-    # type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]]
-    """
-    Returns true if the page appears to be the index page of an svn repository
-    """
-    return (re.search(r'<title>[^<]*Revision \d+:', html) and
-            re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
-
-
-def file_contents(filename):
-    # type: (str) -> Text
-    with open(filename, 'rb') as fp:
-        return fp.read().decode('utf-8')
-
-
 def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
     """Yield pieces of data from a file-like object until EOF."""
     while True:
@@ -240,34 +303,6 @@ def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
         yield chunk
 
 
-def split_leading_dir(path):
-    # type: (Union[str, Text]) -> List[Union[str, Text]]
-    path = path.lstrip('/').lstrip('\\')
-    if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
-                        '\\' not in path):
-        return path.split('/', 1)
-    elif '\\' in path:
-        return path.split('\\', 1)
-    else:
-        return [path, '']
-
-
-def has_leading_dir(paths):
-    # type: (Iterable[Union[str, Text]]) -> bool
-    """Returns true if all the paths have the same leading path name
-    (i.e., everything is in one subdirectory in an archive)"""
-    common_prefix = None
-    for path in paths:
-        prefix, rest = split_leading_dir(path)
-        if not prefix:
-            return False
-        elif common_prefix is None:
-            common_prefix = prefix
-        elif prefix != common_prefix:
-            return False
-    return True
-
-
 def normalize_path(path, resolve_symlinks=True):
     # type: (str, bool) -> str
     """
@@ -317,10 +352,12 @@ def is_local(path):
 
     If we're not in a virtualenv, all paths are considered "local."
 
+    Caution: this function assumes the head of path has been normalized
+    with normalize_path.
     """
     if not running_under_virtualenv():
         return True
-    return normalize_path(path).startswith(normalize_path(sys.prefix))
+    return path.startswith(normalize_path(sys.prefix))
 
 
 def dist_is_local(dist):
@@ -340,8 +377,7 @@ def dist_in_usersite(dist):
     """
     Return True if given Distribution is installed in user site.
     """
-    norm_path = normalize_path(dist_location(dist))
-    return norm_path.startswith(normalize_path(user_site))
+    return dist_location(dist).startswith(normalize_path(user_site))
 
 
 def dist_in_site_packages(dist):
@@ -350,9 +386,7 @@ def dist_in_site_packages(dist):
     """
     Return True if given Distribution is installed in
     sysconfig.get_python_lib().
     """
-    return normalize_path(
-        dist_location(dist)
-    ).startswith(normalize_path(site_packages))
+    return dist_location(dist).startswith(normalize_path(site_packages))
 
 
 def dist_is_editable(dist):
@@ -367,12 +401,15 @@ def dist_is_editable(dist):
     return False
 
 
-def get_installed_distributions(local_only=True,
-                                skip=stdlib_pkgs,
-                                include_editables=True,
-                                editables_only=False,
-                                user_only=False):
-    # type: (bool, Container[str], bool, bool, bool) -> List[Distribution]
+def get_installed_distributions(
+        local_only=True,  # type: bool
+        skip=stdlib_pkgs,  # type: Container[str]
+        include_editables=True,  # type: bool
+        editables_only=False,  # type: bool
+        user_only=False,  # type: bool
+        paths=None  # type: Optional[List[str]]
+):
+    # type: (...) -> List[Distribution]
     """
     Return a list of installed Distribution objects.
 
@@ -389,7 +426,14 @@ def get_installed_distributions(local_only=True,
     If ``user_only`` is True , only report installations in the user
     site directory.
 
+    If ``paths`` is set, only report the distributions present at the
+    specified list of locations.
     """
+    if paths:
+        working_set = pkg_resources.WorkingSet(paths)
+    else:
+        working_set = pkg_resources.working_set
+
     if local_only:
         local_test = dist_is_local
     else:
@@ -417,7 +461,7 @@ def user_test(d):
         return True
 
     # because of pkg_resources vendoring, mypy cannot find stub in typeshed
-    return [d for d in pkg_resources.working_set  # type: ignore
+    return [d for d in working_set  # type: ignore
             if local_test(d) and
             d.key not in skip and
             editable_test(d) and
@@ -447,12 +491,9 @@ def egg_link_path(dist):
     """
     sites = []
     if running_under_virtualenv():
-        if virtualenv_no_global():
-            sites.append(site_packages)
-        else:
-            sites.append(site_packages)
-            if user_site:
-                sites.append(user_site)
+        sites.append(site_packages)
+        if not virtualenv_no_global() and user_site:
+            sites.append(user_site)
     else:
         if user_site:
             sites.append(user_site)
@@ -473,331 +514,17 @@ def dist_location(dist):
     packages, where dist.location is the source code location, and we want
     to know where the egg-link file is.
 
+    The returned location is normalized (in particular, with symlinks removed).
     """
     egg_link = egg_link_path(dist)
     if egg_link:
-        return egg_link
-    return dist.location
-
-
-def current_umask():
-    """Get the current umask which involves having to set it temporarily."""
-    mask = os.umask(0)
-    os.umask(mask)
-    return mask
+        return normalize_path(egg_link)
+    return normalize_path(dist.location)
 
 
-def unzip_file(filename, location, flatten=True):
-    # type: (str, str, bool) -> None
-    """
-    Unzip the file (with path `filename`) to the destination `location`.  All
All - files are written based on system defaults and umask (i.e. permissions are - not preserved), except that regular file members with any execute - permissions (user, group, or world) have "chmod +x" applied after being - written. Note that for windows, any execute changes using os.chmod are - no-ops per the python docs. - """ - ensure_dir(location) - zipfp = open(filename, 'rb') - try: - zip = zipfile.ZipFile(zipfp, allowZip64=True) - leading = has_leading_dir(zip.namelist()) and flatten - for info in zip.infolist(): - name = info.filename - fn = name - if leading: - fn = split_leading_dir(name)[1] - fn = os.path.join(location, fn) - dir = os.path.dirname(fn) - if fn.endswith('/') or fn.endswith('\\'): - # A directory - ensure_dir(fn) - else: - ensure_dir(dir) - # Don't use read() to avoid allocating an arbitrarily large - # chunk of memory for the file's content - fp = zip.open(name) - try: - with open(fn, 'wb') as destfp: - shutil.copyfileobj(fp, destfp) - finally: - fp.close() - mode = info.external_attr >> 16 - # if mode and regular file and any execute permissions for - # user/group/world? - if mode and stat.S_ISREG(mode) and mode & 0o111: - # make dest file have execute for user/group/world - # (chmod +x) no-op on windows per python docs - os.chmod(fn, (0o777 - current_umask() | 0o111)) - finally: - zipfp.close() - - -def untar_file(filename, location): +def write_output(msg, *args): # type: (str, str) -> None - """ - Untar the file (with path `filename`) to the destination `location`. - All files are written based on system defaults and umask (i.e. permissions - are not preserved), except that regular file members with any execute - permissions (user, group, or world) have "chmod +x" applied after being - written. Note that for windows, any execute changes using os.chmod are - no-ops per the python docs. 
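The permission arithmetic used in the unzip code above, `0o777 - current_umask() | 0o111`, grants execute bits while still honoring the process umask (subtraction binds tighter than `|`). A standalone illustration:

    import os

    def current_umask():
        mask = os.umask(0)  # setting the umask is the only portable way to read it
        os.umask(mask)      # restore it immediately
        return mask

    # With a typical umask of 0o022 this prints 0o755: group/world keep
    # read access and everyone gains execute.
    print(oct(0o777 - current_umask() | 0o111))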
- """ - ensure_dir(location) - if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'): - mode = 'r:gz' - elif filename.lower().endswith(BZ2_EXTENSIONS): - mode = 'r:bz2' - elif filename.lower().endswith(XZ_EXTENSIONS): - mode = 'r:xz' - elif filename.lower().endswith('.tar'): - mode = 'r' - else: - logger.warning( - 'Cannot determine compression type for file %s', filename, - ) - mode = 'r:*' - tar = tarfile.open(filename, mode) - try: - leading = has_leading_dir([ - member.name for member in tar.getmembers() - ]) - for member in tar.getmembers(): - fn = member.name - if leading: - # https://github.com/python/mypy/issues/1174 - fn = split_leading_dir(fn)[1] # type: ignore - path = os.path.join(location, fn) - if member.isdir(): - ensure_dir(path) - elif member.issym(): - try: - # https://github.com/python/typeshed/issues/2673 - tar._extract_member(member, path) # type: ignore - except Exception as exc: - # Some corrupt tar files seem to produce this - # (specifically bad symlinks) - logger.warning( - 'In the tar file %s the member %s is invalid: %s', - filename, member.name, exc, - ) - continue - else: - try: - fp = tar.extractfile(member) - except (KeyError, AttributeError) as exc: - # Some corrupt tar files seem to produce this - # (specifically bad symlinks) - logger.warning( - 'In the tar file %s the member %s is invalid: %s', - filename, member.name, exc, - ) - continue - ensure_dir(os.path.dirname(path)) - with open(path, 'wb') as destfp: - shutil.copyfileobj(fp, destfp) - fp.close() - # Update the timestamp (useful for cython compiled files) - # https://github.com/python/typeshed/issues/2673 - tar.utime(member, path) # type: ignore - # member have any execute permissions for user/group/world? - if member.mode & 0o111: - # make dest file have execute for user/group/world - # no-op on windows per python docs - os.chmod(path, (0o777 - current_umask() | 0o111)) - finally: - tar.close() - - -def unpack_file( - filename, # type: str - location, # type: str - content_type, # type: Optional[str] - link # type: Optional[Link] -): - # type: (...) -> None - filename = os.path.realpath(filename) - if (content_type == 'application/zip' or - filename.lower().endswith(ZIP_EXTENSIONS) or - zipfile.is_zipfile(filename)): - unzip_file( - filename, - location, - flatten=not filename.endswith('.whl') - ) - elif (content_type == 'application/x-gzip' or - tarfile.is_tarfile(filename) or - filename.lower().endswith( - TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)): - untar_file(filename, location) - elif (content_type and content_type.startswith('text/html') and - is_svn_page(file_contents(filename))): - # We don't really care about this - from pipenv.patched.notpip._internal.vcs.subversion import Subversion - Subversion('svn+' + link.url).unpack(location) - else: - # FIXME: handle? - # FIXME: magic signatures? - logger.critical( - 'Cannot unpack file %s (downloaded from %s, content-type: %s); ' - 'cannot detect archive format', - filename, location, content_type, - ) - raise InstallationError( - 'Cannot determine archive format of %s' % location - ) - - -def call_subprocess( - cmd, # type: List[str] - show_stdout=True, # type: bool - cwd=None, # type: Optional[str] - on_returncode='raise', # type: str - extra_ok_returncodes=None, # type: Optional[Iterable[int]] - command_desc=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - unset_environ=None, # type: Optional[Iterable[str]] - spinner=None # type: Optional[SpinnerInterface] -): - # type: (...) 
-> Optional[Text] - """ - Args: - extra_ok_returncodes: an iterable of integer return codes that are - acceptable, in addition to 0. Defaults to None, which means []. - unset_environ: an iterable of environment variable names to unset - prior to calling subprocess.Popen(). - """ - if extra_ok_returncodes is None: - extra_ok_returncodes = [] - if unset_environ is None: - unset_environ = [] - # This function's handling of subprocess output is confusing and I - # previously broke it terribly, so as penance I will write a long comment - # explaining things. - # - # The obvious thing that affects output is the show_stdout= - # kwarg. show_stdout=True means, let the subprocess write directly to our - # stdout. Even though it is nominally the default, it is almost never used - # inside pip (and should not be used in new code without a very good - # reason); as of 2016-02-22 it is only used in a few places inside the VCS - # wrapper code. Ideally we should get rid of it entirely, because it - # creates a lot of complexity here for a rarely used feature. - # - # Most places in pip set show_stdout=False. What this means is: - # - We connect the child stdout to a pipe, which we read. - # - By default, we hide the output but show a spinner -- unless the - # subprocess exits with an error, in which case we show the output. - # - If the --verbose option was passed (= loglevel is DEBUG), then we show - # the output unconditionally. (But in this case we don't want to show - # the output a second time if it turns out that there was an error.) - # - # stderr is always merged with stdout (even if show_stdout=True). - if show_stdout: - stdout = None - else: - stdout = subprocess.PIPE - if command_desc is None: - cmd_parts = [] - for part in cmd: - if ' ' in part or '\n' in part or '"' in part or "'" in part: - part = '"%s"' % part.replace('"', '\\"') - cmd_parts.append(part) - command_desc = ' '.join(cmd_parts) - logger.debug("Running command %s", command_desc) - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - for name in unset_environ: - env.pop(name, None) - try: - proc = subprocess.Popen( - cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, - stdout=stdout, cwd=cwd, env=env, - ) - proc.stdin.close() - except Exception as exc: - logger.critical( - "Error %s while executing command %s", exc, command_desc, - ) - raise - all_output = [] - if stdout is not None: - while True: - line = console_to_str(proc.stdout.readline()) - if not line: - break - line = line.rstrip() - all_output.append(line + '\n') - if logger.getEffectiveLevel() <= std_logging.DEBUG: - # Show the line immediately - logger.debug(line) - else: - # Update the spinner - if spinner is not None: - spinner.spin() - try: - proc.wait() - finally: - if proc.stdout: - proc.stdout.close() - if spinner is not None: - if proc.returncode: - spinner.finish("error") - else: - spinner.finish("done") - if proc.returncode and proc.returncode not in extra_ok_returncodes: - if on_returncode == 'raise': - if (logger.getEffectiveLevel() > std_logging.DEBUG and - not show_stdout): - logger.info( - 'Complete output from command %s:', command_desc, - ) - logger.info( - ''.join(all_output) + - '\n----------------------------------------' - ) - raise InstallationError( - 'Command "%s" failed with error code %s in %s' - % (command_desc, proc.returncode, cwd)) - elif on_returncode == 'warn': - logger.warning( - 'Command "%s" had error code %s in %s', - command_desc, proc.returncode, cwd, - ) - elif on_returncode == 'ignore': - pass - else: - 
raise ValueError('Invalid value: on_returncode=%s' % - repr(on_returncode)) - if not show_stdout: - return ''.join(all_output) - return None - - -def read_text_file(filename): - # type: (str) -> str - """Return the contents of *filename*. - - Try to decode the file contents with utf-8, the preferred system encoding - (e.g., cp1252 on some Windows machines), and latin1, in that order. - Decoding a byte string with latin1 will never raise an error. In the worst - case, the returned string will contain some garbage characters. - - """ - with open(filename, 'rb') as fp: - data = fp.read() - - encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1'] - for enc in encodings: - try: - # https://github.com/python/mypy/issues/1174 - data = data.decode(enc) # type: ignore - except UnicodeDecodeError: - continue - break - - assert not isinstance(data, bytes) # Latin1 should have worked. - return data + logger.info(msg, *args) def _make_build_dir(build_dir): @@ -922,20 +649,38 @@ def enum(*sequential, **named): return type('Enum', (), enums) -def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): +def build_netloc(host, port): + # type: (str, Optional[int]) -> str """ - Return the URL for a VCS requirement. + Build a netloc from a host-port pair + """ + if port is None: + return host + if ':' in host: + # Only wrap host with square brackets when it is IPv6 + host = '[{}]'.format(host) + return '{}:{}'.format(host, port) + - Args: - repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+"). - project_name: the (unescaped) project name. +def build_url_from_netloc(netloc, scheme='https'): + # type: (str, str) -> str + """ + Build a full URL from a netloc. """ - egg_project_name = pkg_resources.to_filename(project_name) - req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name) - if subdir: - req += '&subdirectory={}'.format(subdir) + if netloc.count(':') >= 2 and '@' not in netloc and '[' not in netloc: + # It must be a bare IPv6 address, so wrap it with brackets. + netloc = '[{}]'.format(netloc) + return '{}://{}'.format(scheme, netloc) - return req + +def parse_netloc(netloc): + # type: (str) -> Tuple[str, Optional[int]] + """ + Return the host-port pair from a netloc. + """ + url = build_url_from_netloc(netloc) + parsed = urllib_parse.urlparse(url) + return parsed.hostname, parsed.port def split_auth_from_netloc(netloc): @@ -969,59 +714,137 @@ def split_auth_from_netloc(netloc): def redact_netloc(netloc): # type: (str) -> str """ - Replace the password in a netloc with "****", if it exists. + Replace the sensitive data in a netloc with "****", if it exists. - For example, "user:pass@example.com" returns "user:****@example.com". + For example: + - "user:pass@example.com" returns "user:****@example.com" + - "accesstoken@example.com" returns "****@example.com" """ netloc, (user, password) = split_auth_from_netloc(netloc) if user is None: return netloc - password = '' if password is None else ':****' - return '{user}{password}@{netloc}'.format(user=urllib_parse.quote(user), + if password is None: + user = '****' + password = '' + else: + user = urllib_parse.quote(user) + password = ':****' + return '{user}{password}@{netloc}'.format(user=user, password=password, netloc=netloc) def _transform_url(url, transform_netloc): + """Transform and replace netloc in a url. + + transform_netloc is a function taking the netloc and returning a + tuple. The first element of this tuple is the new netloc. The + entire tuple is returned. 
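Taken together, the netloc helpers added above behave roughly as follows (hosts and credentials are illustrative):

    build_netloc('::1', 8080)           # '[::1]:8080'   (bare IPv6 gets brackets)
    build_url_from_netloc('::1')        # 'https://[::1]'
    parse_netloc('example.com:8080')    # ('example.com', 8080)
    redact_netloc('user:s3cret@host')   # 'user:****@host'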
+ + Returns a tuple containing the transformed url as item 0 and the + original tuple returned by transform_netloc as item 1. + """ purl = urllib_parse.urlsplit(url) - netloc = transform_netloc(purl.netloc) + netloc_tuple = transform_netloc(purl.netloc) # stripped url url_pieces = ( - purl.scheme, netloc, purl.path, purl.query, purl.fragment + purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment ) surl = urllib_parse.urlunsplit(url_pieces) - return surl + return surl, netloc_tuple def _get_netloc(netloc): - return split_auth_from_netloc(netloc)[0] + return split_auth_from_netloc(netloc) + + +def _redact_netloc(netloc): + return (redact_netloc(netloc),) + + +def split_auth_netloc_from_url(url): + # type: (str) -> Tuple[str, str, Tuple[str, str]] + """ + Parse a url into separate netloc, auth, and url with no auth. + + Returns: (url_without_auth, netloc, (username, password)) + """ + url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc) + return url_without_auth, netloc, auth def remove_auth_from_url(url): # type: (str) -> str - # Return a copy of url with 'username:password@' removed. + """Return a copy of url with 'username:password@' removed.""" # username/pass params are passed to subversion through flags # and are not recognized in the url. - return _transform_url(url, _get_netloc) + return _transform_url(url, _get_netloc)[0] -def redact_password_from_url(url): +def redact_auth_from_url(url): # type: (str) -> str """Replace the password in a given url with ****.""" - return _transform_url(url, redact_netloc) + return _transform_url(url, _redact_netloc)[0] + + +class HiddenText(object): + def __init__( + self, + secret, # type: str + redacted, # type: str + ): + # type: (...) -> None + self.secret = secret + self.redacted = redacted + + def __repr__(self): + # type: (...) -> str + return '<HiddenText {!r}>'.format(str(self)) + + def __str__(self): + # type: (...) -> str + return self.redacted + + # This is useful for testing. + def __eq__(self, other): + # type: (Any) -> bool + if type(self) != type(other): + return False + + # The string being used for redaction doesn't also have to match, + # just the raw, original string. + return (self.secret == other.secret) + + # We need to provide an explicit __ne__ implementation for Python 2. + # TODO: remove this when we drop PY2 support. + def __ne__(self, other): + # type: (Any) -> bool + return not self == other + + +def hide_value(value): + # type: (str) -> HiddenText + return HiddenText(value, redacted='****') + + +def hide_url(url): + # type: (str) -> HiddenText + redacted = redact_auth_from_url(url) + return HiddenText(url, redacted=redacted) def protect_pip_from_modification_on_windows(modifying_pip): + # type: (bool) -> None """Protection of pip.exe from modification on Windows On Windows, any operation modifying pip should be run as: python -m pip ... 
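A short sketch of how the `HiddenText` machinery above is meant to be used (the URL is made up): the redacted form is what ends up in logs, while the secret form is what actually gets executed.

    hidden = hide_url('https://user:s3cret@pypi.example.com/simple')
    str(hidden)     # 'https://user:****@pypi.example.com/simple'
    hidden.secret   # the original, unredacted URL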
""" - pip_names = [ - "pip.exe", - "pip{}.exe".format(sys.version_info[0]), - "pip{}.{}.exe".format(*sys.version_info[:2]) - ] + pip_names = set() + for ext in ('', '.exe'): + pip_names.add('pip{ext}'.format(ext=ext)) + pip_names.add('pip{}{ext}'.format(sys.version_info[0], ext=ext)) + pip_names.add('pip{}.{}{ext}'.format(*sys.version_info[:2], ext=ext)) # See https://github.com/pypa/pip/issues/1299 for more discussion should_show_use_python_msg = ( @@ -1038,3 +861,10 @@ def protect_pip_from_modification_on_windows(modifying_pip): 'To modify pip, please run the following command:\n{}' .format(" ".join(new_command)) ) + + +def is_console_interactive(): + # type: () -> bool + """Is this console interactive? + """ + return sys.stdin is not None and sys.stdin.isatty() diff --git a/pipenv/patched/notpip/_internal/utils/models.py b/pipenv/patched/notpip/_internal/utils/models.py index d5cb80a7cb..29e1441153 100644 --- a/pipenv/patched/notpip/_internal/utils/models.py +++ b/pipenv/patched/notpip/_internal/utils/models.py @@ -1,11 +1,13 @@ """Utilities for defining models """ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False import operator class KeyBasedCompareMixin(object): - """Provides comparision capabilities that is based on a key + """Provides comparison capabilities that is based on a key """ def __init__(self, key, defining_class): diff --git a/pipenv/patched/notpip/_internal/utils/outdated.py b/pipenv/patched/notpip/_internal/utils/outdated.py deleted file mode 100644 index 83dc58cc8d..0000000000 --- a/pipenv/patched/notpip/_internal/utils/outdated.py +++ /dev/null @@ -1,164 +0,0 @@ -from __future__ import absolute_import - -import datetime -import json -import logging -import os.path -import sys - -from pipenv.patched.notpip._vendor import lockfile, pkg_resources -from pipenv.patched.notpip._vendor.packaging import version as packaging_version - -from pipenv.patched.notpip._internal.index import PackageFinder -from pipenv.patched.notpip._internal.utils.compat import WINDOWS -from pipenv.patched.notpip._internal.utils.filesystem import check_path_owner -from pipenv.patched.notpip._internal.utils.misc import ensure_dir, get_installed_version -from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - import optparse # noqa: F401 - from typing import Any, Dict # noqa: F401 - from pipenv.patched.notpip._internal.download import PipSession # noqa: F401 - - -SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" - - -logger = logging.getLogger(__name__) - - -class SelfCheckState(object): - def __init__(self, cache_dir): - # type: (str) -> None - self.state = {} # type: Dict[str, Any] - self.statefile_path = None - - # Try to load the existing state - if cache_dir: - self.statefile_path = os.path.join(cache_dir, "selfcheck.json") - try: - with open(self.statefile_path) as statefile: - self.state = json.load(statefile)[sys.prefix] - except (IOError, ValueError, KeyError): - # Explicitly suppressing exceptions, since we don't want to - # error out if the cache file is invalid. - pass - - def save(self, pypi_version, current_time): - # type: (str, datetime.datetime) -> None - # If we do not have a path to cache in, don't bother saving. 
- if not self.statefile_path: - return - - # Check to make sure that we own the directory - if not check_path_owner(os.path.dirname(self.statefile_path)): - return - - # Now that we've ensured the directory is owned by this user, we'll go - # ahead and make sure that all our directories are created. - ensure_dir(os.path.dirname(self.statefile_path)) - - # Attempt to write out our version check file - with lockfile.LockFile(self.statefile_path): - if os.path.exists(self.statefile_path): - with open(self.statefile_path) as statefile: - state = json.load(statefile) - else: - state = {} - - state[sys.prefix] = { - "last_check": current_time.strftime(SELFCHECK_DATE_FMT), - "pypi_version": pypi_version, - } - - with open(self.statefile_path, "w") as statefile: - json.dump(state, statefile, sort_keys=True, - separators=(",", ":")) - - -def was_installed_by_pip(pkg): - # type: (str) -> bool - """Checks whether pkg was installed by pip - - This is used not to display the upgrade message when pip is in fact - installed by system package manager, such as dnf on Fedora. - """ - try: - dist = pkg_resources.get_distribution(pkg) - return (dist.has_metadata('INSTALLER') and - 'pip' in dist.get_metadata_lines('INSTALLER')) - except pkg_resources.DistributionNotFound: - return False - - -def pip_version_check(session, options): - # type: (PipSession, optparse.Values) -> None - """Check for an update for pip. - - Limit the frequency of checks to once per week. State is stored either in - the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix - of the pip script path. - """ - installed_version = get_installed_version("pip") - if not installed_version: - return - - pip_version = packaging_version.parse(installed_version) - pypi_version = None - - try: - state = SelfCheckState(cache_dir=options.cache_dir) - - current_time = datetime.datetime.utcnow() - # Determine if we need to refresh the state - if "last_check" in state.state and "pypi_version" in state.state: - last_check = datetime.datetime.strptime( - state.state["last_check"], - SELFCHECK_DATE_FMT - ) - if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60: - pypi_version = state.state["pypi_version"] - - # Refresh the version if we need to or just see if we need to warn - if pypi_version is None: - # Lets use PackageFinder to see what the latest pip version is - finder = PackageFinder( - find_links=options.find_links, - index_urls=[options.index_url] + options.extra_index_urls, - allow_all_prereleases=False, # Explicitly set to False - trusted_hosts=options.trusted_hosts, - session=session, - ) - all_candidates = finder.find_all_candidates("pip") - if not all_candidates: - return - pypi_version = str( - max(all_candidates, key=lambda c: c.version).version - ) - - # save that we've performed a check - state.save(pypi_version, current_time) - - remote_version = packaging_version.parse(pypi_version) - - # Determine if our pypi_version is older - if (pip_version < remote_version and - pip_version.base_version != remote_version.base_version and - was_installed_by_pip('pip')): - # Advise "python -m pip" on Windows to avoid issues - # with overwriting pip.exe. 
- if WINDOWS: - pip_cmd = "python -m pip" - else: - pip_cmd = "pip" - logger.warning( - "You are using pip version %s, however version %s is " - "available.\nYou should consider upgrading via the " - "'%s install --upgrade pip' command.", - pip_version, pypi_version, pip_cmd - ) - except Exception: - logger.debug( - "There was an error checking the latest version of pip", - exc_info=True, - ) diff --git a/pipenv/patched/notpip/_internal/utils/packaging.py b/pipenv/patched/notpip/_internal/utils/packaging.py index dc944529a5..dd32f70d61 100644 --- a/pipenv/patched/notpip/_internal/utils/packaging.py +++ b/pipenv/patched/notpip/_internal/utils/packaging.py @@ -7,75 +7,83 @@ from pipenv.patched.notpip._vendor import pkg_resources from pipenv.patched.notpip._vendor.packaging import specifiers, version -from pipenv.patched.notpip._internal import exceptions +from pipenv.patched.notpip._internal.exceptions import NoneMetadataError from pipenv.patched.notpip._internal.utils.misc import display_path from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import Optional # noqa: F401 - from email.message import Message # noqa: F401 - from pipenv.patched.notpip._vendor.pkg_resources import Distribution # noqa: F401 + from typing import Optional, Tuple + from email.message import Message + from pipenv.patched.notpip._vendor.pkg_resources import Distribution logger = logging.getLogger(__name__) -def check_requires_python(requires_python): - # type: (Optional[str]) -> bool +def check_requires_python(requires_python, version_info): + # type: (Optional[str], Tuple[int, ...]) -> bool """ - Check if the python version in use match the `requires_python` specifier. + Check if the given Python version matches a "Requires-Python" specifier. - Returns `True` if the version of python in use matches the requirement. - Returns `False` if the version of python in use does not matches the - requirement. + :param version_info: A 3-tuple of ints representing a Python + major-minor-micro version to check (e.g. `sys.version_info[:3]`). - Raises an InvalidSpecifier if `requires_python` have an invalid format. + :return: `True` if the given Python version satisfies the requirement. + Otherwise, return `False`. + + :raises InvalidSpecifier: If `requires_python` has an invalid format. """ if requires_python is None: # The package provides no information return True requires_python_specifier = specifiers.SpecifierSet(requires_python) - # We only use major.minor.micro python_version = version.parse('{0}.{1}.{2}'.format(*sys.version_info[:3])) return python_version in requires_python_specifier def get_metadata(dist): # type: (Distribution) -> Message + """ + :raises NoneMetadataError: if the distribution reports `has_metadata()` + True but `get_metadata()` returns None. + """ + metadata_name = 'METADATA' if (isinstance(dist, pkg_resources.DistInfoDistribution) and - dist.has_metadata('METADATA')): - metadata = dist.get_metadata('METADATA') + dist.has_metadata(metadata_name)): + metadata = dist.get_metadata(metadata_name) elif dist.has_metadata('PKG-INFO'): - metadata = dist.get_metadata('PKG-INFO') + metadata_name = 'PKG-INFO' + metadata = dist.get_metadata(metadata_name) else: logger.warning("No metadata found in %s", display_path(dist.location)) metadata = '' + if metadata is None: + raise NoneMetadataError(dist, metadata_name) + feed_parser = FeedParser() + # The following line errors out if with a "NoneType" TypeError if + # passed metadata=None. 
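The test in `check_requires_python` above reduces to a `packaging` specifier-set membership check; in isolation:

    from pipenv.patched.notpip._vendor.packaging import specifiers, version

    spec = specifiers.SpecifierSet('>=3.5')
    version.parse('3.7.3') in spec    # True
    version.parse('2.7.16') in spec   # False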
feed_parser.feed(metadata) return feed_parser.close() -def check_dist_requires_python(dist, absorb=False): +def get_requires_python(dist): + # type: (pkg_resources.Distribution) -> Optional[str] + """ + Return the "Requires-Python" metadata for a distribution, or None + if not present. + """ pkg_info_dict = get_metadata(dist) requires_python = pkg_info_dict.get('Requires-Python') - if absorb: - return requires_python - try: - if not check_requires_python(requires_python): - raise exceptions.UnsupportedPythonVersion( - "%s requires Python '%s' but the running Python is %s" % ( - dist.project_name, - requires_python, - '.'.join(map(str, sys.version_info[:3])),) - ) - except specifiers.InvalidSpecifier as e: - logger.warning( - "Package %s has an invalid Requires-Python entry %s - %s", - dist.project_name, requires_python, e, - ) - return + + if requires_python is not None: + # Convert to a str to satisfy the type checker, since requires_python + # can be a Header object. + requires_python = str(requires_python) + + return requires_python def get_installer(dist): diff --git a/pipenv/patched/notpip/_internal/utils/setuptools_build.py b/pipenv/patched/notpip/_internal/utils/setuptools_build.py index 03973e976c..0151ee2f85 100644 --- a/pipenv/patched/notpip/_internal/utils/setuptools_build.py +++ b/pipenv/patched/notpip/_internal/utils/setuptools_build.py @@ -1,8 +1,49 @@ +import os +import sys + +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import List, Sequence + # Shim to wrap setup.py invocation with setuptools -SETUPTOOLS_SHIM = ( - "import setuptools, tokenize;__file__=%r;" +# +# We set sys.argv[0] to the path to the underlying setup.py file so +# setuptools / distutils don't take the path to the setup.py to be "-c" when +# invoking via the shim. This avoids e.g. the following manifest_maker +# warning: "warning: manifest_maker: standard file '-c' not found". +_SETUPTOOLS_SHIM = ( + "import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};" "f=getattr(tokenize, 'open', open)(__file__);" "code=f.read().replace('\\r\\n', '\\n');" "f.close();" "exec(compile(code, __file__, 'exec'))" ) + + +def make_setuptools_shim_args( + setup_py_path, # type: str + global_options=None, # type: Sequence[str] + no_user_config=False, # type: bool + unbuffered_output=False # type: bool +): + # type: (...) -> List[str] + """ + Get setuptools command arguments with shim wrapped setup file invocation. + + :param setup_py_path: The path to setup.py to be wrapped. + :param global_options: Additional global options. + :param no_user_config: If True, disables personal user configuration. + :param unbuffered_output: If True, adds the unbuffered switch to the + argument list. + """ + sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) + args = [sys_executable] + if unbuffered_output: + args.append('-u') + args.extend(['-c', _SETUPTOOLS_SHIM.format(setup_py_path)]) + if global_options: + args.extend(global_options) + if no_user_config: + args.append('--no-user-cfg') + return args diff --git a/pipenv/patched/notpip/_internal/utils/subprocess.py b/pipenv/patched/notpip/_internal/utils/subprocess.py new file mode 100644 index 0000000000..1cf2faa9e4 --- /dev/null +++ b/pipenv/patched/notpip/_internal/utils/subprocess.py @@ -0,0 +1,278 @@ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False + +from __future__ import absolute_import + +import logging +import os +import subprocess + +from pipenv.patched.notpip._vendor.six.moves import shlex_quote + +from pipenv.patched.notpip._internal.exceptions import InstallationError +from pipenv.patched.notpip._internal.utils.compat import console_to_str, str_to_display +from pipenv.patched.notpip._internal.utils.logging import subprocess_logger +from pipenv.patched.notpip._internal.utils.misc import HiddenText, path_to_display +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.ui import open_spinner + +if MYPY_CHECK_RUNNING: + from typing import ( + Any, Callable, Iterable, List, Mapping, Optional, Text, Union, + ) + from pipenv.patched.notpip._internal.utils.ui import SpinnerInterface + + CommandArgs = List[Union[str, HiddenText]] + + +LOG_DIVIDER = '----------------------------------------' + + +def make_command(*args): + # type: (Union[str, HiddenText, CommandArgs]) -> CommandArgs + """ + Create a CommandArgs object. + """ + command_args = [] # type: CommandArgs + for arg in args: + # Check for list instead of CommandArgs since CommandArgs is + # only known during type-checking. + if isinstance(arg, list): + command_args.extend(arg) + else: + # Otherwise, arg is str or HiddenText. + command_args.append(arg) + + return command_args + + +def format_command_args(args): + # type: (Union[List[str], CommandArgs]) -> str + """ + Format command arguments for display. + """ + # For HiddenText arguments, display the redacted form by calling str(). + # Also, we don't apply str() to arguments that aren't HiddenText since + # this can trigger a UnicodeDecodeError in Python 2 if the argument + # has type unicode and includes a non-ascii character. (The type + # checker doesn't ensure the annotations are correct in all cases.) + return ' '.join( + shlex_quote(str(arg)) if isinstance(arg, HiddenText) + else shlex_quote(arg) for arg in args + ) + + +def reveal_command_args(args): + # type: (Union[List[str], CommandArgs]) -> List[str] + """ + Return the arguments in their raw, unredacted form. + """ + return [ + arg.secret if isinstance(arg, HiddenText) else arg for arg in args + ] + + +def make_subprocess_output_error( + cmd_args, # type: Union[List[str], CommandArgs] + cwd, # type: Optional[str] + lines, # type: List[Text] + exit_status, # type: int +): + # type: (...) -> Text + """ + Create and return the error message to use to log a subprocess error + with command output. + + :param lines: A list of lines, each ending with a newline. + """ + command = format_command_args(cmd_args) + # Convert `command` and `cwd` to text (unicode in Python 2) so we can use + # them as arguments in the unicode format string below. This avoids + # "UnicodeDecodeError: 'ascii' codec can't decode byte ..." in Python 2 + # if either contains a non-ascii character. + command_display = str_to_display(command, desc='command bytes') + cwd_display = path_to_display(cwd) + + # We know the joined output value ends in a newline. + output = ''.join(lines) + msg = ( + # Use a unicode string to avoid "UnicodeEncodeError: 'ascii' + # codec can't encode character ..." in Python 2 when a format + # argument (e.g. `output`) has a non-ascii character. 
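How the pieces above compose, using a made-up token: `make_command` accepts plain strings, `HiddenText` values, and nested lists, after which the two views diverge.

    args = make_command(
        'git', 'clone', hide_url('https://user:tok3n@example.com/repo.git'))
    format_command_args(args)
    # "git clone 'https://user:****@example.com/repo.git'"  (redacted, safe to log)
    reveal_command_args(args)
    # ['git', 'clone', 'https://user:tok3n@example.com/repo.git']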
+ u'Command errored out with exit status {exit_status}:\n' + ' command: {command_display}\n' + ' cwd: {cwd_display}\n' + 'Complete output ({line_count} lines):\n{output}{divider}' + ).format( + exit_status=exit_status, + command_display=command_display, + cwd_display=cwd_display, + line_count=len(lines), + output=output, + divider=LOG_DIVIDER, + ) + return msg + + +def call_subprocess( + cmd, # type: Union[List[str], CommandArgs] + show_stdout=False, # type: bool + cwd=None, # type: Optional[str] + on_returncode='raise', # type: str + extra_ok_returncodes=None, # type: Optional[Iterable[int]] + command_desc=None, # type: Optional[str] + extra_environ=None, # type: Optional[Mapping[str, Any]] + unset_environ=None, # type: Optional[Iterable[str]] + spinner=None, # type: Optional[SpinnerInterface] + log_failed_cmd=True # type: Optional[bool] +): + # type: (...) -> Text + """ + Args: + show_stdout: if true, use INFO to log the subprocess's stderr and + stdout streams. Otherwise, use DEBUG. Defaults to False. + extra_ok_returncodes: an iterable of integer return codes that are + acceptable, in addition to 0. Defaults to None, which means []. + unset_environ: an iterable of environment variable names to unset + prior to calling subprocess.Popen(). + log_failed_cmd: if false, failed commands are not logged, only raised. + """ + if extra_ok_returncodes is None: + extra_ok_returncodes = [] + if unset_environ is None: + unset_environ = [] + # Most places in pip use show_stdout=False. What this means is-- + # + # - We connect the child's output (combined stderr and stdout) to a + # single pipe, which we read. + # - We log this output to stderr at DEBUG level as it is received. + # - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't + # requested), then we show a spinner so the user can still see the + # subprocess is in progress. + # - If the subprocess exits with an error, we log the output to stderr + # at ERROR level if it hasn't already been displayed to the console + # (e.g. if --verbose logging wasn't enabled). This way we don't log + # the output to the console twice. + # + # If show_stdout=True, then the above is still done, but with DEBUG + # replaced by INFO. + if show_stdout: + # Then log the subprocess output at INFO level. + log_subprocess = subprocess_logger.info + used_level = logging.INFO + else: + # Then log the subprocess output using DEBUG. This also ensures + # it will be logged to the log file (aka user_log), if enabled. + log_subprocess = subprocess_logger.debug + used_level = logging.DEBUG + + # Whether the subprocess will be visible in the console. + showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level + + # Only use the spinner if we're not showing the subprocess output + # and we have a spinner. + use_spinner = not showing_subprocess and spinner is not None + + if command_desc is None: + command_desc = format_command_args(cmd) + + log_subprocess("Running command %s", command_desc) + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + for name in unset_environ: + env.pop(name, None) + try: + proc = subprocess.Popen( + # Convert HiddenText objects to the underlying str. 
+ reveal_command_args(cmd), + stderr=subprocess.STDOUT, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, cwd=cwd, env=env, + ) + proc.stdin.close() + except Exception as exc: + if log_failed_cmd: + subprocess_logger.critical( + "Error %s while executing command %s", exc, command_desc, + ) + raise + all_output = [] + while True: + # The "line" value is a unicode string in Python 2. + line = console_to_str(proc.stdout.readline()) + if not line: + break + line = line.rstrip() + all_output.append(line + '\n') + + # Show the line immediately. + log_subprocess(line) + # Update the spinner. + if use_spinner: + spinner.spin() + try: + proc.wait() + finally: + if proc.stdout: + proc.stdout.close() + proc_had_error = ( + proc.returncode and proc.returncode not in extra_ok_returncodes + ) + if use_spinner: + if proc_had_error: + spinner.finish("error") + else: + spinner.finish("done") + if proc_had_error: + if on_returncode == 'raise': + if not showing_subprocess and log_failed_cmd: + # Then the subprocess streams haven't been logged to the + # console yet. + msg = make_subprocess_output_error( + cmd_args=cmd, + cwd=cwd, + lines=all_output, + exit_status=proc.returncode, + ) + subprocess_logger.error(msg) + exc_msg = ( + 'Command errored out with exit status {}: {} ' + 'Check the logs for full command output.' + ).format(proc.returncode, command_desc) + raise InstallationError(exc_msg) + elif on_returncode == 'warn': + subprocess_logger.warning( + 'Command "%s" had error code %s in %s', + command_desc, proc.returncode, cwd, + ) + elif on_returncode == 'ignore': + pass + else: + raise ValueError('Invalid value: on_returncode=%s' % + repr(on_returncode)) + return ''.join(all_output) + + +def runner_with_spinner_message(message): + # type: (str) -> Callable + """Provide a subprocess_runner that shows a spinner message. + + Intended for use with for pep517's Pep517HookCaller. Thus, the runner has + an API that matches what's expected by Pep517HookCaller.subprocess_runner. + """ + + def runner( + cmd, # type: List[str] + cwd=None, # type: Optional[str] + extra_environ=None # type: Optional[Mapping[str, Any]] + ): + # type: (...) -> None + with open_spinner(message) as spinner: + call_subprocess( + cmd, + cwd=cwd, + extra_environ=extra_environ, + spinner=spinner, + ) + + return runner diff --git a/pipenv/patched/notpip/_internal/utils/temp_dir.py b/pipenv/patched/notpip/_internal/utils/temp_dir.py index d8121a5932..84bba3ac5c 100644 --- a/pipenv/patched/notpip/_internal/utils/temp_dir.py +++ b/pipenv/patched/notpip/_internal/utils/temp_dir.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import errno @@ -8,8 +11,13 @@ import warnings from pipenv.patched.notpip._internal.utils.misc import rmtree +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING from pipenv.vendor.vistir.compat import finalize, ResourceWarning +if MYPY_CHECK_RUNNING: + from typing import Optional + + logger = logging.getLogger(__name__) @@ -21,24 +29,25 @@ class TempDirectory(object): Attributes: path - Location to the created temporary directory or None + Location to the created temporary directory delete Whether the directory should be deleted when exiting (when used as a contextmanager) Methods: - create() - Creates a temporary directory and stores its path in the path - attribute. 
cleanup() - Deletes the temporary directory and sets path attribute to None + Deletes the temporary directory - When used as a context manager, a temporary directory is created on - entering the context and, if the delete attribute is True, on exiting the - context the created directory is deleted. + When used as a context manager, if the delete attribute is True, on + exiting the context the temporary directory is deleted. """ - def __init__(self, path=None, delete=None, kind="temp"): + def __init__( + self, + path=None, # type: Optional[str] + delete=None, # type: Optional[bool] + kind="temp" + ): super(TempDirectory, self).__init__() if path is None and delete is None: @@ -46,55 +55,63 @@ def __init__(self, path=None, delete=None, kind="temp"): # an explicit delete option, then we'll default to deleting. delete = True - self.path = path + if path is None: + path = self._create(kind) + + self._path = path + self._deleted = False self.delete = delete self.kind = kind self._finalizer = None - if path: + if self._path: self._register_finalizer() def _register_finalizer(self): - if self.delete and self.path: + if self.delete and self._path: self._finalizer = finalize( self, self._cleanup, - self.path, + self._path, warn_message = None ) else: self._finalizer = None + @property + def path(self): + # type: () -> str + assert not self._deleted, ( + "Attempted to access deleted path: {}".format(self._path) + ) + return self._path + def __repr__(self): return "<{} {!r}>".format(self.__class__.__name__, self.path) def __enter__(self): - self.create() return self def __exit__(self, exc, value, tb): if self.delete: self.cleanup() - def create(self): + def _create(self, kind): """Create a temporary directory and store its path in self.path """ - if self.path is not None: - logger.debug( - "Skipped creation of temporary directory: {}".format(self.path) - ) - return # We realpath here because some systems have their default tmpdir # symlinked to another directory. This tends to confuse build # scripts, so we canonicalize the path by traversing potential # symlinks here. 
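With this refactoring, a `TempDirectory` creates its directory eagerly in `__init__`, so typical usage looks like the sketch below (`do_something_with` is a stand-in):

    with TempDirectory(kind='unpack') as tmp:
        do_something_with(tmp.path)   # e.g. /tmp/pip-unpack-abc123
    # On exit the directory is removed, and accessing tmp.path afterwards
    # trips the "Attempted to access deleted path" assertion.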
- self.path = os.path.realpath( - tempfile.mkdtemp(prefix="pip-{}-".format(self.kind)) + path = os.path.realpath( + tempfile.mkdtemp(prefix="pip-{}-".format(kind)) ) - self._register_finalizer() - logger.debug("Created temporary directory: {}".format(self.path)) + logger.debug("Created temporary directory: {}".format(path)) + return path @classmethod def _cleanup(cls, name, warn_message=None): + if not os.path.exists(name): + return try: rmtree(name) except OSError: @@ -107,13 +124,12 @@ def cleanup(self): """Remove the temporary directory created and reset state """ if getattr(self._finalizer, "detach", None) and self._finalizer.detach(): - if os.path.exists(self.path): + if os.path.exists(self._path): + self._deleted = True try: - rmtree(self.path) + rmtree(self._path) except OSError: pass - else: - self.path = None class AdjacentTempDirectory(TempDirectory): @@ -138,8 +154,8 @@ class AdjacentTempDirectory(TempDirectory): LEADING_CHARS = "-~.=%0123456789" def __init__(self, original, delete=None): - super(AdjacentTempDirectory, self).__init__(delete=delete) self.original = original.rstrip('/\\') + super(AdjacentTempDirectory, self).__init__(delete=delete) @classmethod def _generate_names(cls, name): @@ -165,7 +181,7 @@ def _generate_names(cls, name): if new_name != name: yield new_name - def create(self): + def _create(self, kind): root, name = os.path.split(self.original) for candidate in self._generate_names(name): path = os.path.join(root, candidate) @@ -176,13 +192,13 @@ def create(self): if ex.errno != errno.EEXIST: raise else: - self.path = os.path.realpath(path) + path = os.path.realpath(path) break - - if not self.path: + else: # Final fallback on the default behavior. - self.path = os.path.realpath( - tempfile.mkdtemp(prefix="pip-{}-".format(self.kind)) + path = os.path.realpath( + tempfile.mkdtemp(prefix="pip-{}-".format(kind)) ) - self._register_finalizer() - logger.debug("Created temporary directory: {}".format(self.path)) + + logger.debug("Created temporary directory: {}".format(path)) + return path \ No newline at end of file diff --git a/pipenv/patched/notpip/_internal/utils/typing.py b/pipenv/patched/notpip/_internal/utils/typing.py index 56f2fa87e2..ec11da8aee 100644 --- a/pipenv/patched/notpip/_internal/utils/typing.py +++ b/pipenv/patched/notpip/_internal/utils/typing.py @@ -21,7 +21,7 @@ from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import ... # noqa: F401 + from typing import ... Ref: https://github.com/python/mypy/issues/3216 """ diff --git a/pipenv/patched/notpip/_internal/utils/ui.py b/pipenv/patched/notpip/_internal/utils/ui.py index 18119e0e1c..78a960cfad 100644 --- a/pipenv/patched/notpip/_internal/utils/ui.py +++ b/pipenv/patched/notpip/_internal/utils/ui.py @@ -1,3 +1,7 @@ +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import, division import contextlib @@ -8,11 +12,8 @@ from signal import SIGINT, default_int_handler, signal from pipenv.patched.notpip._vendor import six -from pipenv.patched.notpip._vendor.progress.bar import ( - Bar, ChargingBar, FillingCirclesBar, FillingSquaresBar, IncrementalBar, - ShadyBar, -) -from pipenv.patched.notpip._vendor.progress.helpers import HIDE_CURSOR, SHOW_CURSOR, WritelnMixin +from pipenv.patched.notpip._vendor.progress import HIDE_CURSOR, SHOW_CURSOR +from pipenv.patched.notpip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar from pipenv.patched.notpip._vendor.progress.spinner import Spinner from pipenv.patched.notpip._internal.utils.compat import WINDOWS @@ -21,7 +22,7 @@ from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: - from typing import Any, Iterator, IO # noqa: F401 + from typing import Any, Iterator, IO try: from pipenv.patched.notpip._vendor import colorama @@ -168,7 +169,7 @@ def __init__(self, *args, **kwargs): # The Windows terminal does not support the hide/show cursor ANSI codes # even with colorama. So we'll ensure that hide_cursor is False on # Windows. - # This call neds to go before the super() call, so that hide_cursor + # This call needs to go before the super() call, so that hide_cursor # is set in time. The base progress bar class writes the "hide cursor" # code to the terminal in its init, so if we don't set this soon # enough, we get a "hide" with no corresponding "show"... @@ -211,22 +212,8 @@ class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): # type: ignore pass -class DownloadIncrementalBar(BaseDownloadProgressBar, # type: ignore - IncrementalBar): - pass - - -class DownloadChargingBar(BaseDownloadProgressBar, # type: ignore - ChargingBar): - pass - - -class DownloadShadyBar(BaseDownloadProgressBar, ShadyBar): # type: ignore - pass - - -class DownloadFillingSquaresBar(BaseDownloadProgressBar, # type: ignore - FillingSquaresBar): +class DownloadBar(BaseDownloadProgressBar, # type: ignore + Bar): pass @@ -241,7 +228,7 @@ class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, # type: ignore class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin, - DownloadProgressMixin, WritelnMixin, Spinner): + DownloadProgressMixin, Spinner): file = sys.stdout suffix = "%(downloaded)s %(download_speed)s" @@ -269,7 +256,7 @@ def update(self): BAR_TYPES = { "off": (DownloadSilentBar, DownloadSilentBar), "on": (DefaultDownloadProgressBar, DownloadProgressSpinner), - "ascii": (DownloadIncrementalBar, DownloadProgressSpinner), + "ascii": (DownloadBar, DownloadProgressSpinner), "pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner), "emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner) } diff --git a/pipenv/patched/notpip/_internal/utils/unpacking.py b/pipenv/patched/notpip/_internal/utils/unpacking.py new file mode 100644 index 0000000000..af0b2674a4 --- /dev/null +++ b/pipenv/patched/notpip/_internal/utils/unpacking.py @@ -0,0 +1,272 @@ +"""Utilities related archives. +""" + +# The following comment should be removed at some point in the future. 
+# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + +from __future__ import absolute_import + +import logging +import os +import shutil +import stat +import tarfile +import zipfile + +from pipenv.patched.notpip._internal.exceptions import InstallationError +from pipenv.patched.notpip._internal.utils.filetypes import ( + BZ2_EXTENSIONS, + TAR_EXTENSIONS, + XZ_EXTENSIONS, + ZIP_EXTENSIONS, +) +from pipenv.patched.notpip._internal.utils.misc import ensure_dir +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Iterable, List, Optional, Text, Union + + +logger = logging.getLogger(__name__) + + +SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS + +try: + import bz2 # noqa + SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS +except ImportError: + logger.debug('bz2 module is not available') + +try: + # Only for Python 3.3+ + import lzma # noqa + SUPPORTED_EXTENSIONS += XZ_EXTENSIONS +except ImportError: + logger.debug('lzma module is not available') + + +def current_umask(): + """Get the current umask which involves having to set it temporarily.""" + mask = os.umask(0) + os.umask(mask) + return mask + + +def split_leading_dir(path): + # type: (Union[str, Text]) -> List[Union[str, Text]] + path = path.lstrip('/').lstrip('\\') + if ( + '/' in path and ( + ('\\' in path and path.find('/') < path.find('\\')) or + '\\' not in path + ) + ): + return path.split('/', 1) + elif '\\' in path: + return path.split('\\', 1) + else: + return [path, ''] + + +def has_leading_dir(paths): + # type: (Iterable[Union[str, Text]]) -> bool + """Returns true if all the paths have the same leading path name + (i.e., everything is in one subdirectory in an archive)""" + common_prefix = None + for path in paths: + prefix, rest = split_leading_dir(path) + if not prefix: + return False + elif common_prefix is None: + common_prefix = prefix + elif prefix != common_prefix: + return False + return True + + +def is_within_directory(directory, target): + # type: ((Union[str, Text]), (Union[str, Text])) -> bool + """ + Return true if the absolute path of target is within the directory + """ + abs_directory = os.path.abspath(directory) + abs_target = os.path.abspath(target) + + prefix = os.path.commonprefix([abs_directory, abs_target]) + return prefix == abs_directory + + +def unzip_file(filename, location, flatten=True): + # type: (str, str, bool) -> None + """ + Unzip the file (with path `filename`) to the destination `location`. All + files are written based on system defaults and umask (i.e. permissions are + not preserved), except that regular file members with any execute + permissions (user, group, or world) have "chmod +x" applied after being + written. Note that for windows, any execute changes using os.chmod are + no-ops per the python docs. 
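The `is_within_directory` guard above is what the unpack helpers below use to reject path-traversal members; for example:

    is_within_directory('/opt/app', '/opt/app/pkg/mod.py')      # True
    is_within_directory('/opt/app', '/opt/app/../etc/passwd')   # False

Note that `os.path.commonprefix` compares characters rather than path components, so a sibling such as '/opt/app2/x' would also pass the check.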
+ """ + ensure_dir(location) + zipfp = open(filename, 'rb') + try: + zip = zipfile.ZipFile(zipfp, allowZip64=True) + leading = has_leading_dir(zip.namelist()) and flatten + for info in zip.infolist(): + name = info.filename + fn = name + if leading: + fn = split_leading_dir(name)[1] + fn = os.path.join(location, fn) + dir = os.path.dirname(fn) + if not is_within_directory(location, fn): + message = ( + 'The zip file ({}) has a file ({}) trying to install ' + 'outside target directory ({})' + ) + raise InstallationError(message.format(filename, fn, location)) + if fn.endswith('/') or fn.endswith('\\'): + # A directory + ensure_dir(fn) + else: + ensure_dir(dir) + # Don't use read() to avoid allocating an arbitrarily large + # chunk of memory for the file's content + fp = zip.open(name) + try: + with open(fn, 'wb') as destfp: + shutil.copyfileobj(fp, destfp) + finally: + fp.close() + mode = info.external_attr >> 16 + # if mode and regular file and any execute permissions for + # user/group/world? + if mode and stat.S_ISREG(mode) and mode & 0o111: + # make dest file have execute for user/group/world + # (chmod +x) no-op on windows per python docs + os.chmod(fn, (0o777 - current_umask() | 0o111)) + finally: + zipfp.close() + + +def untar_file(filename, location): + # type: (str, str) -> None + """ + Untar the file (with path `filename`) to the destination `location`. + All files are written based on system defaults and umask (i.e. permissions + are not preserved), except that regular file members with any execute + permissions (user, group, or world) have "chmod +x" applied after being + written. Note that for windows, any execute changes using os.chmod are + no-ops per the python docs. + """ + ensure_dir(location) + if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'): + mode = 'r:gz' + elif filename.lower().endswith(BZ2_EXTENSIONS): + mode = 'r:bz2' + elif filename.lower().endswith(XZ_EXTENSIONS): + mode = 'r:xz' + elif filename.lower().endswith('.tar'): + mode = 'r' + else: + logger.warning( + 'Cannot determine compression type for file %s', filename, + ) + mode = 'r:*' + tar = tarfile.open(filename, mode) + try: + leading = has_leading_dir([ + member.name for member in tar.getmembers() + ]) + for member in tar.getmembers(): + fn = member.name + if leading: + # https://github.com/python/mypy/issues/1174 + fn = split_leading_dir(fn)[1] # type: ignore + path = os.path.join(location, fn) + if not is_within_directory(location, path): + message = ( + 'The tar file ({}) has a file ({}) trying to install ' + 'outside target directory ({})' + ) + raise InstallationError( + message.format(filename, path, location) + ) + if member.isdir(): + ensure_dir(path) + elif member.issym(): + try: + # https://github.com/python/typeshed/issues/2673 + tar._extract_member(member, path) # type: ignore + except Exception as exc: + # Some corrupt tar files seem to produce this + # (specifically bad symlinks) + logger.warning( + 'In the tar file %s the member %s is invalid: %s', + filename, member.name, exc, + ) + continue + else: + try: + fp = tar.extractfile(member) + except (KeyError, AttributeError) as exc: + # Some corrupt tar files seem to produce this + # (specifically bad symlinks) + logger.warning( + 'In the tar file %s the member %s is invalid: %s', + filename, member.name, exc, + ) + continue + ensure_dir(os.path.dirname(path)) + with open(path, 'wb') as destfp: + shutil.copyfileobj(fp, destfp) + fp.close() + # Update the timestamp (useful for cython compiled files) + # 
https://github.com/python/typeshed/issues/2673 + tar.utime(member, path) # type: ignore + # member have any execute permissions for user/group/world? + if member.mode & 0o111: + # make dest file have execute for user/group/world + # no-op on windows per python docs + os.chmod(path, (0o777 - current_umask() | 0o111)) + finally: + tar.close() + + +def unpack_file( + filename, # type: str + location, # type: str + content_type=None, # type: Optional[str] +): + # type: (...) -> None + filename = os.path.realpath(filename) + if ( + content_type == 'application/zip' or + filename.lower().endswith(ZIP_EXTENSIONS) or + zipfile.is_zipfile(filename) + ): + unzip_file( + filename, + location, + flatten=not filename.endswith('.whl') + ) + elif ( + content_type == 'application/x-gzip' or + tarfile.is_tarfile(filename) or + filename.lower().endswith( + TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS + ) + ): + untar_file(filename, location) + else: + # FIXME: handle? + # FIXME: magic signatures? + logger.critical( + 'Cannot unpack file %s (downloaded from %s, content-type: %s); ' + 'cannot detect archive format', + filename, location, content_type, + ) + raise InstallationError( + 'Cannot determine archive format of {}'.format(location) + ) diff --git a/pipenv/patched/notpip/_internal/utils/urls.py b/pipenv/patched/notpip/_internal/utils/urls.py new file mode 100644 index 0000000000..5a78dadce0 --- /dev/null +++ b/pipenv/patched/notpip/_internal/utils/urls.py @@ -0,0 +1,54 @@ +import os +import sys + +from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse +from pipenv.patched.notpip._vendor.six.moves.urllib import request as urllib_request + +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Optional, Text, Union + + +def get_url_scheme(url): + # type: (Union[str, Text]) -> Optional[Text] + if ':' not in url: + return None + return url.split(':', 1)[0].lower() + + +def path_to_url(path): + # type: (Union[str, Text]) -> str + """ + Convert a path to a file: URL. The path will be made absolute and have + quoted path parts. + """ + path = os.path.normpath(os.path.abspath(path)) + url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path)) + return url + + +def url_to_path(url): + # type: (str) -> str + """ + Convert a file: URL to a path. + """ + assert url.startswith('file:'), ( + "You can only turn file: urls into filenames (not %r)" % url) + + _, netloc, path, _, _ = urllib_parse.urlsplit(url) + + if not netloc or netloc == 'localhost': + # According to RFC 8089, same as empty authority. + netloc = '' + elif sys.platform == 'win32': + # If we have a UNC path, prepend UNC share notation. + netloc = '\\\\' + netloc + else: + raise ValueError( + 'non-local file URIs are not supported on this platform: %r' + % url + ) + + path = urllib_request.url2pathname(netloc + path) + return path diff --git a/pipenv/patched/notpip/_internal/utils/virtualenv.py b/pipenv/patched/notpip/_internal/utils/virtualenv.py new file mode 100644 index 0000000000..380db1c328 --- /dev/null +++ b/pipenv/patched/notpip/_internal/utils/virtualenv.py @@ -0,0 +1,34 @@ +import os.path +import site +import sys + + +def running_under_virtualenv(): + # type: () -> bool + """ + Return True if we're running inside a virtualenv, False otherwise. 
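The two converters in the new `urls.py` are inverses for local paths; roughly (the path is made up, and relative paths are made absolute):

    path_to_url('pkgs/requests-2.22.0-py2.py3-none-any.whl')
    # 'file:///home/user/pkgs/requests-2.22.0-py2.py3-none-any.whl'
    url_to_path('file:///home/user/pkgs/requests-2.22.0-py2.py3-none-any.whl')
    # '/home/user/pkgs/requests-2.22.0-py2.py3-none-any.whl'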
+ + """ + if hasattr(sys, 'real_prefix'): + # pypa/virtualenv case + return True + elif sys.prefix != getattr(sys, "base_prefix", sys.prefix): + # PEP 405 venv + return True + + return False + + +def virtualenv_no_global(): + # type: () -> bool + """ + Return True if in a venv and no system site packages. + """ + # this mirrors the logic in virtualenv.py for locating the + # no-global-site-packages.txt file + site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) + no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt') + if running_under_virtualenv() and os.path.isfile(no_global_file): + return True + else: + return False diff --git a/pipenv/patched/notpip/_internal/vcs/__init__.py b/pipenv/patched/notpip/_internal/vcs/__init__.py index 06d36148ac..91d7b3508a 100644 --- a/pipenv/patched/notpip/_internal/vcs/__init__.py +++ b/pipenv/patched/notpip/_internal/vcs/__init__.py @@ -1,534 +1,15 @@ -"""Handles all VCS (version control) support""" -from __future__ import absolute_import - -import errno -import logging -import os -import shutil -import sys - -from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse - -from pipenv.patched.notpip._internal.exceptions import BadCommand -from pipenv.patched.notpip._internal.utils.misc import ( - display_path, backup_dir, call_subprocess, rmtree, ask_path_exists, +# Expose a limited set of classes and functions so callers outside of +# the vcs package don't need to import deeper than `pip._internal.vcs`. +# (The test directory and imports protected by MYPY_CHECK_RUNNING may +# still need to import from a vcs sub-package.) +# Import all vcs modules to register each VCS in the VcsSupport object. +import pipenv.patched.notpip._internal.vcs.bazaar +import pipenv.patched.notpip._internal.vcs.git +import pipenv.patched.notpip._internal.vcs.mercurial +import pipenv.patched.notpip._internal.vcs.subversion # noqa: F401 +from pipenv.patched.notpip._internal.vcs.versioncontrol import ( # noqa: F401 + RemoteNotFoundError, + is_url, + make_vcs_requirement_url, + vcs, ) -from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Any, Dict, Iterable, List, Mapping, Optional, Text, Tuple, Type - ) - from pipenv.patched.notpip._internal.utils.ui import SpinnerInterface # noqa: F401 - - AuthInfo = Tuple[Optional[str], Optional[str]] - -__all__ = ['vcs'] - - -logger = logging.getLogger(__name__) - - -class RemoteNotFoundError(Exception): - pass - - -class RevOptions(object): - - """ - Encapsulates a VCS-specific revision to install, along with any VCS - install options. - - Instances of this class should be treated as if immutable. - """ - - def __init__(self, vcs, rev=None, extra_args=None): - # type: (VersionControl, Optional[str], Optional[List[str]]) -> None - """ - Args: - vcs: a VersionControl object. - rev: the name of the revision to install. - extra_args: a list of extra options. - """ - if extra_args is None: - extra_args = [] - - self.extra_args = extra_args - self.rev = rev - self.vcs = vcs - - def __repr__(self): - return '<RevOptions {}: rev={!r}>'.format(self.vcs.name, self.rev) - - @property - def arg_rev(self): - # type: () -> Optional[str] - if self.rev is None: - return self.vcs.default_arg_rev - - return self.rev - - def to_args(self): - # type: () -> List[str] - """ - Return the VCS-specific command arguments. 
- """ - args = [] # type: List[str] - rev = self.arg_rev - if rev is not None: - args += self.vcs.get_base_rev_args(rev) - args += self.extra_args - - return args - - def to_display(self): - # type: () -> str - if not self.rev: - return '' - - return ' (to revision {})'.format(self.rev) - - def make_new(self, rev): - # type: (str) -> RevOptions - """ - Make a copy of the current instance, but with a new rev. - - Args: - rev: the name of the revision for the new object. - """ - return self.vcs.make_rev_options(rev, extra_args=self.extra_args) - - -class VcsSupport(object): - _registry = {} # type: Dict[str, Type[VersionControl]] - schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] - - def __init__(self): - # type: () -> None - # Register more schemes with urlparse for various version control - # systems - urllib_parse.uses_netloc.extend(self.schemes) - # Python >= 2.7.4, 3.3 doesn't have uses_fragment - if getattr(urllib_parse, 'uses_fragment', None): - urllib_parse.uses_fragment.extend(self.schemes) - super(VcsSupport, self).__init__() - - def __iter__(self): - return self._registry.__iter__() - - @property - def backends(self): - # type: () -> List[Type[VersionControl]] - return list(self._registry.values()) - - @property - def dirnames(self): - # type: () -> List[str] - return [backend.dirname for backend in self.backends] - - @property - def all_schemes(self): - # type: () -> List[str] - schemes = [] # type: List[str] - for backend in self.backends: - schemes.extend(backend.schemes) - return schemes - - def register(self, cls): - # type: (Type[VersionControl]) -> None - if not hasattr(cls, 'name'): - logger.warning('Cannot register VCS %s', cls.__name__) - return - if cls.name not in self._registry: - self._registry[cls.name] = cls - logger.debug('Registered VCS backend: %s', cls.name) - - def unregister(self, cls=None, name=None): - # type: (Optional[Type[VersionControl]], Optional[str]) -> None - if name in self._registry: - del self._registry[name] - elif cls in self._registry.values(): - del self._registry[cls.name] - else: - logger.warning('Cannot unregister because no class or name given') - - def get_backend_type(self, location): - # type: (str) -> Optional[Type[VersionControl]] - """ - Return the type of the version control backend if found at given - location, e.g. vcs.get_backend_type('/path/to/vcs/checkout') - """ - for vc_type in self._registry.values(): - if vc_type.controls_location(location): - logger.debug('Determine that %s uses VCS: %s', - location, vc_type.name) - return vc_type - return None - - def get_backend(self, name): - # type: (str) -> Optional[Type[VersionControl]] - name = name.lower() - if name in self._registry: - return self._registry[name] - return None - - -vcs = VcsSupport() - - -class VersionControl(object): - name = '' - dirname = '' - repo_name = '' - # List of supported schemes for this Version Control - schemes = () # type: Tuple[str, ...] - # Iterable of environment variable names to pass to call_subprocess(). - unset_environ = () # type: Tuple[str, ...] - default_arg_rev = None # type: Optional[str] - - def __init__(self, url=None, *args, **kwargs): - self.url = url - super(VersionControl, self).__init__(*args, **kwargs) - - def get_base_rev_args(self, rev): - """ - Return the base revision arguments for a vcs command. - - Args: - rev: the name of a revision to install. Cannot be None. 
- """ - raise NotImplementedError - - def make_rev_options(self, rev=None, extra_args=None): - # type: (Optional[str], Optional[List[str]]) -> RevOptions - """ - Return a RevOptions object. - - Args: - rev: the name of a revision to install. - extra_args: a list of extra options. - """ - return RevOptions(self, rev, extra_args=extra_args) - - @classmethod - def _is_local_repository(cls, repo): - # type: (str) -> bool - """ - posix absolute paths start with os.path.sep, - win32 ones start with drive (like c:\\folder) - """ - drive, tail = os.path.splitdrive(repo) - return repo.startswith(os.path.sep) or bool(drive) - - def export(self, location): - """ - Export the repository at the url to the destination location - i.e. only download the files, without vcs informations - """ - raise NotImplementedError - - def get_netloc_and_auth(self, netloc, scheme): - """ - Parse the repository URL's netloc, and return the new netloc to use - along with auth information. - - Args: - netloc: the original repository URL netloc. - scheme: the repository URL's scheme without the vcs prefix. - - This is mainly for the Subversion class to override, so that auth - information can be provided via the --username and --password options - instead of through the URL. For other subclasses like Git without - such an option, auth information must stay in the URL. - - Returns: (netloc, (username, password)). - """ - return netloc, (None, None) - - def get_url_rev_and_auth(self, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] - """ - Parse the repository URL to use, and return the URL, revision, - and auth info to use. - - Returns: (url, rev, (username, password)). - """ - scheme, netloc, path, query, frag = urllib_parse.urlsplit(url) - if '+' not in scheme: - raise ValueError( - "Sorry, {!r} is a malformed VCS url. " - "The format is <vcs>+<protocol>://<url>, " - "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url) - ) - # Remove the vcs prefix. - scheme = scheme.split('+', 1)[1] - netloc, user_pass = self.get_netloc_and_auth(netloc, scheme) - rev = None - if '@' in path: - path, rev = path.rsplit('@', 1) - url = urllib_parse.urlunsplit((scheme, netloc, path, query, '')) - return url, rev, user_pass - - def make_rev_args(self, username, password): - """ - Return the RevOptions "extra arguments" to use in obtain(). - """ - return [] - - def get_url_rev_options(self, url): - # type: (str) -> Tuple[str, RevOptions] - """ - Return the URL and RevOptions object to use in obtain() and in - some cases export(), as a tuple (url, rev_options). - """ - url, rev, user_pass = self.get_url_rev_and_auth(url) - username, password = user_pass - extra_args = self.make_rev_args(username, password) - rev_options = self.make_rev_options(rev, extra_args=extra_args) - - return url, rev_options - - def normalize_url(self, url): - # type: (str) -> str - """ - Normalize a URL for comparison by unquoting it and removing any - trailing slash. - """ - return urllib_parse.unquote(url).rstrip('/') - - def compare_urls(self, url1, url2): - # type: (str, str) -> bool - """ - Compare two repo URLs for identity, ignoring incidental differences. - """ - return (self.normalize_url(url1) == self.normalize_url(url2)) - - def fetch_new(self, dest, url, rev_options): - """ - Fetch a revision from a repository, in the case that this is the - first fetch from the repository. - - Args: - dest: the directory to fetch the repository to. - rev_options: a RevOptions object. 
- """ - raise NotImplementedError - - def switch(self, dest, url, rev_options): - """ - Switch the repo at ``dest`` to point to ``URL``. - - Args: - rev_options: a RevOptions object. - """ - raise NotImplementedError - - def update(self, dest, url, rev_options): - """ - Update an already-existing repo to the given ``rev_options``. - - Args: - rev_options: a RevOptions object. - """ - raise NotImplementedError - - def is_commit_id_equal(self, dest, name): - """ - Return whether the id of the current commit equals the given name. - - Args: - dest: the repository directory. - name: a string name. - """ - raise NotImplementedError - - def obtain(self, dest): - # type: (str) -> None - """ - Install or update in editable mode the package represented by this - VersionControl object. - - Args: - dest: the repository directory in which to install or update. - """ - url, rev_options = self.get_url_rev_options(self.url) - - if not os.path.exists(dest): - self.fetch_new(dest, url, rev_options) - return - - rev_display = rev_options.to_display() - if self.is_repository_directory(dest): - existing_url = self.get_remote_url(dest) - if self.compare_urls(existing_url, url): - logger.debug( - '%s in %s exists, and has correct URL (%s)', - self.repo_name.title(), - display_path(dest), - url, - ) - if not self.is_commit_id_equal(dest, rev_options.rev): - logger.info( - 'Updating %s %s%s', - display_path(dest), - self.repo_name, - rev_display, - ) - self.update(dest, url, rev_options) - else: - logger.info('Skipping because already up-to-date.') - return - - logger.warning( - '%s %s in %s exists with URL %s', - self.name, - self.repo_name, - display_path(dest), - existing_url, - ) - prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', - ('s', 'i', 'w', 'b')) - else: - logger.warning( - 'Directory %s already exists, and is not a %s %s.', - dest, - self.name, - self.repo_name, - ) - # https://github.com/python/mypy/issues/1174 - prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore - ('i', 'w', 'b')) - - logger.warning( - 'The plan is to install the %s repository %s', - self.name, - url, - ) - response = ask_path_exists('What to do? %s' % prompt[0], prompt[1]) - - if response == 'a': - sys.exit(-1) - - if response == 'w': - logger.warning('Deleting %s', display_path(dest)) - rmtree(dest) - self.fetch_new(dest, url, rev_options) - return - - if response == 'b': - dest_dir = backup_dir(dest) - logger.warning( - 'Backing up %s to %s', display_path(dest), dest_dir, - ) - shutil.move(dest, dest_dir) - self.fetch_new(dest, url, rev_options) - return - - # Do nothing if the response is "i". - if response == 's': - logger.info( - 'Switching %s %s to %s%s', - self.repo_name, - display_path(dest), - url, - rev_display, - ) - self.switch(dest, url, rev_options) - - def unpack(self, location): - # type: (str) -> None - """ - Clean up current location and download the url repository - (and vcs infos) into location - """ - if os.path.exists(location): - rmtree(location) - self.obtain(location) - - @classmethod - def get_src_requirement(cls, location, project_name): - """ - Return a string representing the requirement needed to - redownload the files currently present in location, something - like: - {repository_url}@{revision}#egg={project_name}-{version_identifier} - """ - raise NotImplementedError - - @classmethod - def get_remote_url(cls, location): - """ - Return the url used at location - - Raises RemoteNotFoundError if the repository does not have a remote - url configured. 
- """ - raise NotImplementedError - - @classmethod - def get_revision(cls, location): - """ - Return the current commit id of the files at the given location. - """ - raise NotImplementedError - - @classmethod - def run_command( - cls, - cmd, # type: List[str] - show_stdout=True, # type: bool - cwd=None, # type: Optional[str] - on_returncode='raise', # type: str - extra_ok_returncodes=None, # type: Optional[Iterable[int]] - command_desc=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - spinner=None # type: Optional[SpinnerInterface] - ): - # type: (...) -> Optional[Text] - """ - Run a VCS subcommand - This is simply a wrapper around call_subprocess that adds the VCS - command name, and checks that the VCS is available - """ - cmd = [cls.name] + cmd - try: - return call_subprocess(cmd, show_stdout, cwd, - on_returncode=on_returncode, - extra_ok_returncodes=extra_ok_returncodes, - command_desc=command_desc, - extra_environ=extra_environ, - unset_environ=cls.unset_environ, - spinner=spinner) - except OSError as e: - # errno.ENOENT = no such file or directory - # In other words, the VCS executable isn't available - if e.errno == errno.ENOENT: - raise BadCommand( - 'Cannot find command %r - do you have ' - '%r installed and in your ' - 'PATH?' % (cls.name, cls.name)) - else: - raise # re-raise exception if a different error occurred - - @classmethod - def is_repository_directory(cls, path): - # type: (str) -> bool - """ - Return whether a directory path is a repository directory. - """ - logger.debug('Checking in %s for %s (%s)...', - path, cls.dirname, cls.name) - return os.path.exists(os.path.join(path, cls.dirname)) - - @classmethod - def controls_location(cls, location): - # type: (str) -> bool - """ - Check if a location is controlled by the vcs. - It is meant to be overridden to implement smarter detection - mechanisms for specific vcs. - - This can do more than is_repository_directory() alone. For example, - the Git override checks that Git is actually available. - """ - return cls.is_repository_directory(location) diff --git a/pipenv/patched/notpip/_internal/vcs/bazaar.py b/pipenv/patched/notpip/_internal/vcs/bazaar.py index 256eb9e2ac..1342ceeb06 100644 --- a/pipenv/patched/notpip/_internal/vcs/bazaar.py +++ b/pipenv/patched/notpip/_internal/vcs/bazaar.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging @@ -5,12 +8,17 @@ from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse -from pipenv.patched.notpip._internal.download import path_to_url -from pipenv.patched.notpip._internal.utils.misc import ( - display_path, make_vcs_requirement_url, rmtree, -) -from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory -from pipenv.patched.notpip._internal.vcs import VersionControl, vcs +from pipenv.patched.notpip._internal.utils.misc import display_path, rmtree +from pipenv.patched.notpip._internal.utils.subprocess import make_command +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.urls import path_to_url +from pipenv.patched.notpip._internal.vcs.versioncontrol import VersionControl, vcs + +if MYPY_CHECK_RUNNING: + from typing import Optional, Tuple + from pipenv.patched.notpip._internal.utils.misc import HiddenText + from pipenv.patched.notpip._internal.vcs.versioncontrol import AuthInfo, RevOptions + logger = logging.getLogger(__name__) @@ -24,17 +32,19 @@ class Bazaar(VersionControl): 'bzr+lp', ) - def __init__(self, url=None, *args, **kwargs): - super(Bazaar, self).__init__(url, *args, **kwargs) + def __init__(self, *args, **kwargs): + super(Bazaar, self).__init__(*args, **kwargs) # This is only needed for python <2.7.5 # Register lp but do not expose as a scheme to support bzr+lp. if getattr(urllib_parse, 'uses_fragment', None): urllib_parse.uses_fragment.extend(['lp']) - def get_base_rev_args(self, rev): + @staticmethod + def get_base_rev_args(rev): return ['-r', rev] - def export(self, location): + def export(self, location, url): + # type: (str, HiddenText) -> None """ Export the Bazaar repository at the url to the destination location """ @@ -42,15 +52,14 @@ def export(self, location): if os.path.exists(location): rmtree(location) - with TempDirectory(kind="export") as temp_dir: - self.unpack(temp_dir.path) - - self.run_command( - ['export', location], - cwd=temp_dir.path, show_stdout=False, - ) + url, rev_options = self.get_url_rev_options(url) + self.run_command( + make_command('export', location, url, rev_options.to_args()), + show_stdout=False, + ) def fetch_new(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None rev_display = rev_options.to_display() logger.info( 'Checking out %s%s to %s', @@ -58,19 +67,25 @@ def fetch_new(self, dest, url, rev_options): rev_display, display_path(dest), ) - cmd_args = ['branch', '-q'] + rev_options.to_args() + [url, dest] + cmd_args = ( + make_command('branch', '-q', rev_options.to_args(), url, dest) + ) self.run_command(cmd_args) def switch(self, dest, url, rev_options): - self.run_command(['switch', url], cwd=dest) + # type: (str, HiddenText, RevOptions) -> None + self.run_command(make_command('switch', url), cwd=dest) def update(self, dest, url, rev_options): - cmd_args = ['pull', '-q'] + rev_options.to_args() + # type: (str, HiddenText, RevOptions) -> None + cmd_args = make_command('pull', '-q', rev_options.to_args()) self.run_command(cmd_args, cwd=dest) - def get_url_rev_and_auth(self, url): + @classmethod + def get_url_rev_and_auth(cls, url): + # type: (str) -> Tuple[str, Optional[str], AuthInfo] # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it - url, rev, user_pass = super(Bazaar, self).get_url_rev_and_auth(url) + url, rev, user_pass = super(Bazaar, cls).get_url_rev_and_auth(url) if 
url.startswith('ssh://'): url = 'bzr+' + url return url, rev, user_pass @@ -97,16 +112,7 @@ def get_revision(cls, location): return revision.splitlines()[-1] @classmethod - def get_src_requirement(cls, location, project_name): - repo = cls.get_remote_url(location) - if not repo: - return None - if not repo.lower().startswith('bzr:'): - repo = 'bzr+' + repo - current_rev = cls.get_revision(location) - return make_vcs_requirement_url(repo, current_rev, project_name) - - def is_commit_id_equal(self, dest, name): + def is_commit_id_equal(cls, dest, name): """Always assume the versions don't match""" return False diff --git a/pipenv/patched/notpip/_internal/vcs/git.py b/pipenv/patched/notpip/_internal/vcs/git.py index 310eb9fb6d..6855afb225 100644 --- a/pipenv/patched/notpip/_internal/vcs/git.py +++ b/pipenv/patched/notpip/_internal/vcs/git.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging @@ -9,12 +12,22 @@ from pipenv.patched.notpip._vendor.six.moves.urllib import request as urllib_request from pipenv.patched.notpip._internal.exceptions import BadCommand -from pipenv.patched.notpip._internal.utils.compat import samefile -from pipenv.patched.notpip._internal.utils.misc import ( - display_path, make_vcs_requirement_url, redact_password_from_url, -) +from pipenv.patched.notpip._internal.utils.misc import display_path +from pipenv.patched.notpip._internal.utils.subprocess import make_command from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory -from pipenv.patched.notpip._internal.vcs import RemoteNotFoundError, VersionControl, vcs +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.vcs.versioncontrol import ( + RemoteNotFoundError, + VersionControl, + find_path_to_setup_from_repo_root, + vcs, +) + +if MYPY_CHECK_RUNNING: + from typing import Optional, Tuple + from pipenv.patched.notpip._internal.utils.misc import HiddenText + from pipenv.patched.notpip._internal.vcs.versioncontrol import AuthInfo, RevOptions + urlsplit = urllib_parse.urlsplit urlunsplit = urllib_parse.urlunsplit @@ -23,7 +36,7 @@ logger = logging.getLogger(__name__) -HASH_REGEX = re.compile('[a-fA-F0-9]{40}') +HASH_REGEX = re.compile('^[a-fA-F0-9]{40}$') def looks_like_hash(sha): @@ -42,28 +55,8 @@ class Git(VersionControl): unset_environ = ('GIT_DIR', 'GIT_WORK_TREE') default_arg_rev = 'HEAD' - def __init__(self, url=None, *args, **kwargs): - - # Works around an apparent Git bug - # (see https://article.gmane.org/gmane.comp.version-control.git/146500) - if url: - scheme, netloc, path, query, fragment = urlsplit(url) - if scheme.endswith('file'): - initial_slashes = path[:-len(path.lstrip('/'))] - newpath = ( - initial_slashes + - urllib_request.url2pathname(path) - .replace('\\', '/').lstrip('/') - ) - url = urlunsplit((scheme, netloc, newpath, query, fragment)) - after_plus = scheme.find('+') + 1 - url = scheme[:after_plus] + urlunsplit( - (scheme[after_plus:], netloc, newpath, query, fragment), - ) - - super(Git, self).__init__(url, *args, **kwargs) - - def get_base_rev_args(self, rev): + @staticmethod + def get_base_rev_args(rev): return [rev] def get_git_version(self): @@ -73,13 +66,14 @@ def get_git_version(self): version = version[len(VERSION_PFX):].split()[0] else: version = '' - # get first 3 positions of the git version becasue + # get first 3 positions of the git version because # on windows it is 
x.y.z.windows.t, and this parses as # LegacyVersion which always smaller than a Version. version = '.'.join(version.split('.')[:3]) return parse_version(version) - def get_current_branch(self, location): + @classmethod + def get_current_branch(cls, location): """ Return the current branch, or None if HEAD isn't at a branch (e.g. detached HEAD). @@ -89,7 +83,7 @@ def get_current_branch(self, location): # command to exit with status code 1 instead of 128 in this case # and to suppress the message to stderr. args = ['symbolic-ref', '-q', 'HEAD'] - output = self.run_command( + output = cls.run_command( args, extra_ok_returncodes=(1, ), show_stdout=False, cwd=location, ) ref = output.strip() @@ -99,19 +93,21 @@ def get_current_branch(self, location): return None - def export(self, location): + def export(self, location, url): + # type: (str, HiddenText) -> None """Export the Git repository at the url to the destination location""" if not location.endswith('/'): location = location + '/' with TempDirectory(kind="export") as temp_dir: - self.unpack(temp_dir.path) + self.unpack(temp_dir.path, url=url) self.run_command( ['checkout-index', '-a', '-f', '--prefix', location], show_stdout=False, cwd=temp_dir.path ) - def get_revision_sha(self, dest, rev): + @classmethod + def get_revision_sha(cls, dest, rev): """ Return (sha_or_none, is_branch), where sha_or_none is a commit hash if the revision names a remote branch or tag, otherwise None. @@ -121,8 +117,8 @@ def get_revision_sha(self, dest, rev): rev: the revision name. """ # Pass rev to pre-filter the list. - output = self.run_command(['show-ref', rev], cwd=dest, - show_stdout=False, on_returncode='ignore') + output = cls.run_command(['show-ref', rev], cwd=dest, + show_stdout=False, on_returncode='ignore') refs = {} for line in output.strip().splitlines(): try: @@ -145,7 +141,9 @@ def get_revision_sha(self, dest, rev): return (sha, False) - def resolve_revision(self, dest, url, rev_options): + @classmethod + def resolve_revision(cls, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> RevOptions """ Resolve a revision to a new RevOptions object with the SHA1 of the branch, tag, or ref if found. @@ -154,7 +152,11 @@ def resolve_revision(self, dest, url, rev_options): rev_options: a RevOptions object. """ rev = rev_options.arg_rev - sha, is_branch = self.get_revision_sha(dest, rev) + # The arg_rev property's implementation for Git ensures that the + # rev return value is always non-None. + assert rev is not None + + sha, is_branch = cls.get_revision_sha(dest, rev) if sha is not None: rev_options = rev_options.make_new(sha) @@ -174,17 +176,18 @@ def resolve_revision(self, dest, url, rev_options): return rev_options # If it looks like a ref, we have to fetch it explicitly. - self.run_command( - ['fetch', '-q', url] + rev_options.to_args(), + cls.run_command( + make_command('fetch', '-q', url, rev_options.to_args()), cwd=dest, ) # Change the revision to the SHA of the ref we fetched - sha = self.get_revision(dest, rev='FETCH_HEAD') + sha = cls.get_revision(dest, rev='FETCH_HEAD') rev_options = rev_options.make_new(sha) return rev_options - def is_commit_id_equal(self, dest, name): + @classmethod + def is_commit_id_equal(cls, dest, name): """ Return whether the current commit hash equals the given name. @@ -196,15 +199,13 @@ def is_commit_id_equal(self, dest, name): # Then avoid an unnecessary subprocess call. 
return False - return self.get_revision(dest) == name + return cls.get_revision(dest) == name def fetch_new(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None rev_display = rev_options.to_display() - logger.info( - 'Cloning %s%s to %s', redact_password_from_url(url), - rev_display, display_path(dest), - ) - self.run_command(['clone', '-q', url, dest]) + logger.info('Cloning %s%s to %s', url, rev_display, display_path(dest)) + self.run_command(make_command('clone', '-q', url, dest)) if rev_options.rev: # Then a specific revision was requested. @@ -214,7 +215,9 @@ def fetch_new(self, dest, url, rev_options): # Only do a checkout if the current commit id doesn't match # the requested revision. if not self.is_commit_id_equal(dest, rev_options.rev): - cmd_args = ['checkout', '-q'] + rev_options.to_args() + cmd_args = make_command( + 'checkout', '-q', rev_options.to_args(), + ) self.run_command(cmd_args, cwd=dest) elif self.get_current_branch(dest) != branch_name: # Then a specific branch was requested, and that branch @@ -229,13 +232,18 @@ def fetch_new(self, dest, url, rev_options): self.update_submodules(dest) def switch(self, dest, url, rev_options): - self.run_command(['config', 'remote.origin.url', url], cwd=dest) - cmd_args = ['checkout', '-q'] + rev_options.to_args() + # type: (str, HiddenText, RevOptions) -> None + self.run_command( + make_command('config', 'remote.origin.url', url), + cwd=dest, + ) + cmd_args = make_command('checkout', '-q', rev_options.to_args()) self.run_command(cmd_args, cwd=dest) self.update_submodules(dest) def update(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None # First fetch changes from the default remote if self.get_git_version() >= parse_version('1.9.0'): # fetch tags in addition to everything else @@ -244,7 +252,7 @@ def update(self, dest, url, rev_options): self.run_command(['fetch', '-q'], cwd=dest) # Then reset to wanted revision (maybe even origin/master) rev_options = self.resolve_revision(dest, url, rev_options) - cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args() + cmd_args = make_command('reset', '--hard', '-q', rev_options.to_args()) self.run_command(cmd_args, cwd=dest) #: update submodules self.update_submodules(dest) @@ -286,66 +294,60 @@ def get_revision(cls, location, rev=None): return current_rev.strip() @classmethod - def _get_subdirectory(cls, location): - """Return the relative path of setup.py to the git repo root.""" + def get_subdirectory(cls, location): + """ + Return the path to setup.py, relative to the repo root. + Return None if setup.py is in the repo root. 
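The replacement helper, `find_path_to_setup_from_repo_root()` (defined in `versioncontrol.py` later in this patch), performs the walk-up that the removed Git-specific code used to do inline; a condensed sketch of that walk, dropping the warning log:

```python
import os

def find_setup_dir(location, repo_root):
    # Walk upward from `location` until a directory holding setup.py is found.
    while not os.path.exists(os.path.join(location, 'setup.py')):
        parent = os.path.dirname(location)
        if parent == location:        # reached the filesystem root: give up
            return None
        location = parent
    if os.path.samefile(repo_root, location):
        return None                   # setup.py sits at the repo root
    return os.path.relpath(location, repo_root)
```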
+ """ # find the repo root - git_dir = cls.run_command(['rev-parse', '--git-dir'], - show_stdout=False, cwd=location).strip() + git_dir = cls.run_command( + ['rev-parse', '--git-dir'], + show_stdout=False, cwd=location).strip() if not os.path.isabs(git_dir): git_dir = os.path.join(location, git_dir) - root_dir = os.path.join(git_dir, '..') - # find setup.py - orig_location = location - while not os.path.exists(os.path.join(location, 'setup.py')): - last_location = location - location = os.path.dirname(location) - if location == last_location: - # We've traversed up to the root of the filesystem without - # finding setup.py - logger.warning( - "Could not find setup.py for directory %s (tried all " - "parent directories)", - orig_location, - ) - return None - # relative path of setup.py to repo root - if samefile(root_dir, location): - return None - return os.path.relpath(location, root_dir) + repo_root = os.path.abspath(os.path.join(git_dir, '..')) + return find_path_to_setup_from_repo_root(location, repo_root) @classmethod - def get_src_requirement(cls, location, project_name): - repo = cls.get_remote_url(location) - if not repo.lower().startswith('git:'): - repo = 'git+' + repo - current_rev = cls.get_revision(location) - subdir = cls._get_subdirectory(location) - req = make_vcs_requirement_url(repo, current_rev, project_name, - subdir=subdir) - - return req - - def get_url_rev_and_auth(self, url): + def get_url_rev_and_auth(cls, url): + # type: (str) -> Tuple[str, Optional[str], AuthInfo] """ Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. That's required because although they use SSH they sometimes don't work with a ssh:// scheme (e.g. GitHub). But we need a scheme for parsing. Hence we remove it again afterwards and return it as a stub. 
""" + # Works around an apparent Git bug + # (see https://article.gmane.org/gmane.comp.version-control.git/146500) + scheme, netloc, path, query, fragment = urlsplit(url) + if scheme.endswith('file'): + initial_slashes = path[:-len(path.lstrip('/'))] + newpath = ( + initial_slashes + + urllib_request.url2pathname(path) + .replace('\\', '/').lstrip('/') + ) + url = urlunsplit((scheme, netloc, newpath, query, fragment)) + after_plus = scheme.find('+') + 1 + url = scheme[:after_plus] + urlunsplit( + (scheme[after_plus:], netloc, newpath, query, fragment), + ) + if '://' not in url: assert 'file:' not in url url = url.replace('git+', 'git+ssh://') - url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) + url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url) url = url.replace('ssh://', '') else: - url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) + url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url) return url, rev, user_pass - def update_submodules(self, location): + @classmethod + def update_submodules(cls, location): if not os.path.exists(os.path.join(location, '.gitmodules')): return - self.run_command( + cls.run_command( ['submodule', 'update', '--init', '--recursive', '-q'], cwd=location, ) @@ -358,7 +360,8 @@ def controls_location(cls, location): r = cls.run_command(['rev-parse'], cwd=location, show_stdout=False, - on_returncode='ignore') + on_returncode='ignore', + log_failed_cmd=False) return not r except BadCommand: logger.debug("could not determine if %s is under git control " diff --git a/pipenv/patched/notpip/_internal/vcs/mercurial.py b/pipenv/patched/notpip/_internal/vcs/mercurial.py index 37a3f7c296..e36f713004 100644 --- a/pipenv/patched/notpip/_internal/vcs/mercurial.py +++ b/pipenv/patched/notpip/_internal/vcs/mercurial.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging @@ -5,10 +8,22 @@ from pipenv.patched.notpip._vendor.six.moves import configparser -from pipenv.patched.notpip._internal.download import path_to_url -from pipenv.patched.notpip._internal.utils.misc import display_path, make_vcs_requirement_url +from pipenv.patched.notpip._internal.exceptions import BadCommand, InstallationError +from pipenv.patched.notpip._internal.utils.misc import display_path +from pipenv.patched.notpip._internal.utils.subprocess import make_command from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory -from pipenv.patched.notpip._internal.vcs import VersionControl, vcs +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.urls import path_to_url +from pipenv.patched.notpip._internal.vcs.versioncontrol import ( + VersionControl, + find_path_to_setup_from_repo_root, + vcs, +) + +if MYPY_CHECK_RUNNING: + from pipenv.patched.notpip._internal.utils.misc import HiddenText + from pipenv.patched.notpip._internal.vcs.versioncontrol import RevOptions + logger = logging.getLogger(__name__) @@ -17,21 +32,26 @@ class Mercurial(VersionControl): name = 'hg' dirname = '.hg' repo_name = 'clone' - schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http') + schemes = ( + 'hg', 'hg+file', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http', + ) - def get_base_rev_args(self, rev): + @staticmethod + def get_base_rev_args(rev): return [rev] - def export(self, location): + def export(self, location, url): + # type: (str, HiddenText) -> None """Export the Hg repository at the url to the destination location""" with TempDirectory(kind="export") as temp_dir: - self.unpack(temp_dir.path) + self.unpack(temp_dir.path, url=url) self.run_command( ['archive', location], show_stdout=False, cwd=temp_dir.path ) def fetch_new(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None rev_display = rev_options.to_display() logger.info( 'Cloning hg %s%s to %s', @@ -39,16 +59,19 @@ def fetch_new(self, dest, url, rev_options): rev_display, display_path(dest), ) - self.run_command(['clone', '--noupdate', '-q', url, dest]) - cmd_args = ['update', '-q'] + rev_options.to_args() - self.run_command(cmd_args, cwd=dest) + self.run_command(make_command('clone', '--noupdate', '-q', url, dest)) + self.run_command( + make_command('update', '-q', rev_options.to_args()), + cwd=dest, + ) def switch(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None repo_config = os.path.join(dest, self.dirname, 'hgrc') - config = configparser.SafeConfigParser() + config = configparser.RawConfigParser() try: config.read(repo_config) - config.set('paths', 'default', url) + config.set('paths', 'default', url.secret) with open(repo_config, 'w') as config_file: config.write(config_file) except (OSError, configparser.NoSectionError) as exc: @@ -56,12 +79,13 @@ def switch(self, dest, url, rev_options): 'Could not switch Mercurial repository to %s: %s', url, exc, ) else: - cmd_args = ['update', '-q'] + rev_options.to_args() + cmd_args = make_command('update', '-q', rev_options.to_args()) self.run_command(cmd_args, cwd=dest) def update(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None self.run_command(['pull', '-q'], cwd=dest) - cmd_args = ['update', '-q'] + rev_options.to_args() + cmd_args = make_command('update', '-q', rev_options.to_args()) self.run_command(cmd_args, cwd=dest) 
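A note on `make_command()`, used throughout these hunks in place of list concatenation: it builds a single argv list from strings and nested argument lists (the real helper in `utils/subprocess.py` also handles `HiddenText` arguments so secrets stay masked in logs). A minimal sketch of the flattening behavior, assuming a simplified signature:

```python
def make_command(*args):
    # Simplified: flatten strings and nested lists into one argv list.
    cmd = []
    for arg in args:
        if isinstance(arg, (list, tuple)):
            cmd.extend(arg)
        else:
            cmd.append(arg)
    return cmd

make_command('update', '-q', ['-r', '42'])  # ['update', '-q', '-r', '42']
```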
@classmethod @@ -75,29 +99,57 @@ def get_remote_url(cls, location): @classmethod def get_revision(cls, location): + """ + Return the repository-local changeset revision number, as an integer. + """ current_revision = cls.run_command( ['parents', '--template={rev}'], show_stdout=False, cwd=location).strip() return current_revision @classmethod - def get_revision_hash(cls, location): + def get_requirement_revision(cls, location): + """ + Return the changeset identification hash, as a 40-character + hexadecimal string + """ current_rev_hash = cls.run_command( ['parents', '--template={node}'], show_stdout=False, cwd=location).strip() return current_rev_hash @classmethod - def get_src_requirement(cls, location, project_name): - repo = cls.get_remote_url(location) - if not repo.lower().startswith('hg:'): - repo = 'hg+' + repo - current_rev_hash = cls.get_revision_hash(location) - return make_vcs_requirement_url(repo, current_rev_hash, project_name) - - def is_commit_id_equal(self, dest, name): + def is_commit_id_equal(cls, dest, name): """Always assume the versions don't match""" return False + @classmethod + def get_subdirectory(cls, location): + """ + Return the path to setup.py, relative to the repo root. + Return None if setup.py is in the repo root. + """ + # find the repo root + repo_root = cls.run_command( + ['root'], show_stdout=False, cwd=location).strip() + if not os.path.isabs(repo_root): + repo_root = os.path.abspath(os.path.join(location, repo_root)) + return find_path_to_setup_from_repo_root(location, repo_root) + + @classmethod + def controls_location(cls, location): + if super(Mercurial, cls).controls_location(location): + return True + try: + cls.run_command( + ['identify'], + cwd=location, + show_stdout=False, + on_returncode='raise', + log_failed_cmd=False) + return True + except (BadCommand, InstallationError): + return False + vcs.register(Mercurial) diff --git a/pipenv/patched/notpip/_internal/vcs/subversion.py b/pipenv/patched/notpip/_internal/vcs/subversion.py index e62d3def52..90e53c11b7 100644 --- a/pipenv/patched/notpip/_internal/vcs/subversion.py +++ b/pipenv/patched/notpip/_internal/vcs/subversion.py @@ -1,3 +1,6 @@ +# The following comment should be removed at some point in the future. 
+# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import logging @@ -6,9 +9,14 @@ from pipenv.patched.notpip._internal.utils.logging import indent_log from pipenv.patched.notpip._internal.utils.misc import ( - display_path, make_vcs_requirement_url, rmtree, split_auth_from_netloc, + display_path, + is_console_interactive, + rmtree, + split_auth_from_netloc, ) -from pipenv.patched.notpip._internal.vcs import VersionControl, vcs +from pipenv.patched.notpip._internal.utils.subprocess import make_command +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.vcs.versioncontrol import VersionControl, vcs _svn_xml_url_re = re.compile('url="([^"]+)"') _svn_rev_re = re.compile(r'committed-rev="(\d+)"') @@ -16,6 +24,13 @@ _svn_info_xml_url_re = re.compile(r'<url>(.*)</url>') +if MYPY_CHECK_RUNNING: + from typing import Optional, Tuple + from pipenv.patched.notpip._internal.utils.subprocess import CommandArgs + from pipenv.patched.notpip._internal.utils.misc import HiddenText + from pipenv.patched.notpip._internal.vcs.versioncontrol import AuthInfo, RevOptions + + logger = logging.getLogger(__name__) @@ -25,40 +40,13 @@ class Subversion(VersionControl): repo_name = 'checkout' schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn') - def get_base_rev_args(self, rev): - return ['-r', rev] - - def export(self, location): - """Export the svn repository at the url to the destination location""" - url, rev_options = self.get_url_rev_options(self.url) - - logger.info('Exporting svn repository %s to %s', url, location) - with indent_log(): - if os.path.exists(location): - # Subversion doesn't like to check out over an existing - # directory --force fixes this, but was only added in svn 1.5 - rmtree(location) - cmd_args = ['export'] + rev_options.to_args() + [url, location] - self.run_command(cmd_args, show_stdout=False) - - def fetch_new(self, dest, url, rev_options): - rev_display = rev_options.to_display() - logger.info( - 'Checking out %s%s to %s', - url, - rev_display, - display_path(dest), - ) - cmd_args = ['checkout', '-q'] + rev_options.to_args() + [url, dest] - self.run_command(cmd_args) - - def switch(self, dest, url, rev_options): - cmd_args = ['switch'] + rev_options.to_args() + [url, dest] - self.run_command(cmd_args) + @classmethod + def should_add_vcs_url_prefix(cls, remote_url): + return True - def update(self, dest, url, rev_options): - cmd_args = ['update'] + rev_options.to_args() + [dest] - self.run_command(cmd_args) + @staticmethod + def get_base_rev_args(rev): + return ['-r', rev] @classmethod def get_revision(cls, location): @@ -88,7 +76,8 @@ def get_revision(cls, location): revision = max(revision, localrev) return revision - def get_netloc_and_auth(self, netloc, scheme): + @classmethod + def get_netloc_and_auth(cls, netloc, scheme): """ This override allows the auth information to be passed to svn via the --username and --password options instead of via the URL. @@ -96,20 +85,23 @@ def get_netloc_and_auth(self, netloc, scheme): if scheme == 'ssh': # The --username and --password options can't be used for # svn+ssh URLs, so keep the auth information in the URL. 
- return super(Subversion, self).get_netloc_and_auth( - netloc, scheme) + return super(Subversion, cls).get_netloc_and_auth(netloc, scheme) return split_auth_from_netloc(netloc) - def get_url_rev_and_auth(self, url): + @classmethod + def get_url_rev_and_auth(cls, url): + # type: (str) -> Tuple[str, Optional[str], AuthInfo] # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it - url, rev, user_pass = super(Subversion, self).get_url_rev_and_auth(url) + url, rev, user_pass = super(Subversion, cls).get_url_rev_and_auth(url) if url.startswith('ssh://'): url = 'svn+' + url return url, rev, user_pass - def make_rev_args(self, username, password): - extra_args = [] + @staticmethod + def make_rev_args(username, password): + # type: (Optional[str], Optional[HiddenText]) -> CommandArgs + extra_args = [] # type: CommandArgs if username: extra_args += ['--username', username] if password: @@ -165,6 +157,11 @@ def _get_svn_url_rev(cls, location): else: try: # subversion >= 1.7 + # Note that using get_remote_call_options is not necessary here + # because `svn info` is being run against a local directory. + # We don't need to worry about making sure interactive mode + # is being used to prompt for passwords, because passwords + # are only potentially needed for remote server requests. xml = cls.run_command( ['info', '--xml', location], show_stdout=False, @@ -184,17 +181,153 @@ def _get_svn_url_rev(cls, location): return url, rev @classmethod - def get_src_requirement(cls, location, project_name): - repo = cls.get_remote_url(location) - if repo is None: - return None - repo = 'svn+' + repo - rev = cls.get_revision(location) - return make_vcs_requirement_url(repo, rev, project_name) - - def is_commit_id_equal(self, dest, name): + def is_commit_id_equal(cls, dest, name): """Always assume the versions don't match""" return False + def __init__(self, use_interactive=None): + # type: (bool) -> None + if use_interactive is None: + use_interactive = is_console_interactive() + self.use_interactive = use_interactive + + # This member is used to cache the fetched version of the current + # ``svn`` client. + # Special value definitions: + # None: Not evaluated yet. + # Empty tuple: Could not parse version. + self._vcs_version = None # type: Optional[Tuple[int, ...]] + + super(Subversion, self).__init__() + + def call_vcs_version(self): + # type: () -> Tuple[int, ...] + """Query the version of the currently installed Subversion client. + + :return: A tuple containing the parts of the version information or + ``()`` if the version returned from ``svn`` could not be parsed. + :raises: BadCommand: If ``svn`` is not installed. + """ + # Example versions: + # svn, version 1.10.3 (r1842928) + # compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0 + # svn, version 1.7.14 (r1542130) + # compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu + version_prefix = 'svn, version ' + version = self.run_command(['--version'], show_stdout=False) + if not version.startswith(version_prefix): + return () + + version = version[len(version_prefix):].split()[0] + version_list = version.split('.') + try: + parsed_version = tuple(map(int, version_list)) + except ValueError: + return () + + return parsed_version + + def get_vcs_version(self): + # type: () -> Tuple[int, ...] + """Return the version of the currently installed Subversion client. + + If the version of the Subversion client has already been queried, + a cached value will be used. 
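`call_vcs_version()` above reduces `svn --version` output to an integer tuple; as a standalone sketch:

```python
def parse_svn_version(output):
    # e.g. 'svn, version 1.10.3 (r1842928)\n  compiled ...'
    prefix = 'svn, version '
    if not output.startswith(prefix):
        return ()                      # unparseable -> empty tuple
    version = output[len(prefix):].split()[0]
    try:
        return tuple(map(int, version.split('.')))
    except ValueError:
        return ()

parse_svn_version('svn, version 1.10.3 (r1842928)')  # (1, 10, 3)
```

The `(1, 8)` comparison in `get_remote_call_options()` below then works directly on this tuple.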
+ + :return: A tuple containing the parts of the version information or + ``()`` if the version returned from ``svn`` could not be parsed. + :raises: BadCommand: If ``svn`` is not installed. + """ + if self._vcs_version is not None: + # Use cached version, if available. + # If parsing the version failed previously (empty tuple), + # do not attempt to parse it again. + return self._vcs_version + + vcs_version = self.call_vcs_version() + self._vcs_version = vcs_version + return vcs_version + + def get_remote_call_options(self): + # type: () -> CommandArgs + """Return options to be used on calls to Subversion that contact the server. + + These options are applicable for the following ``svn`` subcommands used + in this class. + + - checkout + - export + - switch + - update + + :return: A list of command line arguments to pass to ``svn``. + """ + if not self.use_interactive: + # --non-interactive switch is available since Subversion 0.14.4. + # Subversion < 1.8 runs in interactive mode by default. + return ['--non-interactive'] + + svn_version = self.get_vcs_version() + # By default, Subversion >= 1.8 runs in non-interactive mode if + # stdin is not a TTY. Since that is how pip invokes SVN, in + # call_subprocess(), pip must pass --force-interactive to ensure + # the user can be prompted for a password, if required. + # SVN added the --force-interactive option in SVN 1.8. Since + # e.g. RHEL/CentOS 7, which is supported until 2024, ships with + # SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip + # can't safely add the option if the SVN version is < 1.8 (or unknown). + if svn_version >= (1, 8): + return ['--force-interactive'] + + return [] + + def export(self, location, url): + # type: (str, HiddenText) -> None + """Export the svn repository at the url to the destination location""" + url, rev_options = self.get_url_rev_options(url) + + logger.info('Exporting svn repository %s to %s', url, location) + with indent_log(): + if os.path.exists(location): + # Subversion doesn't like to check out over an existing + # directory --force fixes this, but was only added in svn 1.5 + rmtree(location) + cmd_args = make_command( + 'export', self.get_remote_call_options(), + rev_options.to_args(), url, location, + ) + self.run_command(cmd_args, show_stdout=False) + + def fetch_new(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None + rev_display = rev_options.to_display() + logger.info( + 'Checking out %s%s to %s', + url, + rev_display, + display_path(dest), + ) + cmd_args = make_command( + 'checkout', '-q', self.get_remote_call_options(), + rev_options.to_args(), url, dest, + ) + self.run_command(cmd_args) + + def switch(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None + cmd_args = make_command( + 'switch', self.get_remote_call_options(), rev_options.to_args(), + url, dest, + ) + self.run_command(cmd_args) + + def update(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None + cmd_args = make_command( + 'update', self.get_remote_call_options(), rev_options.to_args(), + dest, + ) + self.run_command(cmd_args) + vcs.register(Subversion) diff --git a/pipenv/patched/notpip/_internal/vcs/versioncontrol.py b/pipenv/patched/notpip/_internal/vcs/versioncontrol.py new file mode 100644 index 0000000000..efe27c12b1 --- /dev/null +++ b/pipenv/patched/notpip/_internal/vcs/versioncontrol.py @@ -0,0 +1,665 @@ +"""Handles all VCS (version control) support""" + +# The following comment should be removed at some point in the 
future. +# mypy: disallow-untyped-defs=False + +from __future__ import absolute_import + +import errno +import logging +import os +import shutil +import sys + +from pipenv.patched.notpip._vendor import pkg_resources +from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse + +from pipenv.patched.notpip._internal.exceptions import BadCommand +from pipenv.patched.notpip._internal.utils.compat import samefile +from pipenv.patched.notpip._internal.utils.misc import ( + ask_path_exists, + backup_dir, + display_path, + hide_url, + hide_value, + rmtree, +) +from pipenv.patched.notpip._internal.utils.subprocess import call_subprocess, make_command +from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.patched.notpip._internal.utils.urls import get_url_scheme + +if MYPY_CHECK_RUNNING: + from typing import ( + Any, Dict, Iterable, List, Mapping, Optional, Text, Tuple, Type, Union + ) + from pipenv.patched.notpip._internal.utils.ui import SpinnerInterface + from pipenv.patched.notpip._internal.utils.misc import HiddenText + from pipenv.patched.notpip._internal.utils.subprocess import CommandArgs + + AuthInfo = Tuple[Optional[str], Optional[str]] + + +__all__ = ['vcs'] + + +logger = logging.getLogger(__name__) + + +def is_url(name): + # type: (Union[str, Text]) -> bool + """ + Return true if the name looks like a URL. + """ + scheme = get_url_scheme(name) + if scheme is None: + return False + return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes + + +def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): + """ + Return the URL for a VCS requirement. + + Args: + repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+"). + project_name: the (unescaped) project name. + """ + egg_project_name = pkg_resources.to_filename(project_name) + req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name) + if subdir: + req += '&subdirectory={}'.format(subdir) + + return req + + +def find_path_to_setup_from_repo_root(location, repo_root): + """ + Find the path to `setup.py` by searching up the filesystem from `location`. + Return the path to `setup.py` relative to `repo_root`. + Return None if `setup.py` is in `repo_root` or cannot be found. + """ + # find setup.py + orig_location = location + while not os.path.exists(os.path.join(location, 'setup.py')): + last_location = location + location = os.path.dirname(location) + if location == last_location: + # We've traversed up to the root of the filesystem without + # finding setup.py + logger.warning( + "Could not find setup.py for directory %s (tried all " + "parent directories)", + orig_location, + ) + return None + + if samefile(repo_root, location): + return None + + return os.path.relpath(location, repo_root) + + +class RemoteNotFoundError(Exception): + pass + + +class RevOptions(object): + + """ + Encapsulates a VCS-specific revision to install, along with any VCS + install options. + + Instances of this class should be treated as if immutable. + """ + + def __init__( + self, + vc_class, # type: Type[VersionControl] + rev=None, # type: Optional[str] + extra_args=None, # type: Optional[CommandArgs] + ): + # type: (...) -> None + """ + Args: + vc_class: a VersionControl subclass. + rev: the name of the revision to install. + extra_args: a list of extra options. 
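A usage sketch for `make_vcs_requirement_url()` above, with a hypothetical revision and project name (`pkg_resources.to_filename()` is inlined here; it just replaces `-` with `_` in the project name):

```python
def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):
    egg_project_name = project_name.replace('-', '_')   # to_filename()
    req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name)
    if subdir:
        req += '&subdirectory={}'.format(subdir)
    return req

make_vcs_requirement_url('git+https://github.com/pypa/pip.git',
                         '1742af3', 'my-proj', subdir='src')
# 'git+https://github.com/pypa/pip.git@1742af3#egg=my_proj&subdirectory=src'
```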
+ """ + if extra_args is None: + extra_args = [] + + self.extra_args = extra_args + self.rev = rev + self.vc_class = vc_class + self.branch_name = None # type: Optional[str] + + def __repr__(self): + return '<RevOptions {}: rev={!r}>'.format(self.vc_class.name, self.rev) + + @property + def arg_rev(self): + # type: () -> Optional[str] + if self.rev is None: + return self.vc_class.default_arg_rev + + return self.rev + + def to_args(self): + # type: () -> CommandArgs + """ + Return the VCS-specific command arguments. + """ + args = [] # type: CommandArgs + rev = self.arg_rev + if rev is not None: + args += self.vc_class.get_base_rev_args(rev) + args += self.extra_args + + return args + + def to_display(self): + # type: () -> str + if not self.rev: + return '' + + return ' (to revision {})'.format(self.rev) + + def make_new(self, rev): + # type: (str) -> RevOptions + """ + Make a copy of the current instance, but with a new rev. + + Args: + rev: the name of the revision for the new object. + """ + return self.vc_class.make_rev_options(rev, extra_args=self.extra_args) + + +class VcsSupport(object): + _registry = {} # type: Dict[str, VersionControl] + schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] + + def __init__(self): + # type: () -> None + # Register more schemes with urlparse for various version control + # systems + urllib_parse.uses_netloc.extend(self.schemes) + # Python >= 2.7.4, 3.3 doesn't have uses_fragment + if getattr(urllib_parse, 'uses_fragment', None): + urllib_parse.uses_fragment.extend(self.schemes) + super(VcsSupport, self).__init__() + + def __iter__(self): + return self._registry.__iter__() + + @property + def backends(self): + # type: () -> List[VersionControl] + return list(self._registry.values()) + + @property + def dirnames(self): + # type: () -> List[str] + return [backend.dirname for backend in self.backends] + + @property + def all_schemes(self): + # type: () -> List[str] + schemes = [] # type: List[str] + for backend in self.backends: + schemes.extend(backend.schemes) + return schemes + + def register(self, cls): + # type: (Type[VersionControl]) -> None + if not hasattr(cls, 'name'): + logger.warning('Cannot register VCS %s', cls.__name__) + return + if cls.name not in self._registry: + self._registry[cls.name] = cls() + logger.debug('Registered VCS backend: %s', cls.name) + + def unregister(self, name): + # type: (str) -> None + if name in self._registry: + del self._registry[name] + + def get_backend_for_dir(self, location): + # type: (str) -> Optional[VersionControl] + """ + Return a VersionControl object if a repository of that type is found + at the given directory. + """ + for vcs_backend in self._registry.values(): + if vcs_backend.controls_location(location): + logger.debug('Determine that %s uses VCS: %s', + location, vcs_backend.name) + return vcs_backend + return None + + def get_backend(self, name): + # type: (str) -> Optional[VersionControl] + """ + Return a VersionControl object or None. + """ + name = name.lower() + return self._registry.get(name) + + +vcs = VcsSupport() + + +class VersionControl(object): + name = '' + dirname = '' + repo_name = '' + # List of supported schemes for this Version Control + schemes = () # type: Tuple[str, ...] + # Iterable of environment variable names to pass to call_subprocess(). + unset_environ = () # type: Tuple[str, ...] + default_arg_rev = None # type: Optional[str] + + @classmethod + def should_add_vcs_url_prefix(cls, remote_url): + """ + Return whether the vcs prefix (e.g. 
"git+") should be added to a + repository's remote url when used in a requirement. + """ + return not remote_url.lower().startswith('{}:'.format(cls.name)) + + @classmethod + def get_subdirectory(cls, location): + """ + Return the path to setup.py, relative to the repo root. + Return None if setup.py is in the repo root. + """ + return None + + @classmethod + def get_requirement_revision(cls, repo_dir): + """ + Return the revision string that should be used in a requirement. + """ + return cls.get_revision(repo_dir) + + @classmethod + def get_src_requirement(cls, repo_dir, project_name): + """ + Return the requirement string to use to redownload the files + currently at the given repository directory. + + Args: + project_name: the (unescaped) project name. + + The return value has a form similar to the following: + + {repository_url}@{revision}#egg={project_name} + """ + repo_url = cls.get_remote_url(repo_dir) + if repo_url is None: + return None + + if cls.should_add_vcs_url_prefix(repo_url): + repo_url = '{}+{}'.format(cls.name, repo_url) + + revision = cls.get_requirement_revision(repo_dir) + subdir = cls.get_subdirectory(repo_dir) + req = make_vcs_requirement_url(repo_url, revision, project_name, + subdir=subdir) + + return req + + @staticmethod + def get_base_rev_args(rev): + """ + Return the base revision arguments for a vcs command. + + Args: + rev: the name of a revision to install. Cannot be None. + """ + raise NotImplementedError + + @classmethod + def make_rev_options(cls, rev=None, extra_args=None): + # type: (Optional[str], Optional[CommandArgs]) -> RevOptions + """ + Return a RevOptions object. + + Args: + rev: the name of a revision to install. + extra_args: a list of extra options. + """ + return RevOptions(cls, rev, extra_args=extra_args) + + @classmethod + def _is_local_repository(cls, repo): + # type: (str) -> bool + """ + posix absolute paths start with os.path.sep, + win32 ones start with drive (like c:\\folder) + """ + drive, tail = os.path.splitdrive(repo) + return repo.startswith(os.path.sep) or bool(drive) + + def export(self, location, url): + # type: (str, HiddenText) -> None + """ + Export the repository at the url to the destination location + i.e. only download the files, without vcs informations + + :param url: the repository URL starting with a vcs prefix. + """ + raise NotImplementedError + + @classmethod + def get_netloc_and_auth(cls, netloc, scheme): + """ + Parse the repository URL's netloc, and return the new netloc to use + along with auth information. + + Args: + netloc: the original repository URL netloc. + scheme: the repository URL's scheme without the vcs prefix. + + This is mainly for the Subversion class to override, so that auth + information can be provided via the --username and --password options + instead of through the URL. For other subclasses like Git without + such an option, auth information must stay in the URL. + + Returns: (netloc, (username, password)). + """ + return netloc, (None, None) + + @classmethod + def get_url_rev_and_auth(cls, url): + # type: (str) -> Tuple[str, Optional[str], AuthInfo] + """ + Parse the repository URL to use, and return the URL, revision, + and auth info to use. + + Returns: (url, rev, (username, password)). + """ + scheme, netloc, path, query, frag = urllib_parse.urlsplit(url) + if '+' not in scheme: + raise ValueError( + "Sorry, {!r} is a malformed VCS url. " + "The format is <vcs>+<protocol>://<url>, " + "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url) + ) + # Remove the vcs prefix. 
+ scheme = scheme.split('+', 1)[1] + netloc, user_pass = cls.get_netloc_and_auth(netloc, scheme) + rev = None + if '@' in path: + path, rev = path.rsplit('@', 1) + url = urllib_parse.urlunsplit((scheme, netloc, path, query, '')) + return url, rev, user_pass + + @staticmethod + def make_rev_args(username, password): + # type: (Optional[str], Optional[HiddenText]) -> CommandArgs + """ + Return the RevOptions "extra arguments" to use in obtain(). + """ + return [] + + def get_url_rev_options(self, url): + # type: (HiddenText) -> Tuple[HiddenText, RevOptions] + """ + Return the URL and RevOptions object to use in obtain() and in + some cases export(), as a tuple (url, rev_options). + """ + secret_url, rev, user_pass = self.get_url_rev_and_auth(url.secret) + username, secret_password = user_pass + password = None # type: Optional[HiddenText] + if secret_password is not None: + password = hide_value(secret_password) + extra_args = self.make_rev_args(username, password) + rev_options = self.make_rev_options(rev, extra_args=extra_args) + + return hide_url(secret_url), rev_options + + @staticmethod + def normalize_url(url): + # type: (str) -> str + """ + Normalize a URL for comparison by unquoting it and removing any + trailing slash. + """ + return urllib_parse.unquote(url).rstrip('/') + + @classmethod + def compare_urls(cls, url1, url2): + # type: (str, str) -> bool + """ + Compare two repo URLs for identity, ignoring incidental differences. + """ + return (cls.normalize_url(url1) == cls.normalize_url(url2)) + + def fetch_new(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None + """ + Fetch a revision from a repository, in the case that this is the + first fetch from the repository. + + Args: + dest: the directory to fetch the repository to. + rev_options: a RevOptions object. + """ + raise NotImplementedError + + def switch(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None + """ + Switch the repo at ``dest`` to point to ``URL``. + + Args: + rev_options: a RevOptions object. + """ + raise NotImplementedError + + def update(self, dest, url, rev_options): + # type: (str, HiddenText, RevOptions) -> None + """ + Update an already-existing repo to the given ``rev_options``. + + Args: + rev_options: a RevOptions object. + """ + raise NotImplementedError + + @classmethod + def is_commit_id_equal(cls, dest, name): + """ + Return whether the id of the current commit equals the given name. + + Args: + dest: the repository directory. + name: a string name. + """ + raise NotImplementedError + + def obtain(self, dest, url): + # type: (str, HiddenText) -> None + """ + Install or update in editable mode the package represented by this + VersionControl object. + + :param dest: the repository directory in which to install or update. + :param url: the repository URL starting with a vcs prefix. 
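Concretely, the parsing in `get_url_rev_and_auth()` above splits the vcs prefix off the scheme and a trailing `@rev` off the path; a self-contained sketch (auth handling via `get_netloc_and_auth()` is omitted):

```python
from urllib.parse import urlsplit, urlunsplit

def split_vcs_url(url):
    scheme, netloc, path, query, frag = urlsplit(url)
    assert '+' in scheme, 'expected <vcs>+<protocol>://...'
    scheme = scheme.split('+', 1)[1]     # drop the vcs prefix
    rev = None
    if '@' in path:
        path, rev = path.rsplit('@', 1)  # trailing @rev names a revision
    return urlunsplit((scheme, netloc, path, query, '')), rev

split_vcs_url('svn+https://host/svn/MyApp@1234#egg=MyApp')
# ('https://host/svn/MyApp', '1234')
```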
+        """
+        url, rev_options = self.get_url_rev_options(url)
+
+        if not os.path.exists(dest):
+            self.fetch_new(dest, url, rev_options)
+            return
+
+        rev_display = rev_options.to_display()
+        if self.is_repository_directory(dest):
+            existing_url = self.get_remote_url(dest)
+            if self.compare_urls(existing_url, url.secret):
+                logger.debug(
+                    '%s in %s exists, and has correct URL (%s)',
+                    self.repo_name.title(),
+                    display_path(dest),
+                    url,
+                )
+                if not self.is_commit_id_equal(dest, rev_options.rev):
+                    logger.info(
+                        'Updating %s %s%s',
+                        display_path(dest),
+                        self.repo_name,
+                        rev_display,
+                    )
+                    self.update(dest, url, rev_options)
+                else:
+                    logger.info('Skipping because already up-to-date.')
+                return
+
+            logger.warning(
+                '%s %s in %s exists with URL %s',
+                self.name,
+                self.repo_name,
+                display_path(dest),
+                existing_url,
+            )
+            prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
+                      ('s', 'i', 'w', 'b'))
+        else:
+            logger.warning(
+                'Directory %s already exists, and is not a %s %s.',
+                dest,
+                self.name,
+                self.repo_name,
+            )
+            # https://github.com/python/mypy/issues/1174
+            prompt = ('(i)gnore, (w)ipe, (b)ackup ',  # type: ignore
+                      ('i', 'w', 'b'))
+
+        logger.warning(
+            'The plan is to install the %s repository %s',
+            self.name,
+            url,
+        )
+        response = ask_path_exists('What to do? %s' % prompt[0], prompt[1])
+
+        if response == 'a':
+            sys.exit(-1)
+
+        if response == 'w':
+            logger.warning('Deleting %s', display_path(dest))
+            rmtree(dest)
+            self.fetch_new(dest, url, rev_options)
+            return
+
+        if response == 'b':
+            dest_dir = backup_dir(dest)
+            logger.warning(
+                'Backing up %s to %s', display_path(dest), dest_dir,
+            )
+            shutil.move(dest, dest_dir)
+            self.fetch_new(dest, url, rev_options)
+            return
+
+        # Do nothing if the response is "i".
+        if response == 's':
+            logger.info(
+                'Switching %s %s to %s%s',
+                self.repo_name,
+                display_path(dest),
+                url,
+                rev_display,
+            )
+            self.switch(dest, url, rev_options)
+
+    def unpack(self, location, url):
+        # type: (str, HiddenText) -> None
+        """
+        Clean up the current location and download the repository from the
+        url (including vcs information) into the location.
+
+        :param url: the repository URL starting with a vcs prefix.
+        """
+        if os.path.exists(location):
+            rmtree(location)
+        self.obtain(location, url=url)
+
+    @classmethod
+    def get_remote_url(cls, location):
+        """
+        Return the url used at location.
+
+        Raises RemoteNotFoundError if the repository does not have a remote
+        url configured.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def get_revision(cls, location):
+        """
+        Return the current commit id of the files at the given location.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def run_command(
+        cls,
+        cmd,  # type: Union[List[str], CommandArgs]
+        show_stdout=True,  # type: bool
+        cwd=None,  # type: Optional[str]
+        on_returncode='raise',  # type: str
+        extra_ok_returncodes=None,  # type: Optional[Iterable[int]]
+        command_desc=None,  # type: Optional[str]
+        extra_environ=None,  # type: Optional[Mapping[str, Any]]
+        spinner=None,  # type: Optional[SpinnerInterface]
+        log_failed_cmd=True
+    ):
+        # type: (...) -> Text
+        """
+        Run a VCS subcommand.
+        This is simply a wrapper around call_subprocess that adds the VCS
+        command name and checks that the VCS is available.
+        """
+        cmd = make_command(cls.name, *cmd)
+        try:
+            return call_subprocess(cmd, show_stdout, cwd,
+                                   on_returncode=on_returncode,
+                                   extra_ok_returncodes=extra_ok_returncodes,
+                                   command_desc=command_desc,
+                                   extra_environ=extra_environ,
+                                   unset_environ=cls.unset_environ,
+                                   spinner=spinner,
+                                   log_failed_cmd=log_failed_cmd)
+        except OSError as e:
+            # errno.ENOENT = no such file or directory
+            # In other words, the VCS executable isn't available
+            if e.errno == errno.ENOENT:
+                raise BadCommand(
+                    'Cannot find command %r - do you have '
+                    '%r installed and in your '
+                    'PATH?' % (cls.name, cls.name))
+            else:
+                raise  # re-raise exception if a different error occurred
+
+    @classmethod
+    def is_repository_directory(cls, path):
+        # type: (str) -> bool
+        """
+        Return whether a directory path is a repository directory.
+        """
+        logger.debug('Checking in %s for %s (%s)...',
+                     path, cls.dirname, cls.name)
+        return os.path.exists(os.path.join(path, cls.dirname))
+
+    @classmethod
+    def controls_location(cls, location):
+        # type: (str) -> bool
+        """
+        Check if a location is controlled by the vcs.
+        It is meant to be overridden to implement smarter detection
+        mechanisms for specific vcs.
+
+        This can do more than is_repository_directory() alone. For example,
+        the Git override checks that Git is actually available.
+        """
+        return cls.is_repository_directory(location)
diff --git a/pipenv/patched/notpip/_internal/wheel.py b/pipenv/patched/notpip/_internal/wheel.py
index 06c880e883..d4c155b481 100644
--- a/pipenv/patched/notpip/_internal/wheel.py
+++ b/pipenv/patched/notpip/_internal/wheel.py
@@ -1,6 +1,11 @@
 """
 Support for installing and building the "wheel" binary package format.
 """
+
+# The following comment should be removed at some point in the future.
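+# (The two directives below relax checking for this module: strict Optional
+# handling is turned off and untyped function definitions are permitted.)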
+# mypy: strict-optional=False +# mypy: disallow-untyped-defs=False + from __future__ import absolute_import import collections @@ -19,44 +24,51 @@ from pipenv.patched.notpip._vendor import pkg_resources from pipenv.patched.notpip._vendor.distlib.scripts import ScriptMaker +from pipenv.patched.notpip._vendor.distlib.util import get_export_entry from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._vendor.six import StringIO from pipenv.patched.notpip._internal import pep425tags -from pipenv.patched.notpip._internal.download import path_to_url, unpack_url from pipenv.patched.notpip._internal.exceptions import ( - InstallationError, InvalidWheelFilename, UnsupportedWheel, -) -from pipenv.patched.notpip._internal.locations import ( - PIP_DELETE_MARKER_FILENAME, distutils_scheme, + InstallationError, + InvalidWheelFilename, + UnsupportedWheel, ) +from pipenv.patched.notpip._internal.locations import distutils_scheme, get_major_minor_version from pipenv.patched.notpip._internal.models.link import Link from pipenv.patched.notpip._internal.utils.logging import indent_log -from pipenv.patched.notpip._internal.utils.misc import ( - call_subprocess, captured_stdout, ensure_dir, read_chunks, +from pipenv.patched.notpip._internal.utils.marker_files import has_delete_marker_file +from pipenv.patched.notpip._internal.utils.misc import captured_stdout, ensure_dir, read_chunks +from pipenv.patched.notpip._internal.utils.setuptools_build import make_setuptools_shim_args +from pipenv.patched.notpip._internal.utils.subprocess import ( + LOG_DIVIDER, + call_subprocess, + format_command_args, + runner_with_spinner_message, ) -from pipenv.patched.notpip._internal.utils.setuptools_build import SETUPTOOLS_SHIM from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING from pipenv.patched.notpip._internal.utils.ui import open_spinner +from pipenv.patched.notpip._internal.utils.unpacking import unpack_file +from pipenv.patched.notpip._internal.utils.urls import path_to_url if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 + from typing import ( Dict, List, Optional, Sequence, Mapping, Tuple, IO, Text, Any, - Union, Iterable + Iterable, Callable, Set, ) - from pipenv.patched.notpip._vendor.packaging.requirements import Requirement # noqa: F401 - from pipenv.patched.notpip._internal.req.req_install import InstallRequirement # noqa: F401 - from pipenv.patched.notpip._internal.download import PipSession # noqa: F401 - from pipenv.patched.notpip._internal.index import FormatControl, PackageFinder # noqa: F401 - from pipenv.patched.notpip._internal.operations.prepare import ( # noqa: F401 + from pipenv.patched.notpip._vendor.packaging.requirements import Requirement + from pipenv.patched.notpip._internal.req.req_install import InstallRequirement + from pipenv.patched.notpip._internal.operations.prepare import ( RequirementPreparer ) - from pipenv.patched.notpip._internal.cache import WheelCache # noqa: F401 - from pipenv.patched.notpip._internal.pep425tags import Pep425Tag # noqa: F401 + from pipenv.patched.notpip._internal.cache import WheelCache + from pipenv.patched.notpip._internal.pep425tags import Pep425Tag InstalledCSVRow = Tuple[str, ...] 
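+    # Type alias for the hook used to decide whether a requirement may be
+    # built as a binary wheel; see WheelBuilder.check_binary_allowed below.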
+ BinaryAllowedPredicate = Callable[[InstallRequirement], bool] + VERSION_COMPATIBLE = (1, 0) @@ -68,8 +80,8 @@ def normpath(src, p): return os.path.relpath(src, p).replace(os.path.sep, '/') -def rehash(path, blocksize=1 << 20): - # type: (str, int) -> Tuple[str, str] +def hash_file(path, blocksize=1 << 20): + # type: (str, int) -> Tuple[Any, int] """Return (hash, length) for path using hashlib.sha256()""" h = hashlib.sha256() length = 0 @@ -77,6 +89,13 @@ def rehash(path, blocksize=1 << 20): for block in read_chunks(f, size=blocksize): length += len(block) h.update(block) + return (h, length) # type: ignore + + +def rehash(path, blocksize=1 << 20): + # type: (str, int) -> Tuple[str, str] + """Return (encoded_digest, length) for path using hashlib.sha256()""" + h, length = hash_file(path, blocksize) digest = 'sha256=' + urlsafe_b64encode( h.digest() ).decode('latin1').rstrip('=') @@ -114,7 +133,7 @@ def fix_script(path): firstline = script.readline() if not firstline.startswith(b'#!python'): return False - exename = os.environ.get('PIP_PYTHON_PATH', sys.executable).encode(sys.getfilesystemencoding()) + exename = sys.executable.encode(sys.getfilesystemencoding()) firstline = b'#!' + exename + os.linesep.encode("ascii") rest = script.read() with open(path, 'wb') as script: @@ -188,7 +207,7 @@ def message_about_scripts_not_on_PATH(scripts): return None # Group scripts by the path they were installed in - grouped_by_dir = collections.defaultdict(set) # type: Dict[str, set] + grouped_by_dir = collections.defaultdict(set) # type: Dict[str, Set[str]] for destfile in scripts: parent_dir = os.path.dirname(destfile) script_name = os.path.basename(destfile) @@ -201,24 +220,23 @@ def message_about_scripts_not_on_PATH(scripts): ] # If an executable sits with sys.executable, we don't warn for it. # This covers the case of venv invocations without activating the venv. 
- executable_loc = os.environ.get("PIP_PYTHON_PATH", sys.executable) - not_warn_dirs.append(os.path.normcase(os.path.dirname(executable_loc))) + not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable))) warn_for = { parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items() if os.path.normcase(parent_dir) not in not_warn_dirs - } + } # type: Dict[str, Set[str]] if not warn_for: return None # Format a message msg_lines = [] - for parent_dir, scripts in warn_for.items(): - scripts = sorted(scripts) - if len(scripts) == 1: - start_text = "script {} is".format(scripts[0]) + for parent_dir, dir_scripts in warn_for.items(): + sorted_scripts = sorted(dir_scripts) # type: List[str] + if len(sorted_scripts) == 1: + start_text = "script {} is".format(sorted_scripts[0]) else: start_text = "scripts {} are".format( - ", ".join(scripts[:-1]) + " and " + scripts[-1] + ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1] ) msg_lines.append( @@ -296,6 +314,22 @@ def get_csv_rows_for_installed( return installed_rows +class MissingCallableSuffix(Exception): + pass + + +def _raise_for_invalid_entrypoint(specification): + entry = get_export_entry(specification) + if entry is not None and entry.suffix is None: + raise MissingCallableSuffix(str(entry)) + + +class PipScriptMaker(ScriptMaker): + def make(self, specification, options=None): + _raise_for_invalid_entrypoint(specification) + return super(PipScriptMaker, self).make(specification, options) + + def move_wheel_files( name, # type: str req, # type: Requirement @@ -458,7 +492,7 @@ def is_entrypoint_wrapper(name): dest = scheme[subdir] clobber(source, dest, False, fixer=fixer, filter=filter) - maker = ScriptMaker(None, scheme['scripts']) + maker = PipScriptMaker(None, scheme['scripts']) # Ensure old scripts are overwritten. # See https://github.com/pypa/pip/issues/1800 @@ -474,36 +508,7 @@ def is_entrypoint_wrapper(name): # See https://bitbucket.org/pypa/distlib/issue/32/ maker.set_mode = True - # Simplify the script and fix the fact that the default script swallows - # every single stack trace. - # See https://bitbucket.org/pypa/distlib/issue/34/ - # See https://bitbucket.org/pypa/distlib/issue/33/ - def _get_script_text(entry): - if entry.suffix is None: - raise InstallationError( - "Invalid script entry point: %s for req: %s - A callable " - "suffix is required. Cf https://packaging.python.org/en/" - "latest/distributing.html#console-scripts for more " - "information." 
% (entry, req) - ) - return maker.script_template % { - "module": entry.prefix, - "import_name": entry.suffix.split(".")[0], - "func": entry.suffix, - } - # ignore type, because mypy disallows assigning to a method, - # see https://github.com/python/mypy/issues/2427 - maker._get_script_text = _get_script_text # type: ignore - maker.script_template = r"""# -*- coding: utf-8 -*- -import re -import sys - -from %(module)s import %(import_name)s - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(%(func)s()) -""" + scripts_to_generate = [] # Special case pip and setuptools to generate versioned wrappers # @@ -541,15 +546,16 @@ def _get_script_text(entry): pip_script = console.pop('pip', None) if pip_script: if "ENSUREPIP_OPTIONS" not in os.environ: - spec = 'pip = ' + pip_script - generated.extend(maker.make(spec)) + scripts_to_generate.append('pip = ' + pip_script) if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall": - spec = 'pip%s = %s' % (sys.version[:1], pip_script) - generated.extend(maker.make(spec)) + scripts_to_generate.append( + 'pip%s = %s' % (sys.version_info[0], pip_script) + ) - spec = 'pip%s = %s' % (sys.version[:3], pip_script) - generated.extend(maker.make(spec)) + scripts_to_generate.append( + 'pip%s = %s' % (get_major_minor_version(), pip_script) + ) # Delete any other versioned pip entry points pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)] for k in pip_ep: @@ -557,11 +563,15 @@ def _get_script_text(entry): easy_install_script = console.pop('easy_install', None) if easy_install_script: if "ENSUREPIP_OPTIONS" not in os.environ: - spec = 'easy_install = ' + easy_install_script - generated.extend(maker.make(spec)) + scripts_to_generate.append( + 'easy_install = ' + easy_install_script + ) - spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script) - generated.extend(maker.make(spec)) + scripts_to_generate.append( + 'easy_install-%s = %s' % ( + get_major_minor_version(), easy_install_script + ) + ) # Delete any other versioned easy_install entry points easy_install_ep = [ k for k in console if re.match(r'easy_install(-\d\.\d)?$', k) @@ -570,25 +580,37 @@ def _get_script_text(entry): del console[k] # Generate the console and GUI entry points specified in the wheel - if len(console) > 0: - generated_console_scripts = maker.make_multiple( - ['%s = %s' % kv for kv in console.items()] - ) - generated.extend(generated_console_scripts) + scripts_to_generate.extend( + '%s = %s' % kv for kv in console.items() + ) + + gui_scripts_to_generate = [ + '%s = %s' % kv for kv in gui.items() + ] - if warn_script_location: - msg = message_about_scripts_not_on_PATH(generated_console_scripts) - if msg is not None: - logger.warning(msg) + generated_console_scripts = [] # type: List[str] + + try: + generated_console_scripts = maker.make_multiple(scripts_to_generate) + generated.extend(generated_console_scripts) - if len(gui) > 0: generated.extend( - maker.make_multiple( - ['%s = %s' % kv for kv in gui.items()], - {'gui': True} - ) + maker.make_multiple(gui_scripts_to_generate, {'gui': True}) + ) + except MissingCallableSuffix as e: + entry = e.args[0] + raise InstallationError( + "Invalid script entry point: {} for req: {} - A callable " + "suffix is required. 
Cf https://packaging.python.org/en/" + "latest/distributing.html#console-scripts for more " + "information.".format(entry, req) ) + if warn_script_location: + msg = message_about_scripts_not_on_PATH(generated_console_scripts) + if msg is not None: + logger.warning(msg) + # Record pip as the installer installer = os.path.join(info_dir[0], 'INSTALLER') temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip') @@ -664,6 +686,16 @@ def check_compatibility(version, name): ) +def format_tag(file_tag): + # type: (Tuple[str, ...]) -> str + """ + Format three tags in the form "<python_tag>-<abi_tag>-<platform_tag>". + + :param file_tag: A 3-tuple of tags (python_tag, abi_tag, platform_tag). + """ + return '-'.join(file_tag) + + class Wheel(object): """A wheel file""" @@ -703,25 +735,38 @@ def __init__(self, filename): for y in self.abis for z in self.plats } - def support_index_min(self, tags=None): - # type: (Optional[List[Pep425Tag]]) -> Optional[int] + def get_formatted_file_tags(self): + # type: () -> List[str] + """ + Return the wheel's tags as a sorted list of strings. + """ + return sorted(format_tag(tag) for tag in self.file_tags) + + def support_index_min(self, tags): + # type: (List[Pep425Tag]) -> int """ Return the lowest index that one of the wheel's file_tag combinations - achieves in the supported_tags list e.g. if there are 8 supported tags, - and one of the file tags is first in the list, then return 0. Returns - None is the wheel is not supported. + achieves in the given list of supported tags. + + For example, if there are 8 supported tags and one of the file tags + is first in the list, then return 0. + + :param tags: the PEP 425 tags to check the wheel against, in order + with most preferred first. + + :raises ValueError: If none of the wheel's file tags match one of + the supported tags. """ - if tags is None: # for mock - tags = pep425tags.get_supported() - indexes = [tags.index(c) for c in self.file_tags if c in tags] - return min(indexes) if indexes else None + return min(tags.index(tag) for tag in self.file_tags if tag in tags) - def supported(self, tags=None): - # type: (Optional[List[Pep425Tag]]) -> bool - """Is this wheel supported on this system?""" - if tags is None: # for mock - tags = pep425tags.get_supported() - return bool(set(tags).intersection(self.file_tags)) + def supported(self, tags): + # type: (List[Pep425Tag]) -> bool + """ + Return whether the wheel is compatible with one of the given tags. + + :param tags: the PEP 425 tags to check the wheel against. + """ + return not self.file_tags.isdisjoint(tags) def _contains_egg_info( @@ -735,9 +780,9 @@ def _contains_egg_info( def should_use_ephemeral_cache( req, # type: InstallRequirement - format_control, # type: FormatControl - autobuilding, # type: bool - cache_available # type: bool + should_unpack, # type: bool + cache_available, # type: bool + check_binary_allowed, # type: BinaryAllowedPredicate ): # type: (...) -> Optional[bool] """ @@ -745,37 +790,41 @@ def should_use_ephemeral_cache( ephemeral cache. :param cache_available: whether a cache directory is available for the - autobuilding=True case. + should_unpack=True case. :return: True or False to build the requirement with ephem_cache=True or False, respectively; or None not to build the requirement. 
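+
+    For example, a VCS requirement (such as a git checkout) returns True:
+    its wheel is built just for this run, into the ephemeral cache.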
""" if req.constraint: + # never build requirements that are merely constraints return None if req.is_wheel: - if not autobuilding: + if not should_unpack: logger.info( 'Skipping %s, due to already being wheel.', req.name, ) return None - if not autobuilding: + if not should_unpack: + # i.e. pip wheel, not pip install; + # return False, knowing that the caller will never cache + # in this case anyway, so this return merely means "build it". + # TODO improve this behavior return False if req.editable or not req.source_dir: return None - if req.link and not req.link.is_artifact: - # VCS checkout. Build wheel just for this run. - return True - - if "binary" not in format_control.get_allowed_formats( - canonicalize_name(req.name)): + if not check_binary_allowed(req): logger.info( - "Skipping bdist_wheel for %s, due to binaries " + "Skipping wheel build for %s, due to binaries " "being disabled for it.", req.name, ) return None + if req.link and req.link.is_vcs: + # VCS checkout. Build wheel just for this run. + return True + link = req.link base, ext = link.splitext() if cache_available and _contains_egg_info(base): @@ -787,7 +836,7 @@ def should_use_ephemeral_cache( return True -def format_command( +def format_command_result( command_args, # type: List[str] command_output, # type: str ): @@ -795,7 +844,8 @@ def format_command( """ Format command information for logging. """ - text = 'Command arguments: {}\n'.format(command_args) + command_desc = format_command_args(command_args) + text = 'Command arguments: {}\n'.format(command_desc) if not command_output: text += 'Command output: None' @@ -804,10 +854,7 @@ def format_command( else: if not command_output.endswith('\n'): command_output += '\n' - text += ( - 'Command output:\n{}' - '-----------------------------------------' - ).format(command_output) + text += 'Command output:\n{}{}'.format(command_output, LOG_DIVIDER) return text @@ -829,7 +876,7 @@ def get_legacy_build_wheel_path( msg = ( 'Legacy build of wheel for {!r} created no files.\n' ).format(req.name) - msg += format_command(command_args, command_output) + msg += format_command_result(command_args, command_output) logger.warning(msg) return None @@ -838,26 +885,33 @@ def get_legacy_build_wheel_path( 'Legacy build of wheel for {!r} created more than one file.\n' 'Filenames (choosing first): {}\n' ).format(req.name, names) - msg += format_command(command_args, command_output) + msg += format_command_result(command_args, command_output) logger.warning(msg) return os.path.join(temp_dir, names[0]) +def _always_true(_): + return True + + class WheelBuilder(object): """Build wheels from a RequirementSet.""" def __init__( self, - finder, # type: PackageFinder preparer, # type: RequirementPreparer wheel_cache, # type: WheelCache build_options=None, # type: Optional[List[str]] global_options=None, # type: Optional[List[str]] + check_binary_allowed=None, # type: Optional[BinaryAllowedPredicate] no_clean=False # type: bool ): # type: (...) -> None - self.finder = finder + if check_binary_allowed is None: + # Binaries allowed by default. 
+ check_binary_allowed = _always_true + self.preparer = preparer self.wheel_cache = wheel_cache @@ -865,6 +919,7 @@ def __init__( self.build_options = build_options or [] self.global_options = global_options or [] + self.check_binary_allowed = check_binary_allowed self.no_clean = no_clean def _build_one(self, req, output_dir, python_tag=None): @@ -888,7 +943,12 @@ def _build_one_inside_env(self, req, output_dir, python_tag=None): wheel_name = os.path.basename(wheel_path) dest_path = os.path.join(output_dir, wheel_name) try: + wheel_hash, length = hash_file(wheel_path) shutil.move(wheel_path, dest_path) + logger.info('Created wheel for %s: ' + 'filename=%s size=%d sha256=%s', + req.name, wheel_name, length, + wheel_hash.hexdigest()) logger.info('Stored in directory: %s', output_dir) return dest_path except Exception: @@ -902,11 +962,11 @@ def _base_setup_args(self, req): # isolating. Currently, it breaks Python in virtualenvs, because it # relies on site.py to find parts of the standard library outside the # virtualenv. - executable_loc = os.environ.get('PIP_PYTHON_PATH', sys.executable) - return [ - executable_loc, '-u', '-c', - SETUPTOOLS_SHIM % req.setup_py - ] + list(self.global_options) + return make_setuptools_shim_args( + req.setup_py_path, + global_options=self.global_options, + unbuffered_output=True + ) def _build_one_pep517(self, req, tempd, python_tag=None): """Build one InstallRequirement using the PEP 517 build process. @@ -914,13 +974,23 @@ def _build_one_pep517(self, req, tempd, python_tag=None): Returns path to wheel if successfully built. Otherwise, returns None. """ assert req.metadata_directory is not None + if self.build_options: + # PEP 517 does not support --build-options + logger.error('Cannot build wheel for %s using PEP 517 when ' + '--build-options is present' % (req.name,)) + return None try: - req.spin_message = 'Building wheel for %s (PEP 517)' % (req.name,) logger.debug('Destination directory: %s', tempd) - wheel_name = req.pep517_backend.build_wheel( - tempd, - metadata_directory=req.metadata_directory + + runner = runner_with_spinner_message( + 'Building wheel for {} (PEP 517)'.format(req.name) ) + backend = req.pep517_backend + with backend.subprocess_runner(runner): + wheel_name = backend.build_wheel( + tempd, + metadata_directory=req.metadata_directory, + ) if python_tag: # General PEP 517 backends don't necessarily support # a "--python-tag" option, so we rename the wheel @@ -954,12 +1024,16 @@ def _build_one_legacy(self, req, tempd, python_tag=None): wheel_args += ["--python-tag", python_tag] try: - output = call_subprocess(wheel_args, cwd=req.setup_py_dir, - show_stdout=False, spinner=spinner) + output = call_subprocess( + wheel_args, + cwd=req.unpacked_source_directory, + spinner=spinner, + ) except Exception: spinner.finish("error") logger.error('Failed building wheel for %s', req.name) return None + names = os.listdir(tempd) wheel_path = get_legacy_build_wheel_path( names=names, @@ -976,7 +1050,7 @@ def _clean_one(self, req): logger.info('Running setup.py clean for %s', req.name) clean_args = base_args + ['clean', '--all'] try: - call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False) + call_subprocess(clean_args, cwd=req.source_dir) return True except Exception: logger.error('Failed cleaning build dir for %s', req.name) @@ -985,41 +1059,54 @@ def _clean_one(self, req): def build( self, requirements, # type: Iterable[InstallRequirement] - session, # type: PipSession - autobuilding=False # type: bool + should_unpack=False # type: bool ): # 
type: (...) -> List[InstallRequirement] """Build wheels. - :param unpack: If True, replace the sdist we built from with the - newly built wheel, in preparation for installation. + :param should_unpack: If True, after building the wheel, unpack it + and replace the sdist with the unpacked version in preparation + for installation. :return: True if all the wheels built correctly. """ + # pip install uses should_unpack=True. + # pip install never provides a _wheel_dir. + # pip wheel uses should_unpack=False. + # pip wheel always provides a _wheel_dir (via the preparer). + assert ( + (should_unpack and not self._wheel_dir) or + (not should_unpack and self._wheel_dir) + ) + buildset = [] - format_control = self.finder.format_control - # Whether a cache directory is available for autobuilding=True. - cache_available = bool(self._wheel_dir or self.wheel_cache.cache_dir) + cache_available = bool(self.wheel_cache.cache_dir) for req in requirements: ephem_cache = should_use_ephemeral_cache( - req, format_control=format_control, autobuilding=autobuilding, + req, + should_unpack=should_unpack, cache_available=cache_available, + check_binary_allowed=self.check_binary_allowed, ) if ephem_cache is None: continue - buildset.append((req, ephem_cache)) + # Determine where the wheel should go. + if should_unpack: + if ephem_cache: + output_dir = self.wheel_cache.get_ephem_path_for_link( + req.link + ) + else: + output_dir = self.wheel_cache.get_path_for_link(req.link) + else: + output_dir = self._wheel_dir + + buildset.append((req, output_dir)) if not buildset: return [] - # Is any wheel build not using the ephemeral cache? - if any(not ephem_cache for _, ephem_cache in buildset): - have_directory_for_build = self._wheel_dir or ( - autobuilding and self.wheel_cache.cache_dir - ) - assert have_directory_for_build - # TODO by @pradyunsg # Should break up this method into 2 separate methods. @@ -1028,57 +1115,54 @@ def build( 'Building wheels for collected packages: %s', ', '.join([req.name for (req, _) in buildset]), ) - _cache = self.wheel_cache # shorter name + + python_tag = None + if should_unpack: + python_tag = pep425tags.implementation_tag + with indent_log(): build_success, build_failure = [], [] - for req, ephem in buildset: - python_tag = None - if autobuilding: - python_tag = pep425tags.implementation_tag - if ephem: - output_dir = _cache.get_ephem_path_for_link(req.link) - else: - output_dir = _cache.get_path_for_link(req.link) - try: - ensure_dir(output_dir) - except OSError as e: - logger.warning("Building wheel for %s failed: %s", - req.name, e) - build_failure.append(req) - continue - else: - output_dir = self._wheel_dir + for req, output_dir in buildset: + try: + ensure_dir(output_dir) + except OSError as e: + logger.warning( + "Building wheel for %s failed: %s", + req.name, e, + ) + build_failure.append(req) + continue + wheel_file = self._build_one( req, output_dir, python_tag=python_tag, ) if wheel_file: build_success.append(req) - if autobuilding: + if should_unpack: # XXX: This is mildly duplicative with prepare_files, # but not close enough to pull out to a single common # method. # The code below assumes temporary source dirs - # prevent it doing bad things. 
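+                    # The delete-marker file identifies a temporary build
+                    # directory that pip created and may safely remove.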
- if req.source_dir and not os.path.exists(os.path.join( - req.source_dir, PIP_DELETE_MARKER_FILENAME)): + if ( + req.source_dir and + not has_delete_marker_file(req.source_dir) + ): raise AssertionError( "bad source dir - missing marker") # Delete the source we built the wheel from req.remove_temporary_source() # set the build directory again - name is known from # the work prepare_files did. - req.source_dir = req.build_location( + req.source_dir = req.ensure_build_location( self.preparer.build_dir ) # Update the link for this. req.link = Link(path_to_url(wheel_file)) assert req.link.is_wheel # extract the wheel into the dir - unpack_url( - req.link, req.source_dir, None, False, - session=session, - ) + unpack_file(req.link.file_path, req.source_dir) else: build_failure.append(req) diff --git a/pipenv/patched/notpip/_vendor/__init__.py b/pipenv/patched/notpip/_vendor/__init__.py index 1256c03971..3b61408b9c 100644 --- a/pipenv/patched/notpip/_vendor/__init__.py +++ b/pipenv/patched/notpip/_vendor/__init__.py @@ -30,24 +30,21 @@ def vendored(modulename): vendored_name = "{0}.{1}".format(__name__, modulename) try: - __import__(vendored_name, globals(), locals(), level=0) + __import__(modulename, globals(), locals(), level=0) except ImportError: - try: - __import__(modulename, globals(), locals(), level=0) - except ImportError: - # We can just silently allow import failures to pass here. If we - # got to this point it means that ``import pipenv.patched.notpip._vendor.whatever`` - # failed and so did ``import whatever``. Since we're importing this - # upfront in an attempt to alias imports, not erroring here will - # just mean we get a regular import error whenever pip *actually* - # tries to import one of these modules to use it, which actually - # gives us a better error message than we would have otherwise - # gotten. - pass - else: - sys.modules[vendored_name] = sys.modules[modulename] - base, head = vendored_name.rsplit(".", 1) - setattr(sys.modules[base], head, sys.modules[modulename]) + # We can just silently allow import failures to pass here. If we + # got to this point it means that ``import pipenv.patched.notpip._vendor.whatever`` + # failed and so did ``import whatever``. Since we're importing this + # upfront in an attempt to alias imports, not erroring here will + # just mean we get a regular import error whenever pip *actually* + # tries to import one of these modules to use it, which actually + # gives us a better error message than we would have otherwise + # gotten. + pass + else: + sys.modules[vendored_name] = sys.modules[modulename] + base, head = vendored_name.rsplit(".", 1) + setattr(sys.modules[base], head, sys.modules[modulename]) # If we're operating in a debundled setup, then we want to go ahead and trigger @@ -63,10 +60,10 @@ def vendored(modulename): # Actually alias all of our vendored dependencies. 
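+    # (each vendored() call imports the real top-level module and, on
+    # success, aliases it into sys.modules under this package's namespace)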
vendored("cachecontrol") vendored("colorama") + vendored("contextlib2") vendored("distlib") vendored("distro") vendored("html5lib") - vendored("lockfile") vendored("six") vendored("six.moves") vendored("six.moves.urllib") @@ -80,6 +77,7 @@ def vendored(modulename): vendored("pytoml") vendored("retrying") vendored("requests") + vendored("requests.exceptions") vendored("requests.packages") vendored("requests.packages.urllib3") vendored("requests.packages.urllib3._collections") diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/caches/file_cache.py b/pipenv/patched/notpip/_vendor/cachecontrol/caches/file_cache.py index 06f7d09ebb..607b945242 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/caches/file_cache.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/caches/file_cache.py @@ -69,8 +69,8 @@ def __init__( raise ValueError("Cannot use use_dir_lock and lock_class together") try: - from pipenv.patched.notpip._vendor.lockfile import LockFile - from pipenv.patched.notpip._vendor.lockfile.mkdirlockfile import MkdirLockFile + from lockfile import LockFile + from lockfile.mkdirlockfile import MkdirLockFile except ImportError: notice = dedent( """ diff --git a/pipenv/patched/notpip/_vendor/certifi/__init__.py b/pipenv/patched/notpip/_vendor/certifi/__init__.py index ef71f3af34..8e358e4c8f 100644 --- a/pipenv/patched/notpip/_vendor/certifi/__init__.py +++ b/pipenv/patched/notpip/_vendor/certifi/__init__.py @@ -1,3 +1,3 @@ from .core import where -__version__ = "2018.11.29" +__version__ = "2019.09.11" diff --git a/pipenv/patched/notpip/_vendor/certifi/cacert.pem b/pipenv/patched/notpip/_vendor/certifi/cacert.pem index db68797e24..70fa91f618 100644 --- a/pipenv/patched/notpip/_vendor/certifi/cacert.pem +++ b/pipenv/patched/notpip/_vendor/certifi/cacert.pem @@ -771,36 +771,6 @@ vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep +OkuE6N36B9K -----END CERTIFICATE----- -# Issuer: CN=Class 2 Primary CA O=Certplus -# Subject: CN=Class 2 Primary CA O=Certplus -# Label: "Certplus Class 2 Primary CA" -# Serial: 177770208045934040241468760488327595043 -# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b -# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb -# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb ------BEGIN CERTIFICATE----- -MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw -PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz -cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 -MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz -IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ -ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR -VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL -kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd -EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas -H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 -HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud -DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 -QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu -Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ -AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 -yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR -FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA 
-ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB -kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 -l7+ijrRU ------END CERTIFICATE----- - # Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. # Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. # Label: "DST Root CA X3" @@ -1219,36 +1189,6 @@ t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== -----END CERTIFICATE----- -# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center -# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center -# Label: "Deutsche Telekom Root CA 2" -# Serial: 38 -# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08 -# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf -# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3 ------BEGIN CERTIFICATE----- -MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc -MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj -IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB -IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE -RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl -U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 -IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU -ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC -QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr -rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S -NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc -QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH -txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP -BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC -AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp -tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa -IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl -6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ -xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU -Cm26OWMohpLzGITY+9HPBVZkVw== ------END CERTIFICATE----- - # Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc # Subject: CN=Cybertrust Global Root O=Cybertrust, Inc # Label: "Cybertrust Global Root" @@ -3453,46 +3393,6 @@ AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ 5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su -----END CERTIFICATE----- -# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 -# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 -# Label: "Certinomis - Root CA" -# Serial: 1 -# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f -# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8 -# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58 ------BEGIN CERTIFICATE----- -MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET -MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb -BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz -MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx -FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g -Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 -fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl 
-LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV -WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF -TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb -5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc -CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri -wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ -wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG -m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 -F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng -WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 -2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF -AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ -0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw -F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS -g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj -qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN -h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ -ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V -btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj -Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ -8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW -gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= ------END CERTIFICATE----- - # Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed # Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed # Label: "OISTE WISeKey Global Root GB CA" @@ -4510,3 +4410,149 @@ Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw 3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= -----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign Root CA - G1" +# Serial: 235931866688319308814040 +# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac +# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c +# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67 +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD +VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU +ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH +MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO +MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv +Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz +f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO +8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq +d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM +tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt +Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB +o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x +PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM 
+wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d +GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH +6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby +RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx +iN66zB+Afko= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign ECC Root CA - G3" +# Serial: 287880440101571086945156 +# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40 +# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1 +# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b +-----BEGIN CERTIFICATE----- +MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG +EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo +bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g +RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ +TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s +b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0 +WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS +fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB +zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq +hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB +CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD ++JbNR6iC8hZVdyR+EhCVBCyj +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Label: "emSign Root CA - C1" +# Serial: 825510296613316004955058 +# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68 +# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01 +# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG +A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg +SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v +dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ +BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ +HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH +3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH +GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c +xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1 +aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq +TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87 +/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4 +kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG +YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT ++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo +WXzhriKi4gp6D/piq1JM4fHfyr6DDUI= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc 
OU=emSign PKI +# Label: "emSign ECC Root CA - C3" +# Serial: 582948710642506000014504 +# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5 +# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66 +# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3 +-----BEGIN CERTIFICATE----- +MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG +EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx +IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND +IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci +MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti +sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O +BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c +3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J +0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Label: "Hongkong Post Root CA 3" +# Serial: 46170865288971385588281144162979347873371282084 +# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0 +# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02 +# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6 +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ +SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n +a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5 +NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT +CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u +Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO +dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI +VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV +9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY +2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY +vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt +bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb +x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+ +l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK +TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj +Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e +i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw +DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG +7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk +MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr +gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk +GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS +3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm +Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+ +l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c 
+JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP +L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa +LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG +mpv0 +-----END CERTIFICATE----- diff --git a/pipenv/patched/notpip/_vendor/certifi/core.py b/pipenv/patched/notpip/_vendor/certifi/core.py index 2d02ea44c4..7271acf40e 100644 --- a/pipenv/patched/notpip/_vendor/certifi/core.py +++ b/pipenv/patched/notpip/_vendor/certifi/core.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # -*- coding: utf-8 -*- """ @@ -14,7 +13,3 @@ def where(): f = os.path.dirname(__file__) return os.path.join(f, 'cacert.pem') - - -if __name__ == '__main__': - print(where()) diff --git a/pipenv/patched/notpip/_vendor/contextlib2.LICENSE.txt b/pipenv/patched/notpip/_vendor/contextlib2.LICENSE.txt new file mode 100644 index 0000000000..5de20277df --- /dev/null +++ b/pipenv/patched/notpip/_vendor/contextlib2.LICENSE.txt @@ -0,0 +1,122 @@ + + +A. HISTORY OF THE SOFTWARE +========================== + +contextlib2 is a derivative of the contextlib module distributed by the PSF +as part of the Python standard library. According, it is itself redistributed +under the PSF license (reproduced in full below). As the contextlib module +was added only in Python 2.5, the licenses for earlier Python versions are +not applicable and have not been included. + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations (now Zope +Corporation, see http://www.zope.com). In 2001, the Python Software +Foundation (PSF, see http://www.python.org/psf/) was formed, a +non-profit organization created specifically to own Python-related +Intellectual Property. Zope Corporation is a sponsoring member of +the PSF. + +All Python releases are Open Source (see http://www.opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases that included the contextlib module. + + Release Derived Year Owner GPL- + from compatible? (1) + + 2.5 2.4 2006 PSF yes + 2.5.1 2.5 2007 PSF yes + 2.5.2 2.5.1 2008 PSF yes + 2.5.3 2.5.2 2008 PSF yes + 2.6 2.5 2008 PSF yes + 2.6.1 2.6 2008 PSF yes + 2.6.2 2.6.1 2009 PSF yes + 2.6.3 2.6.2 2009 PSF yes + 2.6.4 2.6.3 2009 PSF yes + 2.6.5 2.6.4 2010 PSF yes + 3.0 2.6 2008 PSF yes + 3.0.1 3.0 2009 PSF yes + 3.1 3.0.1 2009 PSF yes + 3.1.1 3.1 2009 PSF yes + 3.1.2 3.1.1 2010 PSF yes + 3.1.3 3.1.2 2010 PSF yes + 3.1.4 3.1.3 2011 PSF yes + 3.2 3.1 2011 PSF yes + 3.2.1 3.2 2011 PSF yes + 3.2.2 3.2.1 2011 PSF yes + 3.3 3.2 2012 PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. 
The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011 Python Software Foundation; All Rights Reserved" are retained in Python +alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. 
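The vendored contextlib2 module added next backports contextlib features,
including the @contextmanager decorator reimplemented below. As a minimal
usage sketch (assuming only the vendored module path added by this patch;
the file name is hypothetical), the decorator turns a generator's
setup/teardown into a context manager:

    from pipenv.patched.notpip._vendor.contextlib2 import contextmanager

    @contextmanager
    def opened(path):
        f = open(path)          # <setup>
        try:
            yield f             # value bound by "with ... as f"
        finally:
            f.close()           # <cleanup>, runs even on error

    # equivalent to the try/finally expansion in the docstring below:
    # with opened('notes.txt') as f:
    #     print(f.read())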
diff --git a/pipenv/patched/notpip/_vendor/contextlib2.py b/pipenv/patched/notpip/_vendor/contextlib2.py new file mode 100644 index 0000000000..3aae8f4117 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/contextlib2.py @@ -0,0 +1,518 @@ +"""contextlib2 - backports and enhancements to the contextlib module""" + +import abc +import sys +import warnings +from collections import deque +from functools import wraps + +__all__ = ["contextmanager", "closing", "nullcontext", + "AbstractContextManager", + "ContextDecorator", "ExitStack", + "redirect_stdout", "redirect_stderr", "suppress"] + +# Backwards compatibility +__all__ += ["ContextStack"] + + +# Backport abc.ABC +if sys.version_info[:2] >= (3, 4): + _abc_ABC = abc.ABC +else: + _abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) + + +# Backport classic class MRO +def _classic_mro(C, result): + if C in result: + return + result.append(C) + for B in C.__bases__: + _classic_mro(B, result) + return result + + +# Backport _collections_abc._check_methods +def _check_methods(C, *methods): + try: + mro = C.__mro__ + except AttributeError: + mro = tuple(_classic_mro(C, [])) + + for method in methods: + for B in mro: + if method in B.__dict__: + if B.__dict__[method] is None: + return NotImplemented + break + else: + return NotImplemented + return True + + +class AbstractContextManager(_abc_ABC): + """An abstract base class for context managers.""" + + def __enter__(self): + """Return `self` upon entering the runtime context.""" + return self + + @abc.abstractmethod + def __exit__(self, exc_type, exc_value, traceback): + """Raise any exception triggered within the runtime context.""" + return None + + @classmethod + def __subclasshook__(cls, C): + """Check whether subclass is considered a subclass of this ABC.""" + if cls is AbstractContextManager: + return _check_methods(C, "__enter__", "__exit__") + return NotImplemented + + +class ContextDecorator(object): + """A base class or mixin that enables context managers to work as decorators.""" + + def refresh_cm(self): + """Returns the context manager used to actually wrap the call to the + decorated function. + + The default implementation just returns *self*. + + Overriding this method allows otherwise one-shot context managers + like _GeneratorContextManager to support use as decorators via + implicit recreation. + + DEPRECATED: refresh_cm was never added to the standard library's + ContextDecorator API + """ + warnings.warn("refresh_cm was never added to the standard library", + DeprecationWarning) + return self._recreate_cm() + + def _recreate_cm(self): + """Return a recreated instance of self. + + Allows an otherwise one-shot context manager like + _GeneratorContextManager to support use as + a decorator via implicit recreation. + + This is a private interface just for _GeneratorContextManager. + See issue #11647 for details. 
+ """ + return self + + def __call__(self, func): + @wraps(func) + def inner(*args, **kwds): + with self._recreate_cm(): + return func(*args, **kwds) + return inner + + +class _GeneratorContextManager(ContextDecorator): + """Helper for @contextmanager decorator.""" + + def __init__(self, func, args, kwds): + self.gen = func(*args, **kwds) + self.func, self.args, self.kwds = func, args, kwds + # Issue 19330: ensure context manager instances have good docstrings + doc = getattr(func, "__doc__", None) + if doc is None: + doc = type(self).__doc__ + self.__doc__ = doc + # Unfortunately, this still doesn't provide good help output when + # inspecting the created context manager instances, since pydoc + # currently bypasses the instance docstring and shows the docstring + # for the class instead. + # See http://bugs.python.org/issue19404 for more details. + + def _recreate_cm(self): + # _GCM instances are one-shot context managers, so the + # CM must be recreated each time a decorated function is + # called + return self.__class__(self.func, self.args, self.kwds) + + def __enter__(self): + try: + return next(self.gen) + except StopIteration: + raise RuntimeError("generator didn't yield") + + def __exit__(self, type, value, traceback): + if type is None: + try: + next(self.gen) + except StopIteration: + return + else: + raise RuntimeError("generator didn't stop") + else: + if value is None: + # Need to force instantiation so we can reliably + # tell if we get the same exception back + value = type() + try: + self.gen.throw(type, value, traceback) + raise RuntimeError("generator didn't stop after throw()") + except StopIteration as exc: + # Suppress StopIteration *unless* it's the same exception that + # was passed to throw(). This prevents a StopIteration + # raised inside the "with" statement from being suppressed. + return exc is not value + except RuntimeError as exc: + # Don't re-raise the passed in exception + if exc is value: + return False + # Likewise, avoid suppressing if a StopIteration exception + # was passed to throw() and later wrapped into a RuntimeError + # (see PEP 479). + if _HAVE_EXCEPTION_CHAINING and exc.__cause__ is value: + return False + raise + except: + # only re-raise if it's *not* the exception that was + # passed to throw(), because __exit__() must not raise + # an exception unless __exit__() itself failed. But throw() + # has to raise the exception to signal propagation, so this + # fixes the impedance mismatch between the throw() protocol + # and the __exit__() protocol. + # + if sys.exc_info()[1] is not value: + raise + + +def contextmanager(func): + """@contextmanager decorator. + + Typical usage: + + @contextmanager + def some_generator(<arguments>): + <setup> + try: + yield <value> + finally: + <cleanup> + + This makes this: + + with some_generator(<arguments>) as <variable>: + <body> + + equivalent to this: + + <setup> + try: + <variable> = <value> + <body> + finally: + <cleanup> + + """ + @wraps(func) + def helper(*args, **kwds): + return _GeneratorContextManager(func, args, kwds) + return helper + + +class closing(object): + """Context to automatically close something at the end of a block. 
+ + Code like this: + + with closing(<module>.open(<arguments>)) as f: + <block> + + is equivalent to this: + + f = <module>.open(<arguments>) + try: + <block> + finally: + f.close() + + """ + def __init__(self, thing): + self.thing = thing + + def __enter__(self): + return self.thing + + def __exit__(self, *exc_info): + self.thing.close() + + +class _RedirectStream(object): + + _stream = None + + def __init__(self, new_target): + self._new_target = new_target + # We use a list of old targets to make this CM re-entrant + self._old_targets = [] + + def __enter__(self): + self._old_targets.append(getattr(sys, self._stream)) + setattr(sys, self._stream, self._new_target) + return self._new_target + + def __exit__(self, exctype, excinst, exctb): + setattr(sys, self._stream, self._old_targets.pop()) + + +class redirect_stdout(_RedirectStream): + """Context manager for temporarily redirecting stdout to another file. + + # How to send help() to stderr + with redirect_stdout(sys.stderr): + help(dir) + + # How to write help() to a file + with open('help.txt', 'w') as f: + with redirect_stdout(f): + help(pow) + """ + + _stream = "stdout" + + +class redirect_stderr(_RedirectStream): + """Context manager for temporarily redirecting stderr to another file.""" + + _stream = "stderr" + + +class suppress(object): + """Context manager to suppress specified exceptions + + After the exception is suppressed, execution proceeds with the next + statement following the with statement. + + with suppress(FileNotFoundError): + os.remove(somefile) + # Execution still resumes here if the file was already removed + """ + + def __init__(self, *exceptions): + self._exceptions = exceptions + + def __enter__(self): + pass + + def __exit__(self, exctype, excinst, exctb): + # Unlike isinstance and issubclass, CPython exception handling + # currently only looks at the concrete type hierarchy (ignoring + # the instance and subclass checking hooks). While Guido considers + # that a bug rather than a feature, it's a fairly hard one to fix + # due to various internal implementation details. suppress provides + # the simpler issubclass based semantics, rather than trying to + # exactly reproduce the limitations of the CPython interpreter. 
+ # + # See http://bugs.python.org/issue12029 for more details + return exctype is not None and issubclass(exctype, self._exceptions) + + +# Context manipulation is Python 3 only +_HAVE_EXCEPTION_CHAINING = sys.version_info[0] >= 3 +if _HAVE_EXCEPTION_CHAINING: + def _make_context_fixer(frame_exc): + def _fix_exception_context(new_exc, old_exc): + # Context may not be correct, so find the end of the chain + while 1: + exc_context = new_exc.__context__ + if exc_context is old_exc: + # Context is already set correctly (see issue 20317) + return + if exc_context is None or exc_context is frame_exc: + break + new_exc = exc_context + # Change the end of the chain to point to the exception + # we expect it to reference + new_exc.__context__ = old_exc + return _fix_exception_context + + def _reraise_with_existing_context(exc_details): + try: + # bare "raise exc_details[1]" replaces our carefully + # set-up context + fixed_ctx = exc_details[1].__context__ + raise exc_details[1] + except BaseException: + exc_details[1].__context__ = fixed_ctx + raise +else: + # No exception context in Python 2 + def _make_context_fixer(frame_exc): + return lambda new_exc, old_exc: None + + # Use 3 argument raise in Python 2, + # but use exec to avoid SyntaxError in Python 3 + def _reraise_with_existing_context(exc_details): + exc_type, exc_value, exc_tb = exc_details + exec("raise exc_type, exc_value, exc_tb") + +# Handle old-style classes if they exist +try: + from types import InstanceType +except ImportError: + # Python 3 doesn't have old-style classes + _get_type = type +else: + # Need to handle old-style context managers on Python 2 + def _get_type(obj): + obj_type = type(obj) + if obj_type is InstanceType: + return obj.__class__ # Old-style class + return obj_type # New-style class + + +# Inspired by discussions on http://bugs.python.org/issue13585 +class ExitStack(object): + """Context manager for dynamic management of a stack of exit callbacks + + For example: + + with ExitStack() as stack: + files = [stack.enter_context(open(fname)) for fname in filenames] + # All opened files will automatically be closed at the end of + # the with statement, even if attempts to open files later + # in the list raise an exception + + """ + def __init__(self): + self._exit_callbacks = deque() + + def pop_all(self): + """Preserve the context stack by transferring it to a new instance""" + new_stack = type(self)() + new_stack._exit_callbacks = self._exit_callbacks + self._exit_callbacks = deque() + return new_stack + + def _push_cm_exit(self, cm, cm_exit): + """Helper to correctly register callbacks to __exit__ methods""" + def _exit_wrapper(*exc_details): + return cm_exit(cm, *exc_details) + _exit_wrapper.__self__ = cm + self.push(_exit_wrapper) + + def push(self, exit): + """Registers a callback with the standard __exit__ method signature + + Can suppress exceptions the same way __exit__ methods can. + + Also accepts any object with an __exit__ method (registering a call + to the method instead of the object itself) + """ + # We use an unbound method rather than a bound method to follow + # the standard lookup behaviour for special methods + _cb_type = _get_type(exit) + try: + exit_method = _cb_type.__exit__ + except AttributeError: + # Not a context manager, so assume its a callable + self._exit_callbacks.append(exit) + else: + self._push_cm_exit(exit, exit_method) + return exit # Allow use as a decorator + + def callback(self, callback, *args, **kwds): + """Registers an arbitrary callback and arguments. 
+ + Cannot suppress exceptions. + """ + def _exit_wrapper(exc_type, exc, tb): + callback(*args, **kwds) + # We changed the signature, so using @wraps is not appropriate, but + # setting __wrapped__ may still help with introspection + _exit_wrapper.__wrapped__ = callback + self.push(_exit_wrapper) + return callback # Allow use as a decorator + + def enter_context(self, cm): + """Enters the supplied context manager + + If successful, also pushes its __exit__ method as a callback and + returns the result of the __enter__ method. + """ + # We look up the special methods on the type to match the with statement + _cm_type = _get_type(cm) + _exit = _cm_type.__exit__ + result = _cm_type.__enter__(cm) + self._push_cm_exit(cm, _exit) + return result + + def close(self): + """Immediately unwind the context stack""" + self.__exit__(None, None, None) + + def __enter__(self): + return self + + def __exit__(self, *exc_details): + received_exc = exc_details[0] is not None + + # We manipulate the exception state so it behaves as though + # we were actually nesting multiple with statements + frame_exc = sys.exc_info()[1] + _fix_exception_context = _make_context_fixer(frame_exc) + + # Callbacks are invoked in LIFO order to match the behaviour of + # nested context managers + suppressed_exc = False + pending_raise = False + while self._exit_callbacks: + cb = self._exit_callbacks.pop() + try: + if cb(*exc_details): + suppressed_exc = True + pending_raise = False + exc_details = (None, None, None) + except: + new_exc_details = sys.exc_info() + # simulate the stack of exceptions by setting the context + _fix_exception_context(new_exc_details[1], exc_details[1]) + pending_raise = True + exc_details = new_exc_details + if pending_raise: + _reraise_with_existing_context(exc_details) + return received_exc and suppressed_exc + + +# Preserve backwards compatibility +class ContextStack(ExitStack): + """Backwards compatibility alias for ExitStack""" + + def __init__(self): + warnings.warn("ContextStack has been renamed to ExitStack", + DeprecationWarning) + super(ContextStack, self).__init__() + + def register_exit(self, callback): + return self.push(callback) + + def register(self, callback, *args, **kwds): + return self.callback(callback, *args, **kwds) + + def preserve(self): + return self.pop_all() + + +class nullcontext(AbstractContextManager): + """Context manager that does no additional processing. + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True + """ + + def __init__(self, enter_result=None): + self.enter_result = enter_result + + def __enter__(self): + return self.enter_result + + def __exit__(self, *excinfo): + pass diff --git a/pipenv/patched/notpip/_vendor/distlib/__init__.py b/pipenv/patched/notpip/_vendor/distlib/__init__.py index a786b4d3b7..a2d70d475a 100644 --- a/pipenv/patched/notpip/_vendor/distlib/__init__.py +++ b/pipenv/patched/notpip/_vendor/distlib/__init__.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2012-2017 Vinay Sajip. +# Copyright (C) 2012-2019 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. 
 #

 import logging

-__version__ = '0.2.8'
+__version__ = '0.2.9.post0'

 class DistlibException(Exception):
     pass
diff --git a/pipenv/patched/notpip/_vendor/distlib/metadata.py b/pipenv/patched/notpip/_vendor/distlib/metadata.py
index 77eed7f968..2d61378e99 100644
--- a/pipenv/patched/notpip/_vendor/distlib/metadata.py
+++ b/pipenv/patched/notpip/_vendor/distlib/metadata.py
@@ -91,9 +91,11 @@ class MetadataInvalidError(DistlibException):
 _426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
                 'Setup-Requires-Dist', 'Extension')

-# See issue #106: Sometimes 'Requires' occurs wrongly in the metadata. Include
-# it in the tuple literal below to allow it (for now)
-_566_FIELDS = _426_FIELDS + ('Description-Content-Type', 'Requires')
+# See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in
+# the metadata. Include them in the tuple literal below to allow them
+# (for now).
+_566_FIELDS = _426_FIELDS + ('Description-Content-Type',
+                             'Requires', 'Provides')

 _566_MARKERS = ('Description-Content-Type',)
diff --git a/pipenv/patched/notpip/_vendor/distlib/scripts.py b/pipenv/patched/notpip/_vendor/distlib/scripts.py
index 8e22cb9163..5965e241d6 100644
--- a/pipenv/patched/notpip/_vendor/distlib/scripts.py
+++ b/pipenv/patched/notpip/_vendor/distlib/scripts.py
@@ -39,27 +39,12 @@
 # check if Python is called on the first line with this expression
 FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
 SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
+import re
+import sys
+from %(module)s import %(import_name)s
 if __name__ == '__main__':
-    import sys, re
-
-    def _resolve(module, func):
-        __import__(module)
-        mod = sys.modules[module]
-        parts = func.split('.')
-        result = getattr(mod, parts.pop(0))
-        for p in parts:
-            result = getattr(result, p)
-        return result
-
-    try:
-        sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-
-        func = _resolve('%(module)s', '%(func)s')
-        rc = func()  # None interpreted as 0
-    except Exception as e:  # only supporting Python >= 2.6
-        sys.stderr.write('%%s\n' %% e)
-        rc = 1
-    sys.exit(rc)
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(%(func)s())
 '''
@@ -225,6 +210,7 @@ def _get_shebang(self, encoding, post_interp=b'', options=None):
     def _get_script_text(self, entry):
         return self.script_template % dict(module=entry.prefix,
+                                           import_name=entry.suffix.split('.')[0],
                                            func=entry.suffix)

 manifest = _DEFAULT_MANIFEST
diff --git a/pipenv/patched/notpip/_vendor/distlib/t32.exe b/pipenv/patched/notpip/_vendor/distlib/t32.exe
index a09d926872d84ae22a617dfe9ebb560d420b37de..5d5bce1f4a65f0bea9636a5a825646c520903df4 100644
GIT binary patch
delta 16450
[base85-encoded forward delta for the t32.exe launcher omitted]
delta 16218
[base85-encoded reverse delta for the t32.exe launcher omitted]

diff --git a/pipenv/patched/notpip/_vendor/distlib/t64.exe b/pipenv/patched/notpip/_vendor/distlib/t64.exe
index 9da9b40de922fb203df6b9a1d0ad4139af536850..039ce441b721ee180d373e5590289a6aa9249a51 100644
GIT binary patch
delta 26052
[base85-encoded forward delta for the t64.exe launcher omitted]
zp?w9e5G8))?TRsOd_aiSJzP208Be;0y#gZ#Zpn>`<!<^IbmO~`2LEy613f$h&LiG6 zj&~KP#Vgtx`7SMaJlcSf`I$C4`OhkzHSz&1)qeMLj%)h`$DO}X@t%<n>C#*-r!Ub8 zaQAgh`iHo1rX_Cmt?Hbs426z)4!J@cb!V~SYXje=LN@Y2#pJhOzWhI_&TALP^?}RP z)BWFR?FwYt?S7&)|2Q`;G;s<S%8%hZ8$Ifcb*@z|h0b|S=uwn!jli}0KW03qhU313 zL;Egw*T&a2cRs2j&z<+KxZutQIZ-lojun?&F)i^Pe7hDMO}+qg5H9-9iXI+(ND}FY zhSU6u=*e(>^p=eVedH3y<?DXl?1V1l$(c8M6uRdbb1T++@UufGn>ZNM{*lg{uQieL zO-$f?`8du^tw}{s6CZ&u#Y3Nro?PC~mGhedH>?%sm*~Rz#e3J&sPgab&P7=ta#2$r za8Zf>a8dkiu4T;WdMSt5%{%g(V{eo{gx9(t{+~dLdm}EeEKhhV!7IOoz=|U#-uM3- zr~ltL{eQqX-TqQ*el6cw;&LZhrwy6<!t_~F`#tmg)S-n_^G9-h`JvVs_?I;ji=Qgd zXZj11pLucW_F!**s$p9IU+x;5bWbz95_OEzF_y5x9>E8C`ROcA9bGEEkKld7F5J~( z*6CQQV~vgnD{e;cgR_?D-i12O)6uHqZXKW3af*)Pbj;OJtoSGrxmt8@kdB@@Hr>%m zBUk(kG531idzP@`P9z^w5go<*g-z4>xjLrn7_FnFVssQgvAFWKmfC6^%XKW#k<-Ns zb$_0YxjH847_FnHj!l1S>0a0Is*ZI!uGi3sD<`7DV&#LDI1H|+Y{#GV%5~H>4fQS> z=2hTl;zIdh+xJKF8y);*L1UlQu~~dbMOiFgJ)ln3a{j-^isc>o7mLMU{IHZKmQFYs zzaDh~7>xbK0`M)sR^Bj%K^uWD!zF>94O{_-a|9I^<4kK5=yKo+>`hh^ALti^sTnk3 zI2_5R11G|jV;hwhgw10bZ1Sb>qyrgT1PZYgcoQxN^le~Q>`#cF1U#qH7l600PqE<V z8*T#^S+ooYdxvS2!FG?sPEL(WI24WwF$}m}<rni@C5UUP2byS$(nVqgrP2ZEXKI>2 zZw3AU=h%kl&H_!?8W=%)0=L7(g02Lffm`hh8K6@KjvEKQ3-Coas!SoUbBtDqIN)!^ zu~0RHWMFznL<UWG3oa4AUU(b$A~p#J!7l{HU?)J$6btMW$8p!ej{|-HM;clI^uaD5 z6TjZ%2Yd}K79Xyb0Pn%_&jsBCbWP9-QEUV;A`xwj05S=NcjLHuL<75G2e1ru67X&8 z1h#@+2E3BOaTh>e1)5W}g#3VS!aelkxrM+_;7AjVK>9s9QafRsP7}^bL;F*y%i-Ay z$KgZYD&Sc-7tr;<pW%$48-X|BJVD<E4(O>RnhpF7j%q9et-UnO5l+eAIB&=khV%v> zv<0{c?lyjAZBy@Jj*IMz-iv@}pi_S&02=25TpS#gI!{EW%hE>&FkyhE>O|lMxQ`)U z3A_kL^7X)Xu^%}I{xV=twpKX8bU3Oo;mjQLtzrmSLEM3(fF|IvL1;77rViL@u*Nq6 z!*x0pSgg|}z%6h|klzZt0Y{o>1g^@}=+(f^L(ul%PXp5FA?39jSnNL>yC(z$0Vl#y z4W<D<fFq$5z!yiMmXIj~{s~t_831RF)@Up6zD_>`F2DxpEMy2fjfI5<Iu5u2jymrb zz$Q4PU#t@Jg93ikS%B8@Xb#YX58!ANcu(NCf8ea(KLn;v)M`c;m50s<el)Of636XE z2P9k!M;)*n_%|F?<~Go4vQ|lN;7gOy{_7#M37*Grm7sCtz}csuW}sIC-<pcrfL;Xr zq(IATJ@C%c=tST*0S7#zPdLEoh3K$U2%z*5G6uZ}xEYSLyA_x+Lz7P}2H`#v2LTZB z1onrU0$K#V5BCtgbOo@BRa0pqu>LjF6nw&2r~s*SHgIGK$32g@qktvzF?2x_dcUq^ z90$Au_XYS(z>W*h{-mxr5Z}O&sw;ukQcb;t-LdIA4Vfh1bU0FDA@FNBwKl+7ohDqd zP)mofwoIE32tSatiEagO$s!D6OxsHrVf-(&YjZeZ6&&?m!X}+2{CqKbI3lFKjr|H- zCg=sgSnMUIfF^tlHxD$o3_E8yYWqB3hYvuL4DegH8qf!U;U7Yy{+R#i5$_r}4mGO+ z-d>H;QD*`E@-fs0`YJGYoi>LPj`>96j|1+3Bdyc{OFqTC0lFNRv>pz0F3@)aIxXlp z;ID8ZXc_2M{2A_};4uQ*!;wnI0T+LPwgevsl-w0K%HS&S&o7~3(2YRnuQYiVU{5&G zTslzP1l2=^@EDw0IN++UHF`C$o*eQjCSuEGtrKkp_SmBLOyG*0Xa*!iICmFnk4`ra zICM8M0X+=39F8(r0er4n>zL02`5LW2j=<mHmO@?zrX11aQ-L2GLHkoFR*>f?+8;t& zf$fiBn1N0N&Vi%3dLHnAP9Fq5fTN5b0&|XQBPAF32OLd4*Ma?hLQ|qsi@-Z@)WMs8 zYtEwrpg%s3VdHl}Qw8B3xKRj@fsL0qZXsyGt@Y?Qpj8A-ovQ};;?Meo1C03<8AB!( z_$eG&O1pu>en(p16Nbx}8^DhM4uz{PhG!UvrQ`ukcuc1+0J~m8M*}|zSp5f@0CWv7 ztwEc((t&f~;=sq*HMbIu(xLOo@8F(CK?&(3F#@&%A)OEsO;}7vfh0sor*cFS(g_^V zgmh>|G~q&>rqj0dI!#!s(}Z-uMe>BE8yf8kqysJD6VlNM=;u}989ks7xJ0K3&%;qk z3F!=nA`{Xn6w!pyI!#Dt7{n)>pwo1Y@tQ`{eN}t@Q<on65Wku=b!*P9sbAByhFhDy bR$NO5OE7=9itt5z2f<~{_UuLcR)_xqc%FZJ delta 25573 zcmeIad0bW1_dk5jfy)e+=^`^11Vuqba6l0S#X2Y|&Zejs4y2}}7jp)^QsNOKw}n}$ znU<NQrJvL?g&aU7#TjQbzjf?dVNN)n-+P^N!1_L)=db6V=Z~K+H}AFAUVEO_UVEQg zxY40-y+i4u(XW<1y=C`&TcexXZ{ju?{=e4_-BzXIkZm8Un7yqISU2zWZMT89?38Wa zs{Bk{e!Px3Dh_%rC-;`!yH^7vwof@TlGFdya}k`Y;KFfN!T4HZ&Tr2G`L=eB%LUe# z@5rxbi};oX&$}EK%og2<HPmz5dg0yhK<~De39~JTGUf{Yc*0#}E<&is>P{db#3jPr zV4Z}Pd>|WxziruD_}i6zjlauxoD<&QgEtM}IQgcL<1$LpBICpoF<UG#l|KbDry0w! 
z?<Q>WVxQRe;62%Odvie&0uX+)ClBZs*E`{6M)8C=+1HihQl;vma)g;7Vq8@r*F0Ag zQGyK86%tO#8-oxPe)*k1jmXHUPc7N^6k<h6s!@E}JE2@UFE4;th8k^tB%dmsPu*?L zb6mRgv)s~;D<DFYYGheWyjCnKw}hb}b(D!O1=yA$@f;;R=9y^s<kn);{|B^;y>v|e z(Ot_Hd7^Qi6rphSFS2#ivrUr@q)X>T>A1YdMDoWe(YeSC)e}n!Oyi$`i+W16#`h}Z zKcAtF$X=9Ar|$MeX7W@me~K9M97T|)qi16l)Kkr47TjL+7uDuPw(p`XsJ4x@JWmx( zLC$J+rqF-oR`|?IQTmr;#VGxfuFMED?lJ^DVOJgeTgJ(M1ycuRMW&=n2h*idiRscz zkn_ZJ>6g^8sbf+{r)G^lz(O5^o!@sx1xnhu_F}yp!xI}Wb6iomamf;teLqUZ*%oA! z7#2S!nIVz6Vo65i2>F`YVdRzIm1^bwUhE^smTf;!Bj*Pu9HFt3jlS=Ui1I0FxT5m> z!#0DOq#8@+*d51sejtl<ig7POM5z`cZi?wOmgUsK{b)MR6&<lSC7e=B57;|SElp#> zFvJoLip9P2MCnn&se~hn>34R_DU$yWt9J@1aD0TUx;{~qMg8izII4Z`y3{?TsKWQC zA$snZVoJC~^)NkcKz3EKJA@NXQI+!2N_ytX3;J{1o(GDeRHjAd<syz^8mUGYiteqF z*U%6vF<sLl*F08~J@+B3m^zZskdmQQB$cm8UDTw$B&mC<l&6+d4GCpxLT5Fhw;&{z zxPK1j_tfN-y|;2e#q<ZOb51T8a8WIEDTFhmO2iI@e43O+%|9e^1S*zR9O5GKh>%92 z=#y;4RIHODtVdDAphh3{Vy0qhuBAC%E5Of>6a}%sKX8HLs;jHT;;cv~v3P2vr&!Fg zesc2{rQcEYVGtLkJBld^{e`KX`XeiM2`M-$Pw=KulOF(?K$0ox>!g@us<1>;GQV0Z zx@2)px4R-r&u^We-Y7Bg5KP_ergqJ7+33_b#J#{TD$kG()0*Lnl8Ge?{f)bf6EPgT zQl+zUgBQAq+6f8UVOkU*u_#1B6#0J|AaAV{ba^+DKLUA(%A#~_Vp^A$mH^`}SK@!8 z@m)mg0RaKc^MBI=R;vLPr-^CB%_&fnPRLmpE7}zN`yq<27EqKLXwDbBflMb_yGP`= znrKam(4?M03KgI-4Wi)2q^ySpD&GsiNR%9@kYFnaq6k#cC@P?rR*nFQY%_vm+*Kt% z#j1`)&W#FY++{bhs5~P7e?-X=nJE^JMtft*i_+9cQIsl0>p?S3^sxt6Tv%YxqT7Xb z)FX!cAbln+iOd5>F>Pe$+{^_EXmsj6^n^NHoij?>#N5XU5Rp-mE7-iHbCiESEq{-6 z>mec4xT`mZ66D{^DCroR#B)E=VwP|uT{@R09a2nl&U4(vF3H{qk!tc?A><5FwEixL zCCekT1%&o4jvRK_u&p)EMQ6(A9yT^|oYu(ts(J&HQY^7*>;6-=);+9%S{W;LiOHTC z8ojU)GHFX&idc9uW1eJ42c&a*Zaw3;H0cD!akN;R7HL*YSI?=#Ox>3rKR`Wm)GnNa z$Y|y9^E1Vg`H`6z4dWv-Ei~$+b8=7Yg=le%6Z9JfP`*6TTF|4p*3Z8p5gMp1kb=!a zYa(wg0=kF>q9ZlXuM@33!nGJT^%(cCcr?bS^JwhU7cpq2qAO2i6pxQgksY2kHqB)! zb_VS@GS&NQBtzYwqPEBbw8(AB03oXol2D#*J))#b=#pn8nZm@sw_yf~4^L*4q!L&4 zKVu&o+Z05gHxRj!Qm4clQjPB&M4ECGlF2|5CT0{{BI7bj+^GO)Uu<zGod_lxcWR_r zUQZhdrb-ItEY^@}(Rw>t_Q8foo~C~jCmO?ObAfD<`@8-;m+vK3RCpREKCDobeBtf^ zvEqp5tyGrf*~#tCFf4|gA7tfuY^`UDWcy&sqLw-{lhze?d5bFI4-wmbbWYp+2)V2& z7V4v|FBvFaFyp=agp>X(z^j`_3}k+wgp2*<U7;+;D^hqA%8I>`BG$+99Bn%CX?4|T zCVv^K=Bb!I0|%?`VD_t5Sm<dapf7GgUD6^`EG9XZ1Tj!t<uR<ecQikmWq5}r4-Dry zi=R|jh0KZ!J|U>ElbjL*(PBHyMIjgyqDwC)OOPn_@|2dUV(z!W&3Bg5*=Fx4?yrY5 zwZh|IX7+i*`)(Wb@Gc{b5VLVtt^7+H_NmX<&}EoK`ET|^5$1|TNAd&Z*U?;h`#la~ zW?xqDO)F%FFtcn+z$%IobMm70pcJAik*+AYFNY9z8ft|LHN>f>wvO#$h>1g8Jh0)& zj}ROAxZA#z=LS(G)XwtsCQ_6R+q~t|kdj9>F%!yJyvbxdWaPO6DT6UQ%#tB7lMOS4 z7nm}6PUbpOs~B$epfZ-{N0wwdA+O?)h5-otrIQ+1tp_^p4n&T&SqVqb$-|MNSnSJ- z)}P`;YlV}j9J4s1ao0iE_AAs#$P9o??-J22U8>3`G4)R7p*fa|6%7XK^{{mIz?78S zqbtW1CU1<!5@s>TC#f)!dkf0XdNBIdIBWpbTB$|p0=Da|9tcUgFr&LzVu<r*)BIWs z^+{}vU!XrWxF*Sq)<?bze5@6o^1dWi?HAbWxVFKTm_pR}M`v_I{7<~tGr!o@V^IQ> zKTcGlRzr@Wx*NZr;crCzTRkbTqz?toJI)3->(!yV7fO(D2pSRA?FaoN>~Mcm+0eg! 
z^3RwPjU)UO(6^?eUh=2xXtTHg@9vnvIdy2`9`an0YmK(VOqj-e{JZn**dYIxczgDN ze^=qFX6&qgOt4$B+TGq56{2*G2EdALq_NrD<OwV|;2XY@9S`W^o{Jf!kGK0K78Tfw zk7Ta}TKS#qkH8u60e!H)NQX19Smk1ez*Mq{l&q#~S{oG06!b`pd}|i;C0a;^uy=z* z#~sK?-r&pXg7UpP6Ws!OwVulkA2um?o!eZWCXOTX4vFj82?<kzt*s5uEm2|)l&+&< z#Ule$*%KXk&UzzEoN{VJx}9$oI`VZtj1n&p<3WfL)<(mYj%-Ot+iu69*62Bn0%<+2 z6g|jqO-0K>22yQ5shS3triGo56Hr0S73n<M$H@K+>DhdI0FQ}!#JI~|w6#!mmJDc{ zDKv58(xeK-WUXX_L*v|@VW-zKTE^Z9tqf@Wg0Tx5uC+?JHIn+zg-s6&<WI0=Va<ht z1hy@#AHRe>3me2cvrKaWf0ivV#|R7J**E54Zga2=>4`cppYVZvFq;tG3P-+o!aKPC zrx$IFm1;K>vzqXJ?m5_lbn$w3);eN%!IpSrDM}#;v_?sHj7zK-CeY;a6T~hd9$;GX z<tanKDmKKS>ExC}s5|F4%fki}kv|-9ACj&C<*%<;^uY2G9iu`Ji0ws`Trp5B39>u+ zlPYHgfHTqBZBBB&Q5$B0JeCQ~2Ss#f$8n_zm$2g$Jutpkj)Z+=f1KSKNB2fIHmiBN z$pZ#qP?u!$-(w@g+^&|p{)u5!>_AJ2ulyqxD1DXs7Nq2F3+yKyN|(-%I$n+y5K?~7 zC@gjv#Vz+ZA_;jVRzvB4%|yzuxp%@5bTyqi<cZ7}8O}b6jEm30yuoCU8sx938PM$x zsfbf0U(#B_U_GJOd_<{lG*#6_Hvhq%M`roHdPh-8NxCBjI<>?x`8PH#D$?)Q6N+N7 zPq<`D6mtxw8d7@PPn}?!quLkvpwK8{gf)4p56=l$pjtJ@vW*3vllPk&7jOf%3ON@G zt|pqQi$0Hl0EQ7KuWAy#h^X?WQD|CuEK)+hx$HqiP7Z;HVp8^_LdAx{cIa1WB+l9J z-1LU$?N@YRClbDcZBVuh(^+)RlCOfL4s|&TrNJiQhz2<VW`<;#i~T;M*f0+uN~SD; z<UR<0<&(FlB}hlZ@S+IWl<-Xau{Dz#tn;HIQ?PG>KNPHLc?$lP(pr=SMb~;TZtMX~ z+jw~tQ(6Q^T|q$=)7bsgCx$9C0x1k5)O}zGj-_;HmG`iemT?73A*rf{8Fc>1kRV3W zIUC(PX*a39qO>G(DXN9)521ugbg#7>=Oe>)iY^(ts#zGmr1co`rJDR!1XdargXwI~ zl~)?(gTjQO2(R#3wxe_58LE5o6w~B=SceU_A*i+B?;}Z_R!n`s$5@ccscSmIV@*B` zAuiuVlluvBifR0hbk;>)7}JW$VIRwg-qYzP=rf!p6tVY!k}Z%#pLnPTO;P$;F>L@_ z>T936o643holUnpAunV}ty(!mB0afa$K+OSyf8U*$L!W0^2SYj>C|SYR^zlhibcf6 z51p#=3@Y!K{P;K6Z_4xOSmk2;{x5G^f9$@ms3J6EXtbYyA>s)UFIx;m`l})|^GIYT z`yw{QbL?0ut+D6=v`EE}>c$?$4hj1_iq-?Acz&cgDS2%oR@jo;6f5T;8gp7So7yHi zYj&g>amjWyIa5b$>Z0WPH&*T$7-<<&NON$~CHuP%Qv9trOi48eI*%Zm0a~4HzEXK> z4zk=c0`cXyL#a$vup%LY-&?Z!Hhw9~Fgj4nP^bu%Eh%xgN3w8JO?nsn>DI_6oGlXa zUudBXw680sySr!}gsf`GUTPa3G9MF9Bu!d9v{_z^<zn}ap6tE0%{wf83<)xN6xG?& z$)!R1Te@I<q+tF>VU4klkIcis$cVHgoJ!rjiVEw|n%!<2<UdEvXp<8<RXs>xLB#l% zFS4+9UHCQZm3E!FT=<R#m(-w`DnU^yqGH+!67}*dQu5ubPZUhvV{(eRVwX?}#Dr@3 zI;(2esrQEeV6ZjQ1{*0zP)m9sF7*6=8V@Xy{==c@#fj1_wAA}hc~Yed>9pqkjS2c4 z>lK$PeAU3diW4V(i2aAwYG>#}`(ewm*^Ab4PfGfLdXkv;kMK_$!+q6oU75Tf5C4Zu zL6ZzLo9+mJ+BfMD8`Zugd#infF!F1*rTwe%+jk<@Vnc-+`VjM?m124yoNf*?15w^D z!BwrGse4d&^;nR%lXZ`e=L^{z@zDWE2#1o2?MgAdy+bR08*B0<Z}x5c*yc8j)7}Y} z#F7P`sU>|Ka9BQ&Ry^7*qj<Fa#JgXLCCO_ZvJMI3LblzfcBMYfqZXF#Na^;}XE4Y3 z?qJ_0Wb%)hU59Q$`_U}5LmR<m6u^gBJD9QEZ|v}?FyRtQ@7PgT_?Q)TY~Q6Xs%_~> zTW(Oj;)*@3#Nb33V10`dld3Rq`dudV$`AoTvG{F_V+0y}AF<y%z7%Rfm^DfrHr6N4 z^P?&a?_&tj%AlB*ZfE^Fg@#UTVhqFVvYoL$d72MPE0*p`L#7j3)~SoI=x=tYQ)?ll zf!*np*6#Cd+FbmVa-t)PbV4!B)tQG>X8&LszgP!3`2#k-^B!Tp0}D$G@hCS_w+(q6 ze?s;?V0{t~@avdImtsDjt?m+4V7vpvt>TOmq_l$Sk)Bjd^{b_Qe=RZU6}ty-Re=zU zMVLw41B2@Wj^m9*f55X3Is^xW<i0;`CcW&74Zpy`AoX%8He6K+w_?KuBF;l29P)#r z6r=3c$A$$aG3TgpS6W}zt?Q7msrM)wCn|$`HfXI3bI(&5LI&Jp+q(MsIMx&YvW1Q_ za(@KL0j#!bC)1`t)HvUF53O#pGt7ULhIay4^Q2dVCHL6Eq>qGO@3P=-6NC?+Fl)Eg z;>@$uaZ)rj(=S=nZ4&lQjN2m?KWi5pq~(GCkQk~-c#?FNfAHI!W%s*96gX=v5yATl z(GX(H3HkmRH3OW*RPV2v7l%GhzNpb7D5%7+`)>dSQ=oNEHaL{^h2daH5(&s(k#Nbp z5&Ik{;S$3F67TYIy~SIUjH)VRbwGIyauNF|IY5|lhV4oY75biG=aW-~o%dK^_c-C( zJ1nDndtuvsR?t1(Z2MeWT3}L;+-G4t(;Dcmm|`}AbRU18o$MYVT&Q68ySEcI+gOVp zJ%xk!*yJ8vJu^Q;C%Z30&(KO2|2f;)BfRric(LXyCJ&^GPQlFg!;M5ex<D~`6VD0@ zX0e^kQ8u4~73C5Q*}p$yZYe_w%1=@w^%bce2KigfH$wAm)O@*`?|sder}>s@zS){@ zuIiK2mRA#cO=GOrTFkK=Wl(49K=AjgBlUzFf<p+qmJ;Z7`W6*ipV1Do|4HVO8teK1 z&1?xodzai{sj1ERy=+2itkb+rq*HwSH~S#f95#=7f-2q`T+})g_HT?yHyVMawwqXO zYH$8+7TQx3JT9}`o?ZA$Y(vj3A>+2t0+6&3kr9c^ygk5C)r`=Mtf6O=Fm?kA>(#kM zR~t3XG5M|06nspcrK1ZJP57}|n@8Qc!sho{G;|*+FwzB!KU9+)pyh=<)FL^Np?(7~ 
zI*vi45>s2sw!~fe6eDXP%85N<B>bn7;eSl&mxP=~7#7qspR%=!{99O)hf~^g5#-*V z67tegHj)N^Bo%fr#nj1*m8PY(ql*pRxP(@oO2ahrp#9U9M&*Dkx@0UmL){^{@5C&E z0qB#Jz5Se4sPxiLSZMF=vEnBbUMr`Oox0djfl180n<~&&3xPjU^@o1KmiBHx>W2+f zQumJl@-DPG>bF~CeL<{^V3qbRx@j@)A!SMSgxU{pev3-OXsp*%IxXO#dck%=KA_qK zF*+x&W4<pH2*umjdoT4CCf{I}UuqHZJ6h1%fC}a#W=VfL`2rYdKX+?#=nWS3vVQ>? z&QtgLLZQPVSA_*#lzvW?&d3$mFcXfD?&XW9umrayLG|iwVsXb-XdbAFifPzdaA7k2 z4T-i!3bZ9i>YY^~62Mq7y*N0Lsn9HUlVL}pORXJkk(o&;7p)E5X1z?D4d5UR*X``s z%ON8=plknI0ym*NRzs{DJBWII`&nLur49v1g;i4LOxHM=45kdorSA4Llaltc+<zoX z5u0@?p+x?7(PsaXsg-sDvGfa^aUZi1F-0&AW+z105K(hcS{|7q0HGD_WRJu)L5Gnq z8r;%s4_XvGtQ8}~n*8({OHGdm2!F~o+P=tABe{Emf!X*mo1flGSnx5cNN?%6(-~zo zaOh#_=VJF2&a5H5T~IzTtv`)`3Fx5~i1lH{A;~?>nf2_GB(!v91%3Q{|HAGpmMo>n z7>{;mi%nOY*cW}$yvr!STIrN@dj3+IgB0?%6I1%cc)EO~wsLPV=PwM)@lLF3-?l>R zhir1+=7Q0Qz0)@_AZiWOR7O83mVPw<MRnp2`;gW3Z6_41VO|*tPAQHwLvE~L85up= z7%)^5{(vTvYOTg{a9qx+qGdt73W~-VJCe>FA0b&O`C<dxosl1ygV8lBf>v1**l8u} zWVByeQb^wh_Hw^h+AJSIMNs3nqeFV@E`}=))fmSW(>otfjJ0}<Ll4={{j%Gf{(r{E zY>FZLD@Ni&HnM-e?4iwgZovdG=PtCNR;Z0=?N-lOGNIYmKS;$P!BZY^4$2W4!9l6S z;Um)e#CLvs2tRdzFr+jMf7og&s{3ym^Nt*GKs^+kcVLbKUNJ89qtL;gWO}KR=d$So z@|>K(gXaeOZNN+3CrosnDa7E($oU0{uQRY7nSL%O4yz^uLxF*f&FtxxWZ=0zlJ5<y zuuz^eM+qB$VJ-vB#u>D)mAF-j#YV+6`aRZV;PAe|zaV)Wjbg;2KP*Akc}~2=gqfh2 zK75bL^0Pd>9GzorjtU>}6qV!oXRx6Db%VnCHqd6}YE9zv9Td})_t>R@Ej_nTklhi~ zxC%Od>?-CxC_-3wn{^r#B@Dd5Mh^<-XRv}ntvlP8IxCyeJ}_lJG@MRXq2Z{O;)Dk_ z_w?edXPnJJ&O{KbA?_BWtC+?xRyQbCSbv>)3~nnltYqB>clBFhK$bX>nB-|!C<!ze zCqH2;2Iufo*x!TScguy^no;r+k0Cscts2tW98Y>|VuZ2iZ4!-yuh`$VP!0kC8UHMH zbx5M?#Y?JCeu7-X!iSmyzK6dOk1wFry2@Xv9yO<~Y{1a<{Ew`DXe5h^G<)v7s3?ub zClNHuj*K-H5>t))Q$o3|lKBklBS@ue?64&E*|4SjtE}DdApRxRcX-d313yxOmqQ(| zwRp(?(R|tP<-2086i-l_P1O}!vE9St`L)b8JSsN!36_@YVPvuvryLm}+PVE?b3|nh zUV>hm>SVSwOE@JR$3?avSFmm)TJg<yOdOHRyZwYwt2fzz9T#7T<K5nZ>ZiBX3FbGd zbwnl@YHKa0*6ImgvHz_?qC1h6SmJJFua4^4^&ZqZh~;;Xe+QLTY__$A*{BsR&f3Aw znja#c2D_;N533%v0SB{lqq@2kzF@es;H*S`IUAZaL0J5N?ahi1JkGPathHU@Pea{O ziW6mL5S1KRlx`h@8kwLJ9WgH52r9|282X>lx*9aDCNiW3+(}I;W4lJTc0K`R4RJT0 zV!w}`9I*QjntJNxMUdPMLAn7jjlDj`-}xHWX2dLgpRE}4iy*ztW{!;%ir!-%jNKdj z`CDoqxWL#_q*4>REVyhLhh68ZRqT!IbpI_QaaXWlgbO&sSHWm!v^j#zSjA3dm%_rD zGj29t%r1{>*?Nu@i<4A`-N~ew$bgEOzUMgF@tAV!D0bGQ1JdX^sBCvuvNo?Ca&uXt zX6K=pRxf7HU-c85Rx-bwyojsNuD^FdV<4Ah@8Ytu%g|bjpUth2H|t%tIj4WiqhBHA zC(l!jX_sNSd_^&pETa7gH@<Q~bz^(Vt60eRz=#jPxD^Y11H&~?o&q0QF71WX$z$x5 z@u@BRz)84-BlI7~8WXs+4+TlML>=-msHEhHf3ouNCSlT_?ELuF&9*@h@=H~D>VMmC z7*`%K{|ONthBYN3Pf^9t4hWny1r2im#h`G+ZTCWK?@=~wLbTgsykgKt@!M>}gf1=q z14iQ{Oa+bT-RSDeTTD7V!(F|fJST5u{KP;U7K0}yjkq$5n%20a3?-6pf88_%^X<_9 zKJrD4qfWy&ccW)G#W3g;$35)jk5fLEpOaA%b*c_SE~g>g+U+TqAG3!ZSmCa3+r+uT zr@yh7Ng?cDMIf8Dy_w1S7y6+=$+vg5vk8iws`egW{eSAgE==+ZfexFl9J2OM^1ZD| zt@FK9bNAVAddMO|m|JciVNE$J-yX^4=Y~c6=L@WyPb(FrH&pfF-s{Q!9xL0x(3@j6 z`!Tn5OzLvH9VmiQk2!HnUiXxyt?B7es6I)RmT0;LX`Yc&j`8lloX!0vT(~c>j;|%S zcX*Lb#bLHD*N;iBjpNJNAFl=Sb<An9&3RL`mShQgI{77GOf~y)2cC{i3E`93{3(^f z&|_@h&T)?2)2)Y{<a;~5nmUOW?60zSz6%a=z@k0_C7_^G<G!b2&I43nw_-BPX8oq6 zI3*xVG5z=^^VsF@<f?i;ev`G`6>PsN7U!J)yWX_tQbA-+?`(f41%x!c9sivDFufqj z8(zBerZ$m!ZzrY12&F`x`-$348O1}lOY<Ys$ex=^=G@dDI6<B5|5?KF^J0Zv>saib z2zDs%J1pz--|*+}GxPVY>{s4blpR04;lR6{!J@3U?>Kg5MvEa|W3?a+DBn{qQ^ROw z!nKTT5FQxdTtT;DFuMLV69sD^IXr-wF#}^?NGM0?oKd)Sh-a&&QxHsk=$vb;|IGgJ zW^k=$PHx!SG`-$I(nUvR&rC{w^BTv^oPvid8PZ?r70+;g)p7ncc6??a-+|ql8Qy7S zQzZFwkVr?HPa9BLOo$-)U5zWh_CoN%4=lx!?&hO%)a7|Sd(RTdm$C0HQ(Zn&Ew1@3 z<=0tU{%HT8BN+O0WW{Z=9ZVp4o0PIm`JW1fRjkjfNK@c+G)+zeYB?KMW9s#b?8mIL zBD{W?MpMqxm28VCIA8B)-_7dGUt@pHY9ZXLV193Q_fMk@2KK;W!`w<F1L?iEHCj=@ zroI`K;YfkSj>Mg<&giq$$tI`~SWgF%HY?^F7E6MA;pV@2{!T>fSfPzpehrrX;;0tz 
z;y$O+@Y)qtxWJ#?e>2Fha2Hg149)Vxn7!LRP~~<WW-Vr$g^SaeIJ;fsC)j$lOK%J2 z<{@0@QMNGwl=M|pQ85)vV;|0LW30oHqN>HOKvSD}k)5Ay?)nXQs!^Vfg&RpA>3}5^ zYusw0X^gC!s;=?aTo!<WwOJthf?Uno&+!ekyGT-QNBd2=G@|F>v;Zd@LoIHY(hJ$x zIql-xqVP0Y9Dw@nxJVl<6OuxTE?~5b7NsYmbXTl+j?oge>;fyF6Ug^um*xbu{p`EO zNVu0mlKW61sS=~)D+ExgMNI;-_jfF1dl2g}*N30W2G1STBB?nnFu3d_dn_4wVTpla z@C{Zh#WVoRw-&^Dgqv91TnB#Sj^E}U<J){yi_*c+^8lk9-P3L_?c}tC1<{q}P6T#K zwg?Jta*oMc*tPkAk?U{=rVWTg)R*Y`azwoT1NPe_RUJPjPh}wsnl&3wg4*Di`C@Rm zu)Yfdf|o;_lFY{5ELx-H!;6k;i8KD;^B4$iLvNtqGzh$rhwP<E89;NlI9h|_!a~@s z1ta;_SlYtN&{M(-mUsU4tX7pL-y_YE<Z298?L3Sp2-5k(hJ9pbR7^uAQPv@wAT1Ur zJM3pS3x*3<j<MvzIFG*AI8&8l3FUaOm$Z+~E9@Y=F_C>!IMlQCM2P3esO>-zD!Co= zTND%hm!=82(g~`d6q|;0PaS>x!KVin2*6nUW&(R{QLOVNC@N$EDS`>4o%0UOx0QS? z*waPMg6%WrxHzQOna{8$&7uj3+lVkVp)4ZZK&DBx=?Rw<Q||Z|^li{2o!A(7dOVx7 zI3{8Y(n)`KI-@uw3zT+)0pkuX;;c~%YS0n~*zUzEg&S|M4D0)RANGs2xBu#`q+?iJ z6?qz{#?AZjmn>z;@4~V@EVgJ!^Sr*WR@u=48ZLMEvQgdzR3+z+WMyRw`?RQ|uw)#& zSTxwL-8f`k+`CR)n!AcQ^v2BfXSVFEpxBqNl407;+d?M~{E-rGeP~Zb#T1;Jtxmzd zT7-ku6d_a@LlH1*;~|abxpj)ko>s$4`CrJ_zF-I5igL?+840MFjI3t&-WoSXvZ08* ztEjvI@)&H@ixQ#Zah86PtNijEC|G~Nq==jDlA&j`3$}^H3(m+tV_m}v)@-76!I{8( zPcdg`737-_B3(_f9F$ziP<2XfyJN@OuDthm@yHY-qq>p5iPbC(<fk#)(yk5+nua`! z#T1A0t60C{5PlY$QXK7f9XHyDV_b4q&0kBentf4xU5FpWmM@F--PpLHbyQ4s&}M02 zW!1|fcyIRCvPAxM7A-}24BqsjpRR3WqomjUmtfq}F@%mFlKoGG<DHzIOGfz?tCKbe zGd^NVmd_SSN3iG1GjI&-TN1#hu?Zy|guBDphb2)$+AHj*l9z<1BbdvIZYb-^D?&(M zMMq)s2=>v6?g=AtZimWV(zkw5mn&D)wd0q?G~oukV4MO&I*cB25)*!wGnxB4Jp?B! z>;KMb!Q%razmq6T9m*m~Uk)0KQ?nQY^%_H*ZbZJRw5x_X18pygEh&xRpRw(w!ClLT zsJoO2&#tr6@B#%-w`pE^4Mj;B{#BBTg~%9dRX<pKewIfRFpqa)S^T@+g6E<gFTH!v zuHf)|8aaJE<sUV|U*4$^X8B8vh?X~KM4bGAO1R4JYK#<lsYZx$fktG?vovCa{JKVD z%i}d7S01GiiSi(g$dl9O7pS?<mQz%gR7mrsgGMhUI$EPki4N9iSSw&<)adm@+iNuJ z>d;B%QG(KTqHk;TZldcnx}4}U8cmCzTu`NXX#UH4HM*AQ?HXN2^hS-AiC(SI^+Ye% z=mw$-HJa?5vPGj|zQWT6jfO#$le09MZb0NrjV^$ZkCW3hPc+eq8XZS;tVSmiZPw@% zqJ1@5B-%-%Gl_mWSFL>>(e)ZVo9LSwT}X7TMlU70dam03QYm@LwSd(`@6_mAqBm>w zW}-jR=<P(8YV>ZRmuPf3(X%zWis-2tT|;!XMz1G&h(_1)1@MTPr;Y-;X|zmqoJQ9Z z9jVa`MEh$r>Bq90MmrJBX|yNN_iz`Eao|t1tkGtoFKcu((KVn8AQVTQgIYi$(YrO8 z?4<HmjTVVsuhE%AuhQrdL@(9oY@+9DbS}|(8l6Y<c#WP-^azbE1U*d7&^${iAVs4~ ziSD4$tBH=*==DSgYxHKKJvDke(e@g>o9G61VRdNcS=f7V-*alVI>(&wc=edZDc3lj z;8bXwuQkpHa2{%$k2DSrm+pUPoMr6g55f!PYo2jhuo=NKG|m8x6AezT#!1#VFdDg! 
z(Ks<0ClQ<>8Ye*Gq=3^$<2Y&@5u6l_bN@}X0=eMypRBQd(O7w4rD>d#8fP{*T{X^r zm7^{xV7AeiTQnxEEnu28=6f2G78x*oG-e?ixF*sg=KUAj$*%X9bxn-$_j^0GuSvEm zxP+6NC@rOxgKUoW%1IlZj&_GnRXBBZ^CXlre8nMxU2Isgr7@J=9JntaZ?XGA4GJ_U z1gPq&<o-Hwl&I&}!nHRF__l$bC!`KZ9h%CfuI=&C{db!VIEz8rQlLQJ{{pcj_dfx* zmZNZB{jITV0NZ@{FKYhN<e!R1i+9$Vh0d=spLNOpSz6Fgz<x|N4rb$Q#q{S(EO%Y7 z`}ohGgTk2YgvUny?A>*PJYE@xE$3u0UMow6f#cYnb<LYagJnA{C$EM2S8N{%aYu+R zWHIY=6H_;%a;UP3+8c=PosdgOA6RRkF+tVAAw)5~+nZX(um<WjK-$TUt?y($fd(hD z>tJvdQ*Vf5NOkZhss4_=S@<U{_@PYvq`B~)FWKv#1it(MB?&#Gn^*ypMoBOpaX^*Y zmZqk<8_yroC0OIvseXjhCm<`eK&A#>rGpM0Mjrp<WzR|PqeWb(vn5x>)GCcdZD>=l z;Vt;-Aw*iFm@3H`(%%`<ExG<fGC<+Q;Y?N0Y%ms)B96015|lH=bcM1|jeN!>3~h{) zwKoFPvV~I^X+d)-fJ$4|OO>b_L;mAvjZh@2m>P&(Kmu=S0-Kuz98`f?VpI5VEj+g= z{Lf94!0yKI&LA@Ag}UK?5J1fpff9vl;)<!XQO-z2v0*oaY@RqhZrjAgb2hxKHhcya zo(O$T(P6uJH6!Vo1&2E1eX}PO!o7q-q<Y14rYBqcX}TD$N}jNEP)rL+0(rJlYP9PT z+<{8&PP%{(?gVrJvMy^flKX{?RD)hUna{=uz9s9lG2SB))}%)L{Gh~UZ4`wY5-Z=> z*Zn=|MMyaF-!z9evXz@6!ZTs)w}ndv5m@ZrlYBn%`DINLYCJo;sa3(2X_!`%#iCRB zj`HCtptD5l<GMF{p^#IciO}wc@?R2-X|*tlC7jBjyWlRVYJE|!{OMf`Pu5}`&ZNWL zkqx9pTVh1(le$?y5@#1Ua(6XTH>89kDPC5^(bo})WP%$Tdq56Hs5&Wvd#F0^kzy$D zcDA#!Q)7Ug7O<Ut@mVLKqL^L(%#Yv18a|7GMK|*EPHwMmLkdtTB}3^VHu3W|t=b@o z@I=`Gpqoe7Qk<u8zO8VeuEfVEzrez~@=Lb&^M3wQe30)kxr2$6PUDiJ*kKUZ=1Ug7 z*(YXInL5_-NG1!5VYxca4R&90T$fjT`$%qe>5`k#xat5-Zavu8%>lyG?riSnIAOyU zwsmtn{~5csxfLJFoWC#&uKq0Miyp$!WH$ASppZlAI^>3z?|Ax1Z*a0HrQXs(yog9< z8^7o*yzvFQ@<q6CuG`8jWB9k&*ewx!4qLpXmGH@C_SKfCfKy#j?AlwmQCzfX#x0vx z?wrlcw&gA1(&udG)(Dp#Nm`}aDQwZ!K(^=0HNvCMC`1_DmCf0DT==64OaCe+q*fil zZWZ)uD4WWJ8$~y#w3x$Kh`O+Mzw!~dME2QN9kUNX)7Q)#@<ouimDv4=c7Sg>Q)z7Y zs>RJHhT|cing|}3tC!LT618INcnWoWex!&8wHwjoyOF}J*@#cs;zJ*IpG_?3>p-`@ zY9_&GY&n~a`#Kl~s)b+ojOW0%nxWz?YAnO87lygvm^1&&FqgGaHO%=k_if>wW;8{T z-v)_vG{fBDa1@*jb8l!|IsJv;xk4D`f?44<zXF#g?k%#glcl`~n3P=fDe_$agP@2j zJ>A3umh~4L$S#e*gT{NteHs2v*6X~5jP5<nc-lnV2WkGe_lp7RhcKE=SWT9p4Kbx) zhFafIZB4AsI3t<luL@(RspYqD+R++cEBjg`hO+y}uQvtAj|*TX@?}pe%q(}3Q8@V> zYx7e}mY5qVw4MzUcCY5W$6-n?hhqFM^YuJ1^ts|x0JC!-OZmnabF$YzZP@L0qNhqs z%NLUk`|!VP*qeK)HteEa?Bj0|+z&2nN^Ial)rNidn{i>Ku-aR%8*TVP@Tizmfm_k? 
z^qg|huIeS@cT1eNvzgyMa*JF-ZGq=6GHgD|WD_gd(N?%(g^61c&Me-sZ|4b~x3U%A zg$s8Uv2VWf5tg)5E!W>Jq7}1SJJoW%O!Wk}Bg-}4iS^wzk8fc8rw0jNZ(~!Y2McKp ztay5Rmaw}GAIHYwufX2f9p(QJWAndQuphDN-Qhw)H}===B)$cU*>hKD{S_If1!e1w zzTZ3X?y31r)je)u**~=74XpGBbIgbsth{NFiDZkwN`Ijhb+2l8c0mjqj8wYe8Fyv< zXZo=_KXi6pZqdg0c-DTeujv<T$}^|^%i1izk3sFn^7gju{A_09aFD&VXreYQ+<d7O z57>g`#}J_@bLxfYSay7Ggh%~661kw=nE0^gdqs2qCK+2#7`xjhx!1gh>|2OA^hM9* z_t?ZA!`zHbLh?uK-5<>(I>XYas|axqditk`(qrSUJI10So-#Np|L{2tnl}Eku<({d zjYEoLQRxUnnY<oBbPp0u9!7HbdM&3`taxIOm$0mTp&bR|5_dJ76Y@w^SdZE7pD|PZ z9kV@RwrcIDi{m-K3HdSfI?4#Ie^1DN!1IrcuCi_W-oVXD`2L*M)tMBO6`4ohYtF_7 zm4Z(-$KWNq<evVTW>7uCi(TA@6tl1Qzs9#`{^fIozakkcPZkbNXQ#^3g$a=?q#{ai z7|2#sbYgQWy!pL5N-7rd!o>;9^T5l^@ASv)OMJa?_U$IiYpmQ741M<PoXX}Nun0Gj zS^a_L!jH*2Ob0V~=XSWbpn1`ZO+M6Hxc<hDZw?K?u+7`ySm`DR&lWS&k;vdlF#FTh z4t*AwiTj;r+Wk(sV!9v9h92qCrZoicpco4)%wMHQPpR1h<0v;VE$LQ%Ag=TMP}66) zBES=lVD`h2aDL;CD@Weu`RpANj;#^+fh&)Px#hpwR91n-9Pirp0Ost>v0~06bn^*y z{ywz+hFkIHxD{7ml1jUeBSSYwB|piUjK@Cgc#E(~f2s}z6bC!vvd>RGf>lZ1SFVm@ zwZ}Vp1W~fDi8=VPt4+%#Ea*g(u<jMs^F(x*pBDQWl6AAA%ThnN*^B70toTG!v;!D4 z@ULb!rfchmLES#dJ==<OTZuU*vENL!vU4XQeD^|#gjY2Q%i+weCSDM%ETtxd@4?2@ zG;4JL3c|zMpi|8sD`^RCdq`K7eHKCE!DVw_$?aDdWdl612$|U0nrLC}VpdfXEIjpP zzt+Treu@PeOS_|V_||tA(CEoIh>17z&@_A&vvw!@4K@?QI?s*E4~MGm`Tcq$^}}bF z%82NiR+7u7(Py(ZJEfPN`Jc3i7yXexp5*aGIYb}y!lvS;`F(uZiIdHQk&Bpovbzwt zh((_=n^j{37A08U$}Ka+9C}H6vk>E^*uKVUs3>H)rzQzMFJvcAO$yB#{X(;pv#>fI zu#Fd`p(*rp3i4ex>~w;6vmy*<2gp@&t*JepWU&uV$GIMXbjjEVY+1wExzjU*I&aqJ zOpuV^#U`Bz^$uEsNG=djUuQt*eO7j69KV!3IupcCWu~*?!XR&!cy?^-XCs@&+A@&0 z9wUzoDZF3NP2-YZK!2Z%KlEkMO;&L>%(Fh1%;e8fjjJlfq>jJmvih^prrEP-QsZc( zK09zK5uN2L(^;Ey@!j8mIDNsSU(y-|!jH~Or>04hKmP>riVag=MYVF)Q}@l8Bd=Vi zYPfmwqgU9+=Q<{L$3hOF-!O|P7S4WH%sR@d_d&i{dl?v@37yh~)@nirCNbmrcs_yk zJl|dzJ&EO?Z-wn;-TB@@w<pp?2Avz_uP7=Ko@bkZbWcxY%K3qO1{+u#nlcVjX<e{R zS@Nj`mVK-!`vk<nzUqK)qy_8<RBc2aO5u|c-cLG6D_@DPgY39~ZL5ul-tVCfFAwY+ z@~+{H-AK#ig+v-$3VtI1k*nFG+7Oo+!*Dz%?<Qux5X(Ph{Vp6!*zAhJ?x&0QlHoYR zRQ0B(_n(4E57aPn+sh*nO@HSx0HnmR`2_)N{Kcp~pH|bltYE;d7Gr9$6fO+1U6M@@ zvL+hxU1i%)ib+pxMcj|!5hSi%aE`&Wc!t@hUIbRrH?a?0*y)R{Vn4zW<6*5Bq=*l% ziOLDliqDs$<i$;i$Ymhaqs9yu7JaF+aQ#g-?$XaaCk|;EWp@WF%5FboVRMlTKTXB{ z&Ew3{WblxGsnGhvWe*9-H{pF?7?{QKE}IH=4aDaucEs-Y8r@~leH^Z71LOxtRA1e# zAVnNR)Fd+`=-cjl$6;qVMGV+N{z9+gE^iNY<gIv0rg|c*4^Bud5wdlos3{&j&v%p4 zAp*{Ejicl{GW#oG!U7i-b){XqL>zad!Tw@S{ezztqXcnk37nCrwXeT<x1u2RM65#( zeoC>p%ihd-rKj<o&Q!F<H&Bb%nJWWu+ZK5>D)6BN@&3y@r+0?3oU1+f-`S^ElUqmQ zprqb3<M(NJOg7mkff`#KfNjN+VZ3@HSS4?r#aylhJHCpo7MGW7+%O-Oa?O8AZ3KOq zapZdxF(nP_lUXtSk0WYWY#8<m)%l>PeNcuf_f{ErZSF=*fwzCAAe`#d7FD8VgH)pC zs(+W@FB-EtWhk1@Oh1?*^~;pIvi;WrLd>XbDdNP9KA?r;Q2P5G<cLr3-e4!M`{1nv zehA0LJnBODcUf#*AU}vz)R`RU7%O*HNg9~=h<!m_nD8`@t*wg)sm;?;H8-Y8xoRJT z{g}tD)P=Y#MqA>$r=xNzv%el8%*bQLI#am&*Mn(PueQhG0EPNjbSm>GHs<<B|0Qo| z{eX)eoFK}ziyjwT^weAriTiT;f5}=tf*tL@q?iKbE#yX^Fm5^vy%8Y1G@UK13-H3E z#kaiwNC&@Uw4tETjDdoGD|B41<JUS?>3C7cKXtSZ*W#IV?4~1LBdY1;>bOM5H9CH+ z<30@wxZ^tGR~;L4oZmuAXtIv65t=_y$21+YbhPOBmX51++^S=hj=$>oSj7U4^JuOm z9IazF9sB4wL&wEBuGDd}j^F8cP{$e_FY5TKjtx3GL~8l@5+Z*tRA;o+F-^xz9beUP zmX3>cT&3d%9e3(jt>YOTZ|YdDqY$MPAQ(vbbDeZXhK{f5IA6!*I&RQ$uZ~A`yr`qB zV}p(^de?>O*jC3B9Y?m{X`SV`r8-0P;Eyl+<kyxV-OOAHm2_!u4fDR#{YAic!+qRG z{H`&A!+{vhVSbH2>qQG}Ke&2)jZkm5qw8-A-3wAUUQLd6Uiw$3Xt8PCq<@9FpEN)E z7pMEN_o@GCo@?@6x<60%d+UC)?uWfe{a5}>lZQG>{|dNFoq=~P+CSYNsC!GFY7v5T zKYqtcEihS1=wHnf%^#|Hxuv>4O!t>R*22xYKU4RIt9~p&*$tY&Y+WEl_s`M&X5Bwm z_j9`csqSxhsKr-wf9($E2Y<Qm^0(*R1g~w8Yp(p+hV^A%^!krW#d|Ls$~rpmGb1kJ zGM#$k6l!%JuJm^;@?+2*s9EXnWk((O7J?<a>~9A?zAV;}_wP!PsOQJG<hTWJTl7d? 
z!3Hk4r+>L$o+;Zmj^i5TBBSvuYj8+xp(EecyOHk|%W*+)*M2YC=g9j-H}X5SfpP_R zO;49MI&jAEj-0UrFy1v!EkyxIQ_<7gVz9s+)TRFfZP)IF-^*x^n23#-jXC^+@IT-h zb8vRC=Uh5Cb1s7%nq+pP2E*}g;1SX#L@RP`ycfrt-8l1juQIC>pW0NFbFdygNaVP! zwz6|hd?>Zo11G*yPfIq(<rz56OU=NMbBTYEK|~72&4FuX<J5lvVNRT92T#s(hFgtG zxl^8FwnHY;ndZ!gs+CnITN=l8==~y&mxI7L#0zna6}u~PoO3$I#p`Km`8qV^OAYot z!Vkjr(`7Dz=K65l>$;y;%c{lk^0DW9IykoEeBzC2%TNN@eW7o{H72kQ^f|cKZDlQ7 z_|URU7v8_AWd;o3xY3zdCTwMMUHAxUnYAwbtAiR#914YECEQk<+6bIifD0F3apMBU zyK(^?Ko2t2`_y=qH+J{Mkyw&P;rk?A{uB5+!~f7$HprFlWNJ*e*=UX%3g>+r>6Tq^ z<%7#Uapk*}-FM|*ZYtc<=^WQK4~xm|vi@!;d7hCErV_3&@_|i46$?4;E*ykby78^M zd3iZ-UMQJYynDH8o=bLPjyrH>J`eX7<%oa0uM_7x9&U&u=Zm518}FG{_SB8f9M%}4 z*$Ol~Tqa@=@{ulFWQPDQ^6$WU|2n@K-*WFfuWZjuo_cwZkg4^i3+FQ9#V8v6KGsIK z8Mn*UxN9@@CwIOR)#9-`ALQX8Q0K?-9s=hP?^4#>gKzKGnE%h4IqoDJjjWTK%O-pL zqt+9)pw@6d+%Cg+%Ap+^<DC1F<38HTaToMj2823s#+9yh&NYtZhCKUhyG$XDMm+{1 zSJvRc2N#gfjP=CJ<iwdexN@d=uR3GRzuWHm9**<*p4u4E@bCY%jUn?8uBnavoj8BA zu|L|_A8qU(@0<6(@@e+3P8wQ?V+9;K^LunMHPbmG-=fUZlMi;HlGZ>CEE7HX=-kE{ zcfZ7OW8s|sP>ZIIP!EhycZ^Vs)ETC_e~!@6mpN_;TytIC9`vRw9M?<t^CkzhgC}SD z)+5h7+by$fuN&`QR_n>n4y6Lbp#T9<j$AWK2d-I%1g;q$$GKtxp`_Ei_{i}_E^LN> zy<eSgjd!_cW<b0<XSTeF6=@b{9+J<QJ6Jd~pT`B%HLEd|Q!Fe%I2ZBuJZIk%D<yt$ zsqvo!=~(p$H^1PS!7Ke6zn%GiRnPycdj9`V_1tyDm;aP^?#&Oj%)tNjT_5`K6JYVb zvGBWKzSN~`dkfw_Jy_R-{B`uz(Njk!9XTBv9%!1AtYfW?)n#5S`2npy(!Hy7T%}{F zj!Si%UACwt-)!7g-J7N35FImgOwln>$7mh>b@bHHO-D{g`My@l+Oq4&rpzgtkLEX* zwTkA$0`l~r5jv*m7+3aMG(Xm>{GJwbw~m|3PDb<1obz;Vec2s&`=#jKXdV4^<aCtp zYN=e-@r;h;I<D5Skg&`=3qP<_dSgJ@?pA!QSBkx+-Pbs2m|6BoYd(xm+f~t;-)0w3 zD`@O09UH}amKC?<EBkEyP0RWJJC?0!$4@Kp$0BI)<hV<4JMm*Rmx1)-RztwA0q(&e zAr16iU`rTwoj^weGvHQ(ChU&0Q8efjU@Q(1+wqf6gznhs^1$~52Evhi9B=?!Hhuyp zGYDsmHc+j{!xIOz!bRej4>kg?!})@~35*KS_|d>4I$aICfdhtr0Azskak!ui2;<FK zWx4@_!?n1Cz2HbD4fwgrFW|Y&ATFpLo*N&5(naDYlc{vT58<jnZv-BOI|I53sI-7e z51NC@|2f<}(3^pW;7akeXch25G%`kuHUP)8(khb;3~Q|w!VElD5QC<KP%SVnmg90k z6W)NU^GD5qW1#;p2R|Da1dX1$DHs?Et^FWqGjI)D4L%V42q?sHoISqHwg=9GwjK?7 zK5zvb)xH#X5hsQW@KtU4LI?CWJePrcI-}1(?*;mFLB^nifs?u-W6-(458x=FkASX8 zs67hm25b#SwYLCQ!I2E%PMs!v3`bq;*$vuzH}rok5>0_;1Y8~HY~VP!o1pW6Z@|f* zEx@C2lxQ_@VzO4_T;M%8YAwQ>Jvi<*<O$cNaGV=jWj!!172SuQ93$M+3x4o7_bTAH zw{V8YfCq<nZaW-hQ1=qYy#lq3nwoylZ3i5cdMB_I&It1%p9mZcNAlS~Qy*x9;QJH8 zsoeoAhoiWJcl%=8K;|AWKLh*%1k47}uOEJT8q+2dSPVxImIF8F^j@H%(_DWTYT<$* z-vRhK9A%aV3>%=)X5h9=^gU!`pvOQh&2GRC2BWpX{|H!c4S%QxGB9X}CKL?(a~Ns~ zp?cu7;gA8H2fVA(_keRoX#Dv=dz@Qn-V=TeR|R@Iu+>Pd6=Q+3;izJS<3~Z?^258- zJP`Nb;xKRs=VfVqxe7RUG`a@-`M`2GswLrPV~`m5^cJgrECwBZagNX<TN{9$z?pEA znFaVR9941^P|^9^ICS4Q^#5ukR0_{kxXqyJfUoDEHlPcElgFbrpr-=gnxJL86nJ(b zth(UW0$b<mwFC~BjzI{1CU8m~$33OEz?E=nA%H<|X!5}YApU@h#ptaEww!_QUO-0! zr@>JT@_>e!TJt&qcP_w^3>m^c3$=tYfH9~{F8Hy)A%&QNpb2F-%GiDpR!_Li;8z2A zEBc?B&K^WD98IC+KyiuI)P$}@czgxE8?YA~6(|i@1V@R|>y!05O_=w#mJZ?irP|s+ zIDMJ6(B%OqO4vuRZ0Aat|5?kmHJor29Q83_wN4Wjlwf=yg69g38wl48^bp`v+~Q|} zCOrN=1}x|rpiribgE-*h4^R-20j^wwt^~ab`1gk#R~vx!{~m}LA92{CFb;qR*I_Dx zCj9nOOf%3sf#!|c8cs;RkwW}fAU(RHR#^|s{0s#JoegyQ96zcL+6?>+E(J8VndANg zCxYG$yi|Zc8Sq>NK7ykrjr{^^{+Ap#6?|9{xgBtn!A{`5tyo+@mjlnkk$f%C<14M@ zJc0h((0Y&|{1{Fx9B}$}jm`sZhoiU!L|DJk2GJ6r%eNZs2AsMFg9ix__Wd49DVEO+ zVDlePGtiO1e7J1Tvw<%k)&`~syaGpESqD5&iNhCU4g#B1X@&3yzFCF-r&7!&&r$Rf zgi3*iYL43r+7mbwj@IoFz;!yk9(W#(YE}zu^|MxpSm0qeT8OHE5f`xBB5ow`BpeO! 
zGr)xx(cKvMg%>eZ?!iNaApGDG`W^xF<$U!O^ciTv(rd6RfF|4sN4p$hpE}UsX8;{< zqI*F*0j+SPDXj*^{ffm4e8T5&mqByCaa=pN)ddiW1Cd7_(1c&;^lqS+jKu^Ie1V_d z!p;VIBQW51$fFR!z~OKl;=;(x%_Tl)GG3A`G7U0>WD&Gr<P(ymk7&XIGU<^JAz9yu zCM2sF(S&5&BAPH)r^!OLRHq5aW<@fDWRD`6@GqTy2qgOx@d?Sg0eY%Rr0W4>{F$NC zgg?MhNeRiwLXio{<UuqcnIecLBr^ligxz(TObuf+ddH$0t@zKKy77YocW)@)P_?0E ggS?@BL)@o{nBLe+(Ttndmzh`at#@Ut;CI^n9~^JsNdN!< diff --git a/pipenv/patched/notpip/_vendor/distlib/util.py b/pipenv/patched/notpip/_vendor/distlib/util.py index 9d4bfd3bec..e851146c0f 100644 --- a/pipenv/patched/notpip/_vendor/distlib/util.py +++ b/pipenv/patched/notpip/_vendor/distlib/util.py @@ -804,11 +804,15 @@ def ensure_slash(s): def parse_credentials(netloc): username = password = None if '@' in netloc: - prefix, netloc = netloc.split('@', 1) + prefix, netloc = netloc.rsplit('@', 1) if ':' not in prefix: username = prefix else: username, password = prefix.split(':', 1) + if username: + username = unquote(username) + if password: + password = unquote(password) return username, password, netloc diff --git a/pipenv/patched/notpip/_vendor/distlib/w32.exe b/pipenv/patched/notpip/_vendor/distlib/w32.exe index 732215a9d34ccb7b417d637a7646d9b843ecafa8..4df77001a222c84ff3fef542618b3f45f4c1eb9e 100644 GIT binary patch delta 16225 zcmd^md0bS-^6xn#$bf^O0<sCHs3@ps=FG~hF1ST>6a(UJqBu_UN_56(qJoYjl97nE zNi^3Ox2!H9;vO|5cts6K(CZr4BqV8ri6m|$66byU%!v2C-}`*tU+=HS$C|3HuG3vz zU0q$>=U99oxcESD`O?WX5z{sAP1Qbz_voKmHOhUAr?gQL_GvRE{H1mPu<6<#S^-$| z-W2U~691ck{KEilmhkAA&pKF}t2@W`vS)GJa6Xvp+IH&%F7S7Qlksi%FpfI}#zsGL zVqemQ?<3o|f{f<7^Bc%QzH4x7D93FgoB8Az?l#BS0#8e*j^ifqt2@V&MqVyEb&&jp zhh8O}WiRlXD%Z<q@d>71Y@9E8NS%%IHg0xx>&KN;x#LRiyR?oWBqb<?pFsKr4U>JH zMaqJb`Ez7vP@=3=JMv{vUt4aT!-a>-vYJJo!;#RBvzIPzE?wM6jnG&;X{?D@`?+Hg zBt}ue=kqybc|LbDof^k+3_D|qAvobF@S0dr?PoiJc#d=OIdoJs$K`k%sR2)Vj-?oD zTphiQ>D8^Wnw-Jw^UD^Wz4LBq7WMwou;eJmMF!>4vIw#xI7K)eD>a|%<bCeDv@wd6 z?xLOZk9<CFqgZ8p5b=7IV;!_Ab>Fhn0@55DTO_qa*DE;Ab1cJ~UEeDEhLh`=O?__s zYxZ2@=nQ|epE33Fn0_9#j4_@_s5*KWjXk)SYp1QzM}fD^fs0^xj-?hD<?i6piH!|o zyje838I=oVVwID#v+>6G8<g4_t3zYnCMZ{vP$ji=<Zm|W)UtuezJ|61-pGTj$pT|L zSI7`!^}OF2T9sxsvQ}2b<H4-P{K=C}RzK~%<+-KEb@uVO<>LKPcN5)DHnmPmzk)~0 zvhLb3Pj<Ri>h`WzvUx%~HsC1ZBlpm<tS0+sx5%B=y<*OGU|faqxch8FEAjpy){m|Y z5kn4!+(lD%no913bnY0{!06_OnAm}na?1v$s`Ko0;0Tf~Pf1lvb^k(Le!lab@4Ow= z4tfC=s0m87OO+pxIr5muSrYB_3-XRi^156VvO;1GC11;J-76$oCJFdlsgb^D4xU<z zv5v~g&GVXvpSE$Io+awg?y~;v$b``J_#1d4hkh=(U`Z8RFvr2&Vm>&ZYz~c%|0$B= z^7BioT)|?MtpW3q;~0o)I~`2wLZcJ*2ddpa7kI3k_kk-eVt;F=+)g(}t_kZ{RPy*~ zxmZ<D+S+qmmPaQw`+PYME;<&WnFODc8@u%9!cP4-xS5@P^Cz3f$O4bCi@Q4>@mRX# zM69iHR7hQZf?9t&#}sJFQ!Vq<$ea<5Ce-H8FGHnyfU3y_o|lrkxMD>^papnv?5|Ae zLaBWPbS!{ecc$nM5rF~5N=-MkWz4V1p0I8*A%t8GODoa``6b+nnv%0kK3^kdwIQ{v zP}b0n$xeRUKpW`fZ`u8G>0T!3cmJbot>-!sH8L<41s*dicX7!}_g;ZaLwHRI!%i?g zROWr16~vQeZ{GkeC{^@#91pH)i(o+&P~Lk{tSY(8mZU=(&_oF3mwDJS6YuAHzcgN) z7yW9x;=jUOisHY*Tp|68C+6O6ka>T{WB1u(#nNNl*<<;6WhHm9aO}rhbLn?-bI7l4 z;^aG|`ad3!wu;oE(QN~^kz;5Dh9vjT(wua4T!a+n#ZEtZAo;9pvfpl<A3qS*>H#x_ zvD!_gM>;#?zXcw7L3+e5|9g0#ga42(E4c{m?75|H7cqk7z+c#6FYz(`O=!kW)o4Sk zGDwmR2Wy#mPZGG!i19gjl1I?NA<Q9C5ufKv;kK;g7S@FnG3_+bziRzFyD!>~g;C%s zO6uZHcSepc8=T_oVlSfMGQ<l+LCJP`0XIBwpE+ri<Su1FlkI^v%1Y`nL1=0WnkqZO z3f`JByR5(~_f*LWN=~}(rGIg{$=m3a)Gix>nn<bU3~D|<-Bemqhw*wPZ;)z>{;S%1 z!YjF-)MTvm;AMZ4txmKmV~;vzWiF*Wmu`c@K5>+7lEs3cv`=zMpX7GFz0A0Ij*F<x z&CQ|TORGo7p-)5aTKW-;UTZXl2XXy4x9m>!&3_~0Iu(wp+cu7F>I84|l`gIqtGpBC z#_KS?vBn)mRp8ffG1y1xIdUt!OP71tq)M~os0^n~iL4UglxsMNeXqb^bf?fCh|o4V zr$e%|avL@y(nms<duRetpfsz#CW~zYJ__X!x9h0|JyauJ(-{!SulD*7*mAvYdC7fu z2l_I4aK=n36=<(SvaW5aEH{xHZL5x~#eDdp9awu<^U;pOL>3_!|43mCIOQ}_BD}s_ 
zy00sn>|FW+a4!7;y64gf5<OV@HFaj3Zk@^O2&=8RC!R-pVbI1GeMQ@k%cmOf9E;GK zV;X+wW@yerr+W-`{`a%M`6P|w4opDZ(JACsM6aG}0*xq~oKwaE#0|6~R!yvSrZ{de zODhvFBsu+wSR=LkEHXbbBRane#~DIgzZxITwfJ<t^WKYeP#3a4Qs1)}4KH0>zY=I( zJ6Oxh#w+vHd9vz*8h)zDx=}6(+*nIu+i8*?!Ib&Tl+xb0IFfV8(Gz9dYC0gE%xaet z`eHAZ4#;UWInl01T%lw^cWf?=gKfNxzGxdbc6yS?+b4IL30ZF=HbTrrtRbt})q!5b zY%>$lZ^*#*on=sSM*Hrvo@%nHeG<Qd>}o$)(Fq~}m#HFrhk5Nj6*(?qe`8rzqdj7O zh@EaHB^|~o#Evau=Sf3{S3{5WfMJqo+wNpZRBFBpdU%^keD3Z}#Uw*;YJ5M=6`n)m zlcYIvf7lR0v*9rCj6o9e)=Zw%5S2s!WTT}j@M<^(TuiJQKe4*@KmN$=s2aW<*&p3C zE&~ac-@EFt6mDi9;=(k!^gOu}og?NzZSM)svDnh=CgYdxj`U*))o`#0N4K$Tg}b>6 zv>{tQubmV)=xbzQOm|f_d+wOA+Lc(t)z)Hdw6oK7h{^e2h1PIauVN)s$-UW4-isxd zv5It2<aA72(#I;cLSkXub~%z3+vL&`@-{XosE^1yCda&p1%J}NLOi!3v9QXr8)+~x zcO1?CK*~ER6<HlxY)Z*j9eeN>$-Rz+{2!zscD%gM1k2dzJaRHNfnP#?jJ1Wn4;H4E zULt+sdWLX~a8Is1nG~0rIxJAL2>DT9GLu1yRW!K0v|eNBr{ujjE&qUAjhn(RAeQ*l z@NGy90>XnM$(;Da@Ry!&k|N2v_+1HA5iK&$g7lp5-JA})=jPGVWNJbTUrH7v%$LPR zkj8{D{9%%nsODcGFD8!co5Hkv4Vw`*i7Ywd6INj~?Eyxh%Tw5R&|E_rbMKj?DKWKk z32LR-vJ3mhf5aB=AjJnBcO*SU%$<7ivx%dVQoc?nnP&>w+^J(18|&;HH27bg>A?te zcA0$JsS}T0I`s-G0$b|k1Co)H5jDfcaa&^}`f+rrl`WQqq4ak$D=8uMkOe(>n;O#W z^kYUEYv#v$TNijt3(W3zI0PW^xJtGsrM8{IdS3zWb%s!LJ90ItW1IG@Bn85K#Fr%K z+M|K8Lf$FMr8R%By-QIVBKi=OJJQ2#n3l}<=_eq)2-o!u2{}m4jT|XR?q-TXU3DJ& zqqWEp(a9Ff#DAD44`-hc?_vd3Uo_1|8~&buhcFp6R23f1SY7f<<=)>L4wJ3Py@zf# zU<$oW)P%9;(nd9h7%~f^N63c(f<52(8rpjYCBnJe*y)Vlp>RPWm+vW1aBSToI4T^) zY|!74xD-AAD;b^AueT0?l1*hXB9p_%hU5E{MPEpdb!=kN1EEk^);jFZ%SlyA6kklv zrDPX<hXVn8L+(tZIV@b$(O9e!Ba-%)824GkKuZ56^n`kHX9B%_8<M{0MRhiA>oG)Z z8Zr>G7{z9>|A2s$oStM-^ivE{dMuP4U<@ZuS24|u$6bhSMej;NdI;UmI>x(NN9@l& z(a_2nmOsj#>tIJy>TdA)O0#Yl-1Y9(cA5@(IvPI>eY{z>ymD`7Y4#QSD7u6B2xetE z^=4gxxMy)a$K=~Fc_n@VJE=G82JMO6w3;?R)0-rv%jW~vvT+nbd+4?Y+<dkZm4oT1 zKh1W|l^{K4Ccwc^JS}H3j?f^k$`>7rI+%0?>DIMR{6ASm{?={Xr9*L&Gbp!IQ^oAF zjLh!pj*r8lDK^VlJFZ~*3xs_dCX4mg+%-M=H8{%B5elvtjWmRLj;G}MqSp~qYHY$( z=9yWFr9=Fc6RT11RHdNbb!1LzpSD&^EoR!!+Ca9ac5WN_U#!Y+Q@`XxZecX^$Dw3L z_w<mvpmXVLa;<x82zQ3jBgo&Nqa?a7N$fF%Z$qZ{&_-7^;;H;rSlySUoCgL7k&}uZ zHd(6>@<$IV!kH>9wbv#XqNHjw8;`q#PhCO(rkG`^T$)(Xkl+ofc&u`9TBc$-63c4u z;(D~!ksMFc#Jy+67(DOZK;FeUha;B+fuk?YB~d+h>)8}JvBml41sO7GF?=>NaMnny zJ%#6?&lQqGKVgB}o83HF{AxVyM;`S|jamVzN&cO{ry0_|AEtB`(e|1fod8F`<E;-g zwd%*geDZ244<$!>iIM-Y1=iL&HtEToUTfO#_zv0W2!(f$ywx|NLQgk*N8U_-zFp4% zJ6B?7(l`>AQPPgw@C!fYddSBagJe~i<Uz*Rw#%?y4kD0q-T`vDmQ3v3SN868@>=g{ zov*_y#44Ejq<BX3`F=uvhjXiw8|^4CvP0=4(xJ~#eiND4CpBpY#+7@N3+l(^c`YSA zR|n6MAl?<8=lgdaJ@h?!r%zVc28c)j;|BSyPp`D1>zI>#<cnqMd+a$wk}Jqt(=gi^ zOJ}l(?rkzYbVtxUV(XjQ!#|_$j(PMZMwm~#;PBuNm8BEw8`_jw8bb2u*WVFu-+|Fi zCgIb<A(^ewqujy$PU`#iYX2j=ohISYYDZr)TTR<YilF4rlF@=XY!;+hu9xyVDHrA{ z)*|%|Y#BPzR;fhBXH&+@wqGX`lmjDAP~5>}U-5?5f@l?ff>bCEKw_pUDR&!d#}~Z^ z=!+l@xvWo<8qDVT065-8WMm0G&4~`u%uAdWfy1nLd{Xu16v%s;e6AR~PgCcYBv(~& zeX1bf371;CIUmEo$ZWr3MO?#K%aMxJ;?JyM)0#mO^dwc(^2f+{@#SHhi0N%a{(Ik* ziSAhYK22?l4!%0X_gE(uUK29>e1y|l*3*a}Uuo-x&|Ip!3a)rR&li1yq^sk??_EKe zX$l1#WRyCw=og7IC<E;vl3wwb)c(@f{*uXGs`8g?{?cxLX`sKf&0iYgFTLR}O)%?W zW7cd5V>rX<*M9+frZ;omB@Vb+e4Z~Vx(*6s`9v)B*)cd5ykYDbO&-sYv6{5}|1b?p zOhc&}S)EulxjNUY8HfiRQ((J1-+9LvhPNJLhB@yTA{C^WI`6Pa1<7RRJ!U4sc{PX0 zB~9n<Y+koAvCV9LYJyqi6)XHhvhY^_1nwaz+OFYCE@B(gFafV-8?kFm-I>=p-tLE~ zli0oszPhcCr+=U$3Xus>Ng%tm8GJ3duI&ORlj)TFIHJ~d)t~Q+b;1rcF=YcHjn#84 z((PPTfa~h}xld*<Qy+kzGZoJw`eR%6nzxZvy6%b$NepK!whTVkr6|goE499ZJ?hwb z@~bXQw)H&es84ELaT{l(3Nlrn9CF|QqYr@Y#cv}k^*cf;@DPhhR>PsF#&0oCVKz@w z%-rAD!O0u+ue&5X)70TG+YmiXe8ln)1R7^6y4^x1WOk0qxPphH5Y9il2$ST|E%iPh zS)JLtsQEH3k#eaMWp;?Eaw)_rJNu*Y*gkXVozB>f(V}U7uUz^Q3S~iAYnfF~q<`&f zb^1@oT{Mt>DaTF)gmT;uz(z+Lx4=Bgx<k&o)t@GV|E_Ou5WF6|RgZZQ?$wUS6dnV^ 
z6M0lYh8st#`($DexL=S%*bbweg-uy(pTvUu{L?JK$H@9gTSpV`ibg?A&KO0c@~utj z{B)wiuZv7FEkU|-)ijU4LPnWK$c}U*o6YI`O7fLCnYWT(&8d7mX>WNq?8UF8CKZl4 z^0}oaKaw<9dS|q_`6IuZ=hEJ1uoyNk2<^v}4t97o65+0ge#gi_YwyG<s4ndkghQC; z(Ia<KO)w)d2?!PSq|Dldzd<UjTQVpHgA2#yxu`13qIN9xD*=`cEC<i{0}_#n-iRVg zY+3wza?I8o_WYNUj!}+Aa;l%6e~Yy0Z%hg3*v>o1VfJfjr@`Mq%hE}X9IyEpnbiNy zjsc+#lIAU9!--;mCL{(U&!;2FkO5Z2uN4E{3S!b^)qsWMoq+?g+_hLl`wn9a)N5o5 z!29wZ6mUrEXoz)pu+uS51b9inx37Xpo1XBMq(@dtUL+Xamupe?SuuXnYwYxIjL7?P zU7&OuCEva(lxRIjB*osB&!S{r5QZR=U|7dGW?hD0$MWrCRrJ#@$;Pa%J>GzPzF~5+ ztG#z{5Oiw;-B^;z*q)k5U$)#`lS>N;&B~WW{y_!|>Mc8Yip(7}UG~>0a%qqn2fem~ zyUXtJL_63fru@*d8&!V~MR52kM`zx}k+^YD7$3UgjCYXo|B1T5rnP~5ICzX~J5PLr z`-IPY!dHAxiiW3<aYG{8XS8r{zQQz5{GNP1JUacyavMjFz{_!{^)x+l52kmpqk8mR z-1*Y(TdHrcdt>u*9Y2<n!$T6%KMT}$qkBP>#x~%zurLAlnC#(5x(Uo04i_aH9YAD5 z1@ijPRQ?t@G_<>{79nlu(8RBip44#2f3W2!QrOt~!reZwxx#5B891ze_#K2zBy7e; zmxBIG$gt+{8Au)i0}Z=QHar!tusz}YK)!gYB7FI^7Lgv;FlM3s;1<qJFH=W%O&ae@ z+cDnNPsTfxwuK-Y??f5|3gaCkjrXPPgwO8O`yzHjR@;Mq4GQB;!gxE0Cksm3`bWGU z?7)b}k*Bib!q-R)oLxAoxmx*rb}Aq0!d~V{K{87=j_BNB7)qtFQ~>fDg8rMF8PQ7~ z?&sejK_j1u+Im?MDg_D*J>xQYab!0|p2U&Pjg!gtk=1-7nPcCJ-)^JM@y+DnsN$&2 zuwcF?`<4$cT#Q)?wwv}Ot44ncp{H{8D(;_e>G%;*jOm^r`~jOpU%*<JDEB;k<W8Xf zf(TRiBa%HPDSBg|<azR7_w%H9j4CWIP-8kz4v!JTL!JmkoG0(($MswUPBog%p}IV* zUVkp^&5|P&N7G~HS_U_RSf1{tAYk|pvAT0)&eImz&~xPDr{Ch2R}RZn@q80moTotw zTA4Q;bllk9pa+jl<L{EfvE34<BKQRy3)c(TIH^bCIOSF#TW=<xjE$F#Izzr1+iQfK zjn)p~meF!3u+jdAxZcvj&9#`plFJL^#!kiE`f+pP=_OP%v7;?wbn0oCA(hO`@2Qyf zggc9D%#Y_Uk%RePL>IE&YU_}Juxs1W#aFPyFC^vT`toN;)wpT=WzubYa%eULs!{U; z$sV8a%wojPz@(>uJb<uNbn_>u!<Ii%%6K~diNqtsPrpPpB=SCE!p7_Kqp9wR%(sxa zxeC=jcNDvf$|IM@7xLT5@Cie8Q&AV_EU2Za<&gDeD@snft{-I$8r+fdqp0?Y=%vcq z2|>K<=Sp&BVsWzhb6AFL-R_~D>Yy_961+a*-2d`vr2Cb@<oQY6Qae{drdt!<r%2a7 zxWV@C@!Jq1YgytP9dw!;nv^NyPLunSHcu^)Vkc%2Z@#3vhe*ddX3Mw1Z<)Dl6xfug zfS=49)Be9^u6sbNO^X=abxNAK#3`L+2TzgJA1BJRUy_May7OwXe2TsE3>Xht4RU(N z%Gt=*q*J+A<@euQ+V$U+_omqS$QKaqyc%{BfoTao+Lt{2%rIG}@5tt7%5X*2_t|c7 z&+cNwZu}p^{;6{IvwQfse<I{a??sNLVXMx~S<xv2Z=-zCa*{cHoeUN^JH0!vAP=Y8 z;udYks>Xdt4&E%DMmCbw+z=|!WK2Q-o{5a*ZWi&5Iyx4#*yzve8IF?5>|)m!9Z5bb z7}2rMPUzi~kMy{QAy`0<^OG?FDWq^lJh9A(i|d4i=xt(C`adPIdB5Dn89Gprj@0%C z*t~S8hQ%)9fCe|NCW9OqeSY1~!Vpe2t$bPa9$(a=H0e;@avo==NQuJ^?XhH&BRTZU z7yfnNASWCvVh+NsN@G23eRKmTe_*hEP6}q$Mqj9UTy+SPziJX0>@4YSMj0+qnnY7T z`QLlcXpp!Fo&sM+@bQ{c$sbNn{QFoe`Sd0HvfH<&n*~8^h1~E(H<4A(WkgS9)lzv3 z%Z~25cha=aNZoU5`1dQHos}r#U#KkpM<U-Y73Rh3Hud9M`9iXDj$L^A5d7L1<+#X< z)Bn>z0Kj`2cdYbgAD4g~=vfH)m0v)57fzM6`jnIxZjhxdA$^|j!)wUI=hZS{2_erf zW2ocYetni<Jt9SxJ7ef_koot6Q{Y=NS34)~z&*JQiRi9<u)r||s2k+vxdn*&zs~KL z6jTg1PA%h>+PHGy<tUa%;l~D;R!q9g>jA-G^Sa@;a9*cwjbN7z<$eY_3#bJg0~`kI z0qg*5D<-?=4T)lsULR{$4u}GzEhTMT)52FSV-*iBp*PbqQsjz@DMuwrtfG7q=wpCe zOUW)*vTWrt@{KEvp+8+hHztAJZ9qc+cEDJ`6o3Oz2=FW;Biuux{GDt7V;f)(;KVZW zkvk{-&pj=1Z39Ro8WY*=FSY~?ho$C=PQ$d}zCcFjlC=4Ii)4o(H2oIt+w@ou@CWK% z>@eMAYrg{1&)xz=ERRI0;x$XBNC8zjz6&gm{)+=w!;W2coFr3N#a7mc?*c0;q_U}{ z+=v5i&Q{yw^41}sN~(ffk&cUG$_ueL^DTVggC2#ix4cAPui5_VuJ_>X$6hm$3|TOk zpFzqOWVXp@;ntCJ3zqY5lW_|(!=L%2#SFopkj)DZ@ZXbhFB*`Ot$lGB-;H#9X}C!N zOThp0Xs5R%kH1T!KzOs7^2?^zPo6X$2Ose(Dqqv0k@Q+a&UO+)-g#-C?58(L-lG2E z0$2d;<j|kjJ%0RF%j1{J{|}EBklICAMenV}x|}bk8zrouZ%8<jz9!))`m%&$=@N#u z^aZJyLgz?0mClfG8l5cR44NllfsT~0nhut5BDG3bPjwPD(Z0Z*V>VyWd>c)bcmrvo zgon^52@j_V3EOFqgvZck!c-kg?@D+Ay(Qr(v|hr~=oJY&=vfKRqO}q(q{k%eqK75C zfbNm-BDw>(h$*qJ7*0etGWI^Uf5EbhT*1f{61kF*izTw0k#i-IWhr!qM6P4x1c}_h z$WaoxiIIaOavLL!68Qlm1&KUd#OQ7kUB$?Fi9E*02#Gww$RLTVW#pgm1WD&QR%$@W zx9<QWZ%E`tMxK+%D~voPkvACmnMBqza*sq(kVW)EiN3|y8zu5ABZ)*dGIE7P-eu%s ziELuzT#0OE<P3>qXI(l$BH4Etbd*HO894}~q_cvR^il;|&9wKLBBp;7qq|7#SVne~ 
z$V5geBoa3S9OWf4m67*Xv&YgH*&vY_jJzR{0wd2!q?(bZBvQ}F&m_{s$UPEi<0pgu zP@)Gi_C|>u!bk#AQhGQml}HtKM!qPKV;DI{BF8dvnnX@uWWGdBVdMznEbcNi9gKYM zpde$g3Ht>*62$pAa$~S9zzOkl1Y@uxz`2J-o9~^kHfm-`9K4(-=Zd=(9g+BhB8|aA z0+QSPoG4@P@BnAMpA%~gwg)(`_&JHj;4uM?+s{cc29FJJp7nE5jlmNFoKb#`-WXgM z;8@4|Stet!E5PdQ=h%$F3j&-3KWCsZcu{~8>gNnG1{Vi74^~OT8*bDrmpHlf$5rI) zQb(IZA3Tn*NGg`4;{Y;m*^hkZOJ%q(TmK$XkCF|4V;{cnPN%P;7|5S{zeg4>|2JPj zvP(Mhb!2);H~!B`Z^=8s{H)4!Z($IhNH(mR)?@Wz774MvEnRElmaMUH6#xan76+IH z_y}-iE$LA{KFNhZ>`!dHHQoyi!=wbhc@#cHl#Vr<%47HfvcEhdwdoHV901auFMXvG zuar)_fkcO$P!nkz#!ca9BMDiZ9QDI`f9V9Uzh(Ly$y}YQD0ss72idSXna7!K^_UKS zB9@ox>2;vMFqe_8Yo3b!Cm5{N;wKed*AdT}OdM>Eu1Vqra&=8IpF#d!a~lcluOtmO z?=fq~@@FgOtewrHaP{>|_}%};H>iH)O#$?QH*@hDv2IYpW~{S-ax6BK3Jq4@-hn^D z`a4PCx^9Z8Qhg#@`7_Dpbt!puE@o3_q$BnY?7(6~%Ne0J3QW=V(lgS<11=p{-2AUI zLhFDSTg-v3Ut2Am5lriQDpo(?t|!m0kC(MxL(0}q?-A6(XLq0_RX8pNOUK3b^d{Ub z(9p`txVN_O%%K0YW6urW@K4!@*T_d3@KI}J$G0!=DFfzVXsuRCj`c9$Sa-cG7Jrkp zea977fkeV{BZ3`>ynBy9?76DYh<(e2I`YqVy2=vvS603AGIO=T@4ACz4`-00A5QBr z=h=X-?cQbMcJHup^#JQm<THRb0apQMcafnVjZah~4e*APCbq`M7!5IWKyi!yPZg7@ zkDOM&fJf09N27*ldIW(pFu;F-M5cbihwUC$!R+IpfcwSL4^hYL{~DROJt4XzP;x$b zIJ}6g-`=gYp3Ph-Ikr7r7Pp-IzC8w~d-)Ds412M|zCap=C#uEE-o<if7`;rM-qBqh zc*zv&7|WimUV@IY>!p+S6=xf|a~W1Q&ZKg7$ut$A5WMkt8@6L`r}iFHiI)eIPHgrp zk#p_@$EFC}c@AQi1)E5RovA6T*ZYI;Rx1H#EY^?bxUIqCU3}5&zJdNPk-VLI!%DD{ ztI=x0QWCT4i!OQ?aO(nG7T}-=S1e{qO<C09iXG_R<gZ;x?XJDlVz;Fr8@?sYljBJL zkGt_E;`q2zo8@5Q)80~iYemXG?!?!VT_1NAZ(@1k_OsEOg&VgeZE*P?OV3F>yyuC> zkpS<H@P!a=J#goNL>H$KBCGgN_S+P)v%=D&&5JGGv}+;k1d|kF3FP3e3q~+rb4_=e zbMQv6{U=j-4{?1`gv;2wpNx=Y&n5$R_e`4x3&4~Sp6t6mx@ZAws}~;6qsqxp%iwNu zwPj!5b|tUxem^wk1)q<V=8>$*Wd18Mz0z4U3uiC9DGuCVgtIRO&P`;KP=%|S1_7az z9|Yz9BR`+Q=AFI4dHh|GEthV=6k|$Voji|j%>Oc%u3=^R<$R`(0{Zx`3g{s)o_r<c zL@6d8FSov@TmGwJ^n5aHPd9v6^6H-ISQol0&1%LVJJPq^j~k?G$n?F>#TKGcynORz zbU}Ng(xVS{x1o`4LibKoe1h0h!-ccDgO#K9$@%cNv77_J(l)QMVE-sS-a7a3j`G9v zQ0{mG1{=#V=&9uN0j1(Dn1PR$Zj!bKliQpG!`X#>Qa79E4~}3l^5DOC+{YbC<jaWj zP*(UU2lNQslOC-+d?<|XnVyTL*|8mW#K|xwUUTe20PtVUu=lNs9QH@2k=vgx4axY0 z9d1&|3x_AS`k>B6K9}i=@OGB#4;TY@2CxY5DqsVk0&pDgJ>U@_>ORNy0t^C702Bg> z0p)-#fZc#ofGYsULyj8@7zPjk+6SNk;{XMKxq#&lxFYTi5IX=z0cQZe0TjsgQUH2@ z9k2kf0<ada18^8{0&pHs5BLqx`cICF0b~FM0_=cE04Kl&C<d$nYys>7Q~|#J6RGeG z5Pt!paH{JCumQ#aoPc7$TYxQq3cz7NE#Mm9H$WIhmIQba!2a4o`6i4$9{7L%n<v<~ zzXxLTx8ofAsg3)3wha#iqzX^kxKyQWb?3O~$N$ShhN^79klTRD4?i=87L|XCGdEaz zV79N_xMbtPfY$*>0^5MwG5$px*Pii#J1{<QEO6Zg8y5#$3>*&}37i1D>Aa0&d#fEd ziSdDxfrp&4aVfwz&f2)nunyOF#>OoGqZW7}a0T#-z-xh7TD}t42Ry5??Bw`%olM*; zZYt;I{=q%VIk@TETy8QqjhhK<;U-l6_-(qZ)27>a!;U9L=M2m_ZRtNYxTYbyV%BO$ z+{mSbC%vx@2-~E`WM#sOmaOg^L&~ql%7!G6kFF-Q>XT$6-m9Hj9q8`Id@nx^N%!Ly zGbAiK+?zPB#kWrGizVHcym~FEl}ziexnm-yP&3ku1G<^DDKl9~*!84{KWsMc5n>t? 
z<M@6y5^+5)Vo(1kysG{-vg&G_Y+4p;Af(M;*ln=C&3A{BL)Vj9y<qnjALg(gWns@q zXlDVdj4(KCtYzsh*ny_5#mVf5N)X)RW)d>bVhL4jl=LlrPim~M>7Y$|=CNug0xCW( z^Kh%jf969{yWO`Ows9BlJmzxTFK|z;j^k3XpdbJBdO%+LF8=@bSTOegRV=8y|NSYx zua~n=bvZ^)pFL;(^g%OcP0uNu{%jtX#pk%@;@{9bj?0uJ2F#iL%*=mGuRK|QDQNdE zX~8|BJh)~1zXQVg<jP&QPX+bcaoxs!0N4iD1lRyr2UrVO30MwT1SkYd155ze0mA`9 z00YT~cYEZ;f>Hp20L|BIoC)|Y@GU?+;3A+7a13w&um?~9*aj#EEC=ArHI5H>*&>u( z*KDNkz35IR&>4V4fE;l5D%u3p0Zx#q_xcW)0%|zG22cY&0HgsD0SZ79J}#huvw$kV z0YC*{1Hc8Cas}@H+eD(jU)WLj4)s6UIQ`FvUU;>8qfI|f{R7+oJ}1QwY;9S+>IXzF z{1u0R-Je7@l?P8l`aP!d-KNPoMQjkkLK~sIkSuf;(gmAf7oHWi2ww>2gujIlWwP=o z<sem_>O<8ps=rhrqEa-A*`h~WCvFpuh^NFG;@@JFI$b?NouhWEUsb=aKBoS+`aAXC z>eiY>O;=5CjX^U+ldYMonXQ?tIj*^+k!$B_7i%|a4{NV$^+mcty4kvoy7zUvbk(|R zx<7P1_1XG~`Yrl%`UFECLqEf4!#u;Ah9icjGG}JKlKEz4l<~N+)_Bf%#rViL&-9Y% zHPd?2MpK39km)niWz&zQho;u%6mz;+G!HNjHIFkB^V{ZX^C@$^xxpM`Ibx}{oU@Fy z=2>T1U$Am1Mc9Hdi|d4b%0lIQWr^}#l};P1>!f>KA8Po}ur4#!m||2K&BmvV9~*Pc zUz_ikWtK2YM@v^rUyH^v*fP>G&a&C^o~6RVTjyC9T8peDR<Cunb)EGc>wDIn);-ow ztzTGAS<hI%wf<ziZM|##%Zd&Kj$?N@K}ABi5G$k#Jq3ec5e5pwgek%d;U9uacuja) z_(<3%92dS4t_W1PC&Ve!l?LSq<yhqs<tNH2<w@nQ%0yMVii+`SgL<s`Q}s19r)i_< zrx~SrNwZS3Ub9J4q4`2nqv@#asSVY2(52`m>89)E>K5qs>Q3wK>O%BI{q&RcyY(v# zYYjULHHM_jtjtlF6O5(Ca^qXZ9mZ<oY2yXsUq;>(Z|Y-Go6IJ=$z|GOlAF`aD)UV9 z9J9;3%>0UZrFpe^v$@WE-h9p6&C<&<%`($cXmMK>S(aK>SsE>WT6~sQtZ!J~weGea zuwJlUv;Jajv^HCjC2=;6V>e4OIAJ>>9!{7csD(^nkT6`xhbzt$<_WLE5_LkPGDc}r z4pI(RKBIh5xk(wXGOGrwid3(t>Qv`dw^YBY{#5x?twn{{L5veSi)mtSQ6pN!Vd6+} zj5to5Bu*1&igQF4+;_eBj`+T~LoC`O9uz+pkBeW4SH$ncpT$NoQXQl2qaFo=PEpsX z&%vm_sv|Ygns|*;Q=)lQvmHh~r@5^8PP0N=rhP|yM0;9$K|548LH7@s^();4IPqUP zPTyAFUq4bmP5(OV`$(T*c-k=4@V;TEVZUK`W@+Z8%t%vb)A%Bj!!*}aY$`PoQ;q42 z>5A!|$!BVB)|zLVx0v_BnEfqY%U_l_>uc7x;7*^xjlPHRI6IP1_z@3_X$7m0C5#Yq zglB|OVYg5%ToAe{=PC~=+p12(A}@%m#Vz<6@;gnK_B}19578et+%`PSY;XL)Saiua z$sA=#vYfGeXBlgqZsn#R*FnRXaK?I}i*k%|k+M!@5GRY%#Z}^2G}9>R)cw@6)XUXd z)W_BL)pCtilaKYiNwZV)mF8znoVK6#Io)fz^}55lHu||(xDzvLGeeEB#_`4iW1(@W zaV0vgH?}v8H02kWo;ST^sx{p;ePcdn?qfAt2U?k<d~LnRmPsMUv76pFtdKq!<uD;k zm7waX>Z$6ZQmZzp-cx;nu%s3Hi@D-L@nvz1_!dUGOS~Xn7k?4M)E(5x>Rk0?^>gZN z>YeI?P}Zkzqe<0dX+~(=nq`_-G)1p#rs}@a4cAZBFVUClx9a!m@9CorzZey!EYmF0 zO4A#rotTz;rY+_~_;Q&g*qUrjvyQWVWW9p5{E#VGjFcUyl^_=sLL?UQWMQ$eTv#Ej z6v~CQ!a8Atuu1quXcX=WO+vHqjB>Veg>sd0t#X}mLy_``@@Hk7Dn*s5N>gR11dL0s zGO1oxtyGn()~eR2<YHTK4@UI8NW~@u+qQ7eM0Iy{hFYsOsRyY?s>fl34)t91V)X&_ z8TBo7q4os?*v;Bsv>siK{wsZ>zSQ6{zieJ-{>XgT{Jr_ExwR!0i_~NpVJVtonQK{Y zdBgI71^-!~9EmYT^1HBBxm$TrX;O_*y{KBFDb>a!B$w(x(wAlaY5Lp5n}f}v=5TX6 zbF_Jtc>~<^-{uI5&T`7q(^_l2W#u;Epn`U;2*Jup%39TX;(Y82N7Rqhoiv9uw=_}O zHQM{yIGv~)Q>2@(`%EXt5;>xuYWT+RigB*#Rnsohek)hucf^N+R<%&|uBt{=p-#{x z>$>WC=+bq4@vu(!6yn(&-CEr@y6>^%;`Ot!*mmhp=^yA*4Sfw04bK@|hCPPk23cm0 zOm*gj%-1sC$UK{Q!uS<}8jt0dXDXU%`rQ<39&VmsPO_w01dG8k91HGEMD*R3iHP1O ztxar>tE4!^brKqc-?1;M5rnh|KL=G+sswR}_=Y%HJzu+4`;+#Lw!N;GUa6mAC_wkm z8<wGaJYVOxtV1~q`;1GuK)D<~P_LxC^38wb@M#5WjT?**yMYr5kjZL;|D)qZoLo?j z74*VbVF6rek8nV^2sesS#wzvL38pEFl`EAWD32*mD5+8o9~z=^s8+ypDi9SLRgt0~ T+QmX~5jJd&+uxMKH;4Q$2FFL% delta 15811 zcmd^mdt8)N_wVxzAOj3H1sU$3peU$wpJDDKcuQ;)8zk>}8%c|F#<V2R5tDbM&~0UA z<t58ZOGVARMM9xwMWtrtg^X$&lw_u)yw3UVXGT2l?>(Q-`RDw1=EIt`_S)-Nd+)W^ zUVH8R43!6iDh~$LE}I$`IZM50y5=6drTa&tLV0l8DNT%ky&AoMKhq2aHbz|6$bi+0 zrfKF1{4f3Tcl@|Zz~ko5a4?-~(xn4!1spe;58@6#cIFYz|7qbwd>9|fafiTo-N#HA zO#1MHMYldA<M@93JhFuE8zc$gxLUHAPwn0O2gh0cPYbA-<HqxA(i2DrFBW}&kle;Y zIV4^56#snPE24RPV$N??&Ko<T*~)o3Haj!(xT<<reAP{-#-Sr=foc3`k{38i^lUz< z2~6dAvMVr2)HjNJ7C6LG?3jsXYYMuQUWX$wkF!;mcU6~nPzQRWJZWf(T=%hK9wf$6 zx!3D06%~73U39sW;~4hD5q(hNgd#h*os3cBt2i9Ub6im=?{$gk{1_%H_H<As6u2AH 
z3{B2tPe*A`Nlp2gzqWbG#o0{(1)Z;WI;tOTzq$<LJ;rfSfp)qpf@}&(ll>egv}I@7 zZqPtM+YM+tb=T|lbSUc$w<2GxcdSAet6l9j`UvR=l2!=H=!an(2mS8GoNQNXwyP!k zuO%Yo%~H<Jy85(FFM3%_U+o1!rt8zh`djB6JE6=*|NU>)-He)ivv<nGwusrDs1Hw| zVzwdL89c(!wD6C%fa-z{rs`kuc)XyaWa`O=hdu4?c1gIsbTG!Oyjkt)q<zTc9$DF^ z@n}uK4I3<Nqkjo)-|z@Fdpe5a+>J4YyRMNn1)a8!E)YfVz}`x9$=xV1eCj&W7NER2 zjJ2~Rlq?AT(9$Y&Z$_8byKHsFD&`<Ejw>lCEvw1TaN0^kioCHBc!DFajSa&9xzhY8 zc2ug0ZIl#}QgK>FhENxXIz0Hd_nZyY4*D%@+7y^!6)Jxv?}>XyO%mu+cLYJ+;Us6o zir~cpa}@~>vG&^}(0PJ@*O?LJjpfkzX&7QG{K8`z{gstF+(;IN^b>84BCm#ICp6=U zBASC0p*&et4+kr9bbvLM?k6oFu?YpRM@dOly)#HzZ*3DP>x&$!b~cigBr-HMvA4h4 z_3Lc6h4b8U#z%h8qg0IfiX@iMxQf<D|Knn1{p{)<?oUO<bPC2<go&L8J&E2@Zo;x( z%aZdrIF5}*+-B1pHQR0I<LZY;+~z(-k?ZOmn}jwy{_%Q!dKx8A<!%tUo6sen6#R|~ zQl}t=USPgvr=a8gSa&Ah>xWc$zAb_=N05lHOi@MvQHNz&j|KR8>XI~7wV;PBnAf(9 znle#aG?V=<!atBOGz{&tv8I2m-p?d`>x;Y=mSCqSwuLuUF<grE>!u>li>#nLIlEf& zKuuK#Xxpbj^$}RxbQU`5=~UKNc`<g(wC;Ci*nORt-J0jvqE+53@qB6MB%zX2qfMwR zXO$&IHB~pT9&DfX;IePoi%Z3htN+Hjj8zFCM_I4@!SEMbhf9TLMs|`}k__d^@Ox8% z5zNj_as4V-uCHS|q+nwk<pt%XqNzUftPdwgB=HsJ?^S<si?!{mbKFxF8gPqQ!_eTO zHC63wboT1k7^aWmtGcQc-^%*j*BEw4{y<A?n%z*vxJ=NYyT_dmu19#n-6*XYA;KaL zXsVcP_{McJ`{S=VJslpQg?lyK71dQ2pvNOv{l6+bCp^M@*4Us=8qhSm>IbMh{#9qd ziGUjRTyc$4F1FKm0ueUuO@_mXKH2ESn_h3BSD0k)em0HX{jA5{{laLrL&k0|rsqOf zaF7+#-!Px2X$EHIv>`l@%i~<4zZ$O2M35|%IBKt1IhsLUi-?mnByn65*BhaN&L;H{ zeJWNWWK|c44Nb0aS}jx}a&v7b5&dW98eD1A+5JT6-I{_9n}cbmFQ*7msco~(>x!}$ z(Unk%#Zz6-+Embm;Nhj{8De28J&xxaSp4sdg%dP*ya)((k4s#2)76W{g`u{z_mpZG zJ)B79MP`V8O(bh0RZ&L}EWEMP;8a*fv5uYOT%^pfD2-_-71MR8K2>(A@5=(Vo!;rg zadzs&VC=L-pg$A7>+uK-oJR7ZEPN$d7S$`~!z_+-IHBJ$314(GI?JMrYrMVKQJfEs z8UhE$qwd%QQXe&t?@ijG62o^hBkoT?cg3`n^orK-Gs&pvoY;%$9G4sH{M~S8f!V9| zp7Wfir_zZhS|?r%70b$7SCG%5<Hf6lLd$B>9<5GYo{au8n;d0ZOL-LM6hrGg#%-WY z@x<D*DCF({VY{Tm$;O`j;~jz}Tv9vTf%)-tcw?>LuwCv#&(!4KA?xWt)PYf@xdmO$ zUUV+RV7gE`jYxZ?i_j%QuYT#fl$h-vo?(H8pv571oU^xQnAj6u7K%J!L>c-cA<KIW zm%N8v$?qg%$=O~Dqb+zW@`DaG{gEF8+vpHd67z_p5p(Vrn?pW|`B%t@{;*C8U7Jb9 z#by-FV-caV%IoS^Dw&cSl#zh;!i(tZY?LtfJ8i-A5?lgP|E+*LO;e||#T3zUCO{j& zYvLquB4z!=$qnSo*d{)iyxhBQ{7L52g3GXs;%W|p6-;TTC&@Rxi<HCARnG}5;_AXq z!)LBIIvPSv+(0Ozc_2O9)%QT>7K#@}3-Op{lajc83KM&-(a_*bYT{0x#tMkG(Kj(W zC18a#aThPbzw5=G!cNb5tm1HbrWe^07oW04!TeqdOGg(8y%;PoB%Y2o3EfC&Tx#!? zSji`Se(!FN!g8r8?4Z#kSvrpYhLlO=k}EyC?N~*2N&E9B$XRI_e}xQ-pD3PYgf(n* z26-nwk$;?g5^oK887#~-T|^=h1_U=VzpRZW=7fw4y}u?tx?AHB?2^iQ`XV@3g;F|! ztW41G>&VfBX?z(;Nz4e(gM@z?TO-LsiAmwo`<%m(WO3qqiEIz^%WxpwC%ji@!S0Aa zACtVK-ux0WA!(6le*|ex8qe<}naL{tDe_42Lqj@P?^3XVA><Z_5r8rCv9vL~TiqOx zP}i2lmO~G6J~<;j7UDw0crchPi0+8t87A58c16)SBspauKbee3k&9Pp1@qX*vXr<! 
zQ5|S(2o(NDV`t$+{=P3Jhf<Pxw30F~G!k+`D+NT7niErO<+!cVNQAa0Eo|j1386od z(W!|UJIrXo)7h3~qi-<M(6lJQ(_^;VxY*>1M!Jg)`g7t*&4`e()}?5yG?;!KMUJM% zg}slw)90#1<Z`M^D}sWWGQLz~r{7#>*@mP#Sm{OVjiV(H_4zv;!?INb<<_>uBD;-# zGJp(A%apzaZbLEq(7DK%(1-#vQPMzuNgFiyMlL49(@9^zDC~5onnP)FV#!!OX zUwYemdWI#zO~Y*T9LY<M?O|ixJwqm?>-ZM(VtU>K0~37#XCZ>9!^_so<A1QwIJu#8 zf}@N{Yte2^!FroFc4@N}8A7i5ga$iJm>oH}-5a}@Nc$94oI!}hB#TRvXb}sov;eD4 zU_{Xe1jbdwjcR1Ezo`U6i%S#flnzLGW1UdDwGle$Bl(z)7&aX*4)sfksg6le<S%S~ z#HAs$KVy{g^zQG#hEJUcO~EmOkcYJON$lU+hCHC4CD+yJ>S3cBu3=mH7+-CJJq7I^ zu_vUu@PchD^^!q-<B}>_?IwKrH!x2@3r)f%)<DhqWLn?G{MD>;W#|$b3`X}#O0h$& z0@HEkE4Ep!0O>X{0S=?*=}+i*i6bPCtM|rAQ3tbC5=X}1gg;nC$<{af38%A^(m;E) zx}FXCEZLpmN|*<it~8069%m3Wp(U@H$<p)0l9?U541Ta|jD)L%lD1Iyr)jW!Evd+q zCXQ!*Qy^J3!dE%D0R?w`8rrQTdou?|C@_7PO&@C=`6DwuBH%w*b;<oc<AX0?G*mZ= zT<f15+zQ%GFOc*sX>f-?A0<}MfnPKJ>tt%y2!0CLo~4Pc?Z8u`1F-gfP<pG&TVa6P z!Q`(jtLSDAq8?!3KO?RI83WhD5LNY?*?3&NysF)F-c?MUP_9ne-InMH+<mXh4FmL& zr$m?lkDx7-gbY;k{}9W-IQ|_{JaDg$O-L!W7~d?^RGmkDgu$|TfWJjy=aIPYc%8vT zG|+^RdkVXzDnFk{cayyAjF?JDP4UGRuX;q!JWSwBvLt&!Yy{>UkM}s(8IZ@pSmFkH zN4z#i8Q`#K$<Um&J+rPL;Tt2t&KdBfQlg{ME9BRlC!@Fh=wsUjww}I7<_@Zgj{F~7 zFYyi<CMwG%!v{}@xPrO;5Mh+_3>DKGWY^#!qVyJWY4FT+3u;&{tvspx8mHx7kTc;7 zRmzQX^wzTj(lIh)$Vh%Y*)=31<xPylevAvu<BC1zDzCGb``JL=8D8xDrkHAeBG-l# zgsy`Kma)XKo0w$-v+i7m%}bDj)u{ev&*i2#13gV`^Gl`lB#Wn>PQx8nB%MiWWf}c_ zGv<mbrn4~c5;_D2JYS%yp4{3NR&8zzE~cZdkRN3EvA<xnC0-32j#(EiiXF`zL?s{C zv)}>5lN3DK;J9pHt7Ri8lgs%=vPG^6oeAlN(lkd$2WgQnkZeXS>kruZWQszLM5|Vj zAnM;jb}I6t4pJ+Jb?XT~0wN&%;9BKD2z;nav2SE*ys<lhjt6ocuidN82x1ev8yrsu za-&4A`a~~bss&EBz&=*|bV|eKG|0O<z0Te?ue#YMNd~Fn2OorhJ6zCqbrD8^fkpiZ zhrfhlgChou!I!JTq)o#nWf7-J!yh5<s#cDgro`-aAiuon)T243->W{|-3M<oLb4PW z5wsIYa|uFVBWr0)ptrh5TZo<3T?AKov)CJ3Po7c7hd;^6tf%OJj%-#ZRm>JR!*ZYp z(QksUr1F(U`ASA#$>=LteWk&^QogU0<|~cxm3sP0lT2AKF;mMkMrk<x0!QQ$_pC0? 
[... base85-encoded GIT binary patch data omitted ...]
diff --git a/pipenv/patched/notpip/_vendor/distlib/w64.exe b/pipenv/patched/notpip/_vendor/distlib/w64.exe
index c41bd0a011fd760ce20ba795d9e535e0d2c39876..63ce483d1e462373fe16015c70ec52bbbd816c11 100644
GIT binary patch
delta 22290
[... base85-encoded binary delta data omitted ...]
zxM9MQswEJbepP!wNcSd`fpCCE)7=T(ix8i1kxK9Q#GTLPH#&9Z2YZ}bSHDhL*SOAU Zy~p~J^$XUo^k2ZY=cOHe7VzKO{U2vOSP=jK delta 21776 zcmeIad0bW1_dk5jh0A=oAY5eTGKx4csUV=BkOu`N(=>;~K%<;UFXjw(<&${2YVDNL zEK^G}&4F^t3_}4G6-~{lq|!QsnrSKJaDMM~&H?-Wp3n2&^Uw2oeR;Wguf6u#)7pEl zz4qP*%ieG(TjH={(YP0dx8HV%s%s05!oj+gh%aAbtNTL1H|l;>aBtmO;M#>>)V&FO z+o4lky~1Cj$sf@0O9h9$R8aJ`!<#?&hwYqvb~LB`+~&eKSDg#TodM%Bl{r72`SacF zJa+SJFyE7Zhb`he>CJaJE|9%>zN`K=$L-L)8S3AmyJg~R3$iRejb9!JXPApF*!^HH zkPzaM5YDq+x=y@1dmg{T*njZ5JNpd3%c{=n=I~B&xg00h4CmQMdozE8&9;x?ce4-d zpX0sQ-}e1@50<PqmwG)UA=4ZEc|dM#R>HNMiixpOpex5^hz;e{NNat-amKd~NwXiz zvLHgnmKmW-I3q;|A}ivOSAa_76x`0R9efC}ayi2&Jj_a{7Qd5th~+5R=11@u;&&N) z?Rk#N7Eef*J94E&2x6loDTQAU%Bw9QXh;)P;!O$R3s880oZaP-VE3T;HFWz+D8ybo zE|q(#)uK-5<Uu(acl$}TA{s^2W{Nf0;&+00QkvS4<WEweb5R?*C)i3&spuVQ6~qh1 zm4~ENkI+X{FNoh}?Da-v(#H8p{gkoGV>ChP?apys7Tl(ucUf>_(eKooC)Jv?YTJ7G z9d-0iA4Qwc<-LFYU0d{{6j>f(8{0+BP5FL?zj2TLF48AuB->eBa=TmwbLO2EyIk>S zMa?sV&?DQ}i1cjnaJD!miRC+X3==BGMmUXlcBEjv5t{%F{9zl}U8tBJkuSaPjH+zi zs@Q7Bu-@yga9nw{aq(hw>1#BYYgv$E(~qXs*oH?G3AUVwQBncgE{GHr5fv{;I(FAF zx<`hRx5Pi;XbuJ?AH6UlpY$v`1;v%rw&;}-l}u8-8yn;lr{k}%1x_*US;#70fRLMP z^1H&maEdg{$mY57qZX%xGqUMp_N!AT(^CPMdkKeyimYNm{5#=H!cp0@nuR+@@DrKf z9OQ7d6}A1uCOZe0UcN*PHk}1Wte3v{N0t-LP}_?$ZT*X+BZE0^-#uBD>oOyXi_l5g z<Z_uZjKJ{LORH&$*-U%X%(Ibs-(L`xP1luDN7R#0i7K>96?%q*{#1mjA>@mq8c8Zo zmD;9Cb^nK%T{`mHSdvRxseDmsViA&Z#5%~<{w>Ry;#kt^@Q6`@cs#QrXxBg<jr)^C zq3V3ulub<_ty_<wLC3X(Q8YMtvgup3%v80y5)=qg@F#=c(9odF0ij|_gojYU%&Xme z1@Sj@v^&HxA!O6{Py!Zq##hYOHK_EQ)ZBq4Lx~?`?%b!=4tA1F>qt`~Ex?inq5QJN zHQVlcL44eN3VMX#MF?i>b)yKD(s!zP0KXah^6DJ1mMp}9R?_f7U*jI*B#g0VhImez zkIMGZj6=b8SaYSw^az=dMY55+wN9tWyODgZ+BiuNuTIKL?qu;Z?r|l4PnGW?SZj1h z2rv0nO9)gFEKZX$E5a#J5Kl>CA86~_bc{NvG*A#*X@!^iq0&j#zF{SuCs|X%6f0ml z2Pt%b+VszTB)1i1t<_Qco(Mi9!Ig^OX$YbT)X_+4;7XVyST8@y++qaBxTjt!Q<-km zFykJ(N#)gHCD#Sf5|JlVjDsq%ngwx6gdm7@g7vUDV_e4A@inY$mMJu2M8>niq8nv) zq;dVYj@lYo98nC8Y|8nL^)Q5#I@&8kd(epn1{!wGv30Zi3(Gq!$5y0k=^&n`R-DEL zf~L7=TaV~6jC-;;w4>x&j;-g}K0Nmo8L)(-+2Z+3@rZ2d{2eS%a@u~Rh>az#5ZVb+ zu>PhKY;Q#5>yVmN5%FxTerOEOMdwN9p$1inPO7RkOS%*bQLtH6TmO*VLt{w{R@!E| za!=8slmnT}C7lGAmAozwa>N?({J!Q#9G5Ac!fcKfDl#L?vT51*CwtKu<P-{&fixmB zbX_<<Pq598$iti{jL5Ul+!N1BwMM1Km@ODROrsKMlC`v7xZ2QzR6VBaNi6>scU48J z)C?FXnvRa7pkF3g`-Q4G4rn>v`&W*_|B{2&D+csbPDNovy5#V%O*NlWvvX*dk!TJ+ zi(+UJq$?V^2aO!23XqbDl!WSR>rpvd#E?9)<>@Bf*^YH6+&`UT%OI{2f5iSWb}RTz zk78}652$o1yfwqP@-WJjPQYg6K!r&;6_$wD9Gig}0R3WHL+b=Eq1-7EW@$JUGA)>N z*c4cj25ckIk{32XiqH?6;}PKJ&2uH5!l6SRlkOjq<r3Z98sX4UkLDkJ*fx)zZtKH1 zu8#A;43v7XTOLv4_Xbm;7ijqMXi^%a=M@oOh_rl-;pkoxCJk=Sg&wGOB?k>^0DHxX zgzA=%9I-x2JS?btce%1PwZeKz?rf%Kyy0pn9097c!dIHic6mnXwuQ2bp1p$7&eQVP z^sS;23)#2;aA34$)97zm*A7w7pHYnFK3X!(-6JTX(qZ95r#}N7n5EoKN+xtea!|<Q z(rTq!$E90QlyqGBLBkm+194ivW16$rjt;Mt4#ScXFIuo)7<z-2Uhb+)M<?;PY>Fbr z$}Y%bGc7$!U>F0C7Gj%$vPL7`52?~`F_ae6LlH@f1+=&eq8$`!(aDW4C{9Ak)fqPh zYue?fQ60HWhBKg~M_s&>+KlkO5{_nI3dyGZPqV{beTFY1?OM|uB6*HCF6NP~PJbr? 
z5$GawN#w5}%P$+tQ5Sk-NJ43{=`DDm@T_PwV^O(r@dh=ugOX}F&4zh*&d830P_ko* zuH_dY<?)R1f@qXYiAv53Qddf>aFj9k4Aps5roWnk_+};Ua+)3RjvLdFM$Kku#Ry4z z6i>|Lt+Mn0X(L!GvdXVnjLLvX#by}4@@pkgb^0H})lR4hBax7SSzV(nz*Dkm_bDcr z#yNe5VZgu@vu&p6&^VqWALKSRIH<j&^a0!}3^(izIM}xbvU{eGV7qP#m!@$*a%M!j z#Uza-L5zZ{G>j$qMC&d_uyH;isUbXP@e#}Fp`CI)gHKc9Bo!w?w89Ps{55|V(c}S6 zmH<H<;2|zi#0*nN%vqYl_WDdVObBdWN4El4Y{yqReAgRRVULllirKj5f>hU=Rdvh{ z>VZtJ=As32h4P~%{?Y_24s8M6@@KKWZ<lsQWk@nh+dE_T#k}RDEX?A4GErl&)}i?+ z5Q2w7I{2}HI(eHq*)>^Dxh<G#@UWprRAXrQRGQ{bb<jGM3ff6QtZnHajfa#pq@9^i zEwBN8c?qtxV;zB42jAmfu80l&jv!v7X8B0x{GP1d380&&V+KlHSgl{I-|A$Ts)8o8 zrJwW~$zdJPDxbxS{(br0Y>59N-50*>w11btx1nCNuLHUvi07%rKH!KKTHK_m%s1en zp}1qaW*+%eg$C~9bz4mAO3+-N?xyyXFTJs}dQk2KY)Wvb?yi9?51z*Rvd6(8y02?l zNJy-qCuB4gf8)i*hSVBfz#`K)zjt6g%!B!OHqV@^JJ5k0Gw<T#*n-e!JMZ*t7wQYr zS}(WwOB_ywE2uh0c#)!F)5_Vy&_sSXiwoNuxdxlHR&X>(%F_-0n2YrRZ_!&H=t{nq zt~fFGa3ANf@bJl|k7YZ<yjXg;dlpV{7|5XICg@)L!?@TA)xnu7Nf45QUIx=rB6S%= zQ&PVXG3i%<^f$*@@==JAk;wauiLr1SKZS)xc)90ck888CSKL|uh}_)2aQx9G!fzl& z-xmK|@kq9Kmi*$`-KoG5A8hj$yPS$n`y6S)978w6nidoJsphPNqhy!h<xB4y*p&z~ zD~gPbztEo=eq3yorcwK#oMUOoR4=W`R+^}{o|0R<1aWXQCXmHND!k2(MXriy{Td7X z%iPh(VoQ5)k{p8Mpu0eeeZu9IIH5ovc8V>IN-VWtZJ}Xd*0d8HU@1!UQZP9C5IE^` zOfq$49BYwf$%I`+6<x21-o^UbhdyvpMZ4$+BzhjxYlY;2mO_gz`yhsc$}KulM5AHQ zaz$Tg`SYP%7Dcj9oU~(zChSDQyCE#C9jvi1oSd`)EX7}zLT}K9U~z;NmXX}wY<?$m zLSi#b8uE-Vu9Ox+Y4P~g;xD#Z4@B%hDX=|A@dUF$>U4{p>J-fX&Te%IitL6q$fnYR zq+`9W7p4hH8ifHN8<Vdd;V!b+=-AQ=o@$~wht3)~BE;wblaFzG{Zn!<1#xl25>!Q# z38e})Eo!ZH0xOPKqJAksh5msD#TG}bP~{(@>^l7!FrDqW>N<S`DB6OM;oXN+VrIlR z9GCPfwMemSTKW}?fIbx&Rpq=jl$=)C^fLI^GHRf2*)#$XYnl&)xDppt?q$fyrYQ$F zZa>wf_yJL0vDD7{dhJhvoFy2!cY>0<m2tPY?^j!L0*6qr#liL&dx?`Rp3Am7B`srJ zVmdoyqC9Ci8y)k9G3_6Yt5KZ>QJ>d`jqVa!+6^AF^bTFWROorQ$z)g~Y<W(Y*A+n? zlsHdKyw+JwbleN`$+gT%I0~oW)ZdiRn-{F##0u6!PJ(>g;tXx<fpE(&%2=7|Ad_Vi z?6SrB9GhuvCm!vq77n%Qtv`ljvwymzr2URrS(cV)pg!uQmoV*M^ZQbL*2B@w7cq-8 zSBH$U%@9GArd*uSSFq_fKV&ny#_(g=+OGb-&HLaLsr?hgOV+==7kF6@c}Owr6cYDP zBD_YMX=N+rKQ^N$^7}nxe7CMK$1%mx{#ZedEQTCqH5m8L@HHZT^JkQ38$^lE>|-Ol zh3l3ERL$?UhBqFCY{p(3a?s#!rJq?;kN6QQ6dt@?@QzCkXgoAZM?n|k{@**a3_$}H zC?d3~NaSa!UBm+-o-JJ<GD8s|lSCrbY+H|Xk6)jm#b+$P2;V~18;tBxkKrMk9I&nv zWL%?|Q_=?Y<T<152IVTak74kSWW}-3;j_^i@`h-+38LvoxK4uTeFt6#x&1j}(5VRa zWo*pEs)3YVk1H(XT}4tAZmnd*Mv!jyuY-lfMaUOaSV=I3Xr@w-+uxG=b)}dB#Ivmt z54e^H6fi<fZKb@jX*{x_rt}ClC@!9#&lbe_>*g5PyK&*V&+oE>aelu0o`T{FBZ{Hx zoCr(8nT)+}qrK8<_Iq5QZ~5nx{Uaw#5#`{LEjD0wNx!i0_+<WLHZHzbpX*h$%*0mQ zL4cw{1ld##60P+pmrLBN4`f)V<I>qI>ZYv;=TRY{LF)DcI}zWr<EmX)cOBJrNAnJ& zJs6pp=V~w3HNnO65}bq#@nSaZf_G$7%PuxBp-A`cUu<WBFew}(MJvY{Q|i-uSRVF* zwc3M<sZow8cK=FW-j?pIq-*Q(qI4A_*;dBnb{TjRwMf9qdFCD)pBT>mlNhFZ`2)5k z@de$EznFVZv+<TQM#|<^FI2#howJjr^&B}k8V73;Q=p4be$x_Qo#(_`Oo9kQ(Qzkr z{)9BIiS9GPG3#qQ1o>piS@`b{{-m_Q%8<KSQ}_}G+4SoUcC}|GkKL4HcT^BjjU4&T z4(8J<th1<Cua44I_*%9hK~g5X1$B2CeTDqC2ZEi9L;kow*@Rx+?wyo<Q@OP<N;8<X zS4;;-%t&ahBhB$wuVQcL#lG%k@)!#eUWcI!mCB}L+u5~VU59SHOD(u8SW~a!geUz4 zhXh%jY=Wos{SQ=|+f}-wfEhY$9*;H_ZihS_5y#Lti%S}5@?$RB%com7=@J{?+o#_^ zlBv))k^*V0$UamUj#9M3a7=~cDl`C;bkVSnICA7`pRvz-Ka+kF_cxjkG#6w`dX6nJ zgCs;lA3*aPs28lhDyui3Wj^9jYCOc54NQvCeOb?@C#Ch=jQNdx9|DwnO1d1CO*6L9 z+?TSgkN6VDgz9GOG+b`^%@UUkTdtk-ko}nSy!-cc81K^+IF^d~b9F2?Ib653j?GA3 z(AnmQhNC<G1KW(`MbtJQ0<tN1EA#CWX)fKWT1hBQH0-)VrckyRWYco+t%h}X*tkAn zy6=y$MSXhcw%%d8`t;Y;KVbLz^eH{C1uZbt{(;|Ef~&Ca(zzN#B-jLgf4vS%`6%qX z%CsbihBXky1vmPq$R3CP;Df9B+Zb5cG>dpvT=rGiwKz%}FX5`;vWU~svx+P^lG;dX z`<cYHr+DbHFR?=@T}m?{j(IH5aO<UBYHXAmi&kSrYRq4a6{|5fH8xv~J-$e#h-KQm z(MvZ~#yVoy^sa{iDfOt}9Qa=wz=c6w`Hw8K$*KMx-`%F(<)D?2agg3)D^t6<{_{6> zIDce*vz47l4KGbsN*o)Jq)ghHwgdgSb65)3a9IVF<kOz7sO0J=<W^k#9RtxSTLeX= 
zs^f4yh3X<@TjE5+9!My9U4{%`1>@cs>&uOH4bxlTX?oquv*~FOffY*TdgVA7M5z_} z?$NXW^dZq~ds;8^)wAkq{1rR5Y|7Y7ld%c&=9ChT+syQRqxhAqYu|9)(<$ufzWzc_ zDiSrP3WsDn!4?RMi7Sm1<V2}C$d)ahluccfIMV4#H&{*Vq$K{(qD#0eg`8zoeV=k} zsYf<j+O>Mtr(X}}8Z~x6iFs_ei6;UW9rV{#Hf3#MbNcPn_5Xr(O;6F4?_@8fXX$FM zu^s7Ar9%dj+HjFjf*r&*#7;^B1C!HWO`Cg-qZ@{?NLVD(+A=OCFADDwZ^F~dIN*)_ z9S;zPu|L3YW{78{S31y6Lt8O!n~>IRH3=$jMUyIeuGCQn&&j5_8{r+`zVFxaYR{4w z6fq1%h{`KVYzKBQ!zX;oA8>8x)n#%F3YV(|YisJPfy5XM268y=VjpA#;py$GjE<$= zBn{>Pm~iC~b3(c<D80zQeVNAjR^?!lnFPpX?Da5{O*tX$!uHzy5xV!}R700*X6dWj zDE9wmZvHn@m2+Q7Jnf00d;^=%KfT*LY^6EaX=B@O6XDuXsj^8x?*;LVh;*!c+Gb-< zvXlM02E=4517Yd7kK86H08tD>kTq@8Ps}u+cYNaph*#)eu}7;hg*xLx1e{cdnf{n^ zW5A_{o!E?h_Q*~50V_7Jg#+UGOty1Cv|o!I&%t-a85<7AdK84ByIIp@J9cNl`0x}A zTUNql!M4C7!#3Cfx63t|7317;D#qDQy89{S*l!ZEWKIbBpov<HN1pOx)PlpQm~Bt} z!z)JOdUhu>PdCVuWoD)DpRls5?z-2q*`h2jR+DAc<z}<%S?l={w&bZ^x|1!e`l-0& zHNRoa_oruyfZMVw&YL#<s9&g9%{J(zpi^XM^fMqRRJ^_y9Yd_}Hx@H6j6c9~2Zje{ zLc$t3f({<m2aii4>-0~7Mhpvb^9O9%z~GR;cE+p^;D4O8K6qGyM@gK$>frtH?D)VW z-7DWSy%3|D+RWmF%pP^`lj@C&(HKm2T-%U+U9B;XD9j;&<c87Ro7BuU3j1`^AG7h< zLGB)~b~(1;Q}HXvO=3&4YxwPK$e>BQ#J(65#(%*s42mor{}Y}%WoO*Rip!`|*(nV$ zmO;nS*^2jxaa*>#_aCfhPG>B~$%MP7AM-0M#d54_?x12&PfGi6cwHe5u&>ZFtW9x% zQ-yw+Lb&0`O2l_q5%ybTxdJ_~{-a-D5(<tP_hb%c^9K(PIdGk-aiTUDc7RsfFbEG< zLPgLA*V*rbeY{5gNc<}nI`T-tNRqy1zB#>2Hb3;c#CspvP01N1xK977A1latPIvY? zJDBsHuGe)oHg}><{EgM*#ta-#uWT1Eot)%~MTo7#7c}i9KGtM%SwjCMv0$Jgfx{#x zIsc3Qh$RjQFTGf&lqDeb>oO=q=yFOrqVn>nOhk99(O4>Ai%O58B%9vp8UUM)zja?e zI8-w~04z}?0cimV+vbfr2(OaFPy7r>9(dc*0T-dvS{JJ$nyZ&a)Uoe}`07&X*!>~F zx_~;?acG9l{uefSXsk~6GkarboKF6QeLXZt*R_`YFf@VRz&hj!{4Z=wULW0LJ64%D z(Pstb9xhl)Owz5N+TEmiti#g<{Htu%)2j^9k2pElp5n2~@!8C8Sm%&k;8>HwjODMR zebPq6DtucON^3wsXfK;EEIE0!DpQgm<$=nqFt>EZtwwaMBW(QRk|3!MxOMoV0Sm!Z z>ZU|+V&<g%?Au}M_zGruCW7TJHG8DO32wVHs)y6bx<+<2a>P{feuKOHI#&72ApSo4 z>6uIxHvA3U`5)Mh;r_b)Kd^@386L;6Q^V88CV#==E`4>51&y%yJ;EyeF@#RI!sMf) z1Ut8HS{%{z!#`GS8L^Q!^n!1!IgB-ztHzA%!R!9H#NHT{7<d@|hthTO8KvEbRrofS z5xwOSyD}<0rAQShNtB)kMg5LJzdPe;sfXI}fnc|HypIx5I$n2?jeIWI?N9g{TGfl# zd(VBqx3bjH;r#b3fAsswA76lzC08U#3qaIy&ZxKf2qsB_Tz=HJWFx2){Vlwk8LjVt z#vN>q*ox<xx_4RcF)>a9kO_GM*-K+)8#a7LCAd+`_kPE2jS0aH=05gkT}uVKJ2sN9 zWPalg1UgnIrr$+2t;JBoQcp@=aK$nK%h^!L{unpHZ58guj4h5}Y+c2MJ-<S?{w%xp z{JPHN^y=1x3?|uh0})&wb%T#$JuWvN!wgTU5yv%QVn4l-Z5w~Y?dAU{g}cipuAD8* z@5sN+R^=}+F<ihUkGNp*X>r@{f`+1&^NdAL$TwMOgVL7-NOw=S=|OU0YbW@J8qUaa zb63~{%nN_%Dw5ITy-%dF*o6uGqn3h^a2e;e3GFG(xWwWTE|VVDf=Wp{bDO2V;KSc$ zBVUMd4yS^ArH74d=?k$5#~Ryfy$Z6z7yW``nUar=)}UHShxCRgVzKPz3!M$q6oxX| z4^FZ0f+Y9O;Iz%|FHW(S3gUImr`Xzp{yM{J?8ky=BRPtC&J_ips4)C<i}@DD`riW2 z`VC(52i;fGU`l2jf%Uw_UMNiEZ?O*wJ9?3jZ3*SXTZ!Gd3RCticC;|dQ^{QCl=AKT zB`pqO(A}FXWMUVO^^0f(ELlRq@3ahVve6T}>kgE$r4u9gn{4~UWZzO0gj;v=YjbJW z7Ziu_R@qyVdgx4LEN)Vw;|<#D=9aN3ld`i<HMFUSZk&hV2^Mz&bg2$6GrSDlX+=pT z@G9S52n(P~s78^_m?hsKUrPhbynw4{o5_5GJ)SfqeiXP?GbhEim)g7>gH(QW_RN&D zO)VTZb21+IbHv}X4?WW3jd@)QTlivX&turg)Kwx~1&I>Wf$B3x8K?m1sLGW-d?MJr z>h~A*{D5~(U{6c9oPmpCx@JxZ>Wz)eXhX~Kn(#9=f9mZZYibi`DH<etH{I*+YqdO& z-6^C-amDq&wy@qW4G2Qpko*%81SH5c&Bgew9trLrXO%C#;3giWMk<rSm+4<#t-D^& zK74sx$1B(t=$L~SnR(cKG!Z9en!J%e!S+v%<ma%a$-VHq<CJN91N+aEY(oPE35q;` zi}(Hw?8=lBo#7YeH#NZTIE@}&U^df3Zsa9Iah+bL*SE6VsR==?hv7A1`yC6ul%UGw zfo{HsEmgT)*)i2YHw5oWQ=jJVGM8zo{53Xk+5p#Am30gA&^(K+oEGX&+nVThEO+wd zQT=N({haXpXK_Ql_YSkhY2D*LMm;o)3xS&T!n~tloR|{y7dE+><LE|0yek}fjA4wt z^)ri|-m6=1yEsMzrLB4f!&lc5AVq5BJ%VbrabC*U^w&ClS&I(g7P{@i07qp^>zz<N zZk|f8&ebHgZmKuSD|X;VSB)$_%=_T_0-I`;a^ee$pvw%{wQ?3WW17buv=eFF)wCCD z+4dP3h9j!FtD$bcTJ~th@BreVg+=RW&x!mTQ7qL?xSjsx4`|w#EuQJ)^TR<^+wn-1 z{8Y)vBUL+Q{>Jyha;It%&eKkscI`Bs4&YGZD1(>lTK)MmNXOS2uTV8X<zyTx(xPfu 
zW64s(H;|Q&CsgBmpfTUD7iT5vUMgm5XN}N>6|+BQCF_2j&U(D+<FRKt<*jXT#XG%h z`eZsA@oKjT+o+j^5wV=`b^Q9P-QMkjcuJkcO}O6(M}G>ETV~O19WDu9SMuX%FPkhN zGsO$p372t+H|+^MoJ1g<)Rveso&Ef3*PyL%;_iQ&QxWthDD`6%x(KRiutxrPiFKWA z<{Me=>~N<BlqIGGH?rBYUyM#BM^L?`h)LFOI>m9b!l>n0RH5wy$qtYbwEPrvoHNx= zO~t|W>#3CbzLx5IirMB^{7x!)ksAL$Qs<v!{&V}4CLF-RwT-t^m-{(*`U@rs1$VL3 zKe<HeNxjWk@-Vw^-^;*K_mEoP(2_NDkn3;<6k&bfJwFb@^GO&|>XquplPP9+QDy!i z>DDN>up<%2E$m%Ru4!4WS_9qZ``M+r`#XB<r7g?qDoelYd9pXmV4u#rfs@8N^YeYS z?rxKK4OAWHi#Z{^%G?+9(s3`drxrZp^XW^dwIZvDtaV8rp@2TSnSPFKUl7>kMX0u3 zy88wE<<Ia-Oln@E9H3y99gqt<_Fz>$n&bn5CMy~6*}@h`3u11QY$^i_k42wIBU$%_ zar{}9zi^uF{O47T3u}4ZtzA{`l#S>40@k{ytG9U~X%3C(iAh_aRBdBPuZ_`N-_BOP z7GXHP<4G4svf9_C^QTxZ>zZx_Rj3Sx%w?Zq7Yh-+r~keR#}JFR`hK$C#Bu2vHbE!# zsCsP8x9b+QwcSQmeTv-Jc9i9xBOby4T)=}4?pR<Ca4T3do{6tFmA;3z(GfB;LdX!M z9Pv($*eqqjNZ|aL9OP9^BiCvyKL_=~c(B>D^1d2JTzc9zE@oJl*rHxWg0ih09SedI z=28M}bfqM1GbJe!C$W+aD(_0`NKh<=z<5=FYZv%f5hzmxkls&CuZEqaFGg{$QquF= z(nFA*L!ZXz%aK4D3qyT#RB_pq)FyYIhzk8E2(@@%k`4tCN8f+wp8|zD|ACYpEB=L? zl&cmPVbq<fWHxl86!Erf>N=LKt{CJ7H%Ul2Wl5Ax=f|+y6{a?Zmu&iC4D(#-<p>Gc zv}O$JvUG&O{*xyIyn@-5X6ruN%q}b)>^=dzOq=6#a}$dbXX`F+VxNj(W->ORc>4lY z7zR<yOWLrheQ@DPiQVC=2EH+eAH`O^@g9CB*dlfFpJStKPw5h$W9w~jLr1a1Wqkov zmSwIyuD;5aC-WoN{^g(gj-Xj?G2vt0{7l@0;LBVxClf|gEm%=wpS@mr-<n%SlYKBg zi7_yhQ>1b{z^PC<&#IhJ;C!ueGF47KIA5%16)U4lRlc*zH-q0t<=ih-%8v#oR^?n* zIXI;lqEybeDklk?K$UYy<)nktLFH^$IRZFNDrc?ADFVl5eW}tJQDqf_<)#YFQ8}~0 z(W#sl6^>X&b3MiI*M~~RVJdTp%9K=Qn#x?EGB2ylXkx;akHPi3c5jVSs)21^<!-2Z z@5x9%!;Y=$$}g+Bzv_Y=zmzqu7J{d|Bg<J>niW~==+lC((iku_XGf}Jz1GC=&#?(> zyiIGczopb#qH&6{gy$4g<6L8TjxAjihV*S~65KXcw&#muH`dG!S?9*%kY{YcXQIc1 zf<t(Ism?B_7VPSuGVWgNT+OWS{ll+e3)c?eS8q76mgnECI{E$`o_~WKTIU6~|J=I% zjv`Iv&6zCtLqC;Xl3CUF!)l&?x=LPOs568vYp?S&8!P^(hwi$KefUvGm&6Rr-pq(3 zvVB+?+xjV!jvU<IY;bU~n`;hk!ZtX#K8*jkxAPRFSm7OIvW$;S-q<2%PWvBD?dMzJ z)cUZpk304L{tc?v8Dk*b0f}6K>e=Q5VgPXl36w6Wd}-Gc(SGcQj|cDzSlFg+okxHj znk02sQWYcnrHF#+<>o^;Ip&D?eDY^_KOcX}W^XDqeM(-eeN)E0Oh>0O<KikED$Zno zY%)<w@^jvUM7O4F(amUlyGAw*=*I#!`{~U6Sg*}-{1#TU`JnDr8jJoUFz^#<og3bm z@o6&MXXR5Se3mDhdZn@aPm*w5_wFa5x=X3-z$aVz&FrNuQG6e^Vv7&xO<P{)H!;tx zVJ-twNSnsRJ!F=))t{Aox<*aVP3XgVZat~<?8Efiy2gE^R0ivhugs}NoLJnPGP~5` z^XOz{I9%;45kh9l<I=q?PO>R5iM_C`r~l3JcGc?sL&v!0y?@Ja8&C9RJGTYu>XO*0 zZKmWVNHxz!0+o(0Mh%^m+UyT0DoVGwW|S@9{Eds_AtKFGip0op(h?T*+4%TqFleN? z_PYLcoOyNQ|Km7QypetOS!%D<i`$#F9VALn2i?URJ8KR6;dfQ8^vV;#$5v+E9?CCg z{kIP{-?Fw9(9$1HclQ)S(FRl&B^1zy0!16x?(HFN+uMbtRQ4nCmN2^=QBJR8R@fqM zuCI#Q@ip&0Q}v;b@<OtqaTR;K^BTTa`gvCxU&unM2J>-MQ>(C)rnCGna)K@_Xj|cX z)fHX`hPJ|ov!h>>=oSuU>AS;q&ke4cxI35U%UIQ(T>RMyzjp-BmsXA3m!J!P$D3`f zb)pYY^D>MF9|;BbU^1#@Q)mqP=*vEiNS94bo!Kv620IoL{blDW*8_$22Gh*;%GS?d z^Q-%GdkbA2FBJTZ*Ml*j1@XT1S6l@>##NAvk5V%KB4^K0E=jSb`4_UEtD}O)M3ERJ zR0QI6uf<1tX-1nSrG>21q27i!QRquT0lt0cR`n*Ed?*SY&|8P1&08XstdCHpn;l(B z_(*$RY0um63cGYDBKl)+Xg(}#Pie`;_{60cT485rb`}awV~Vw|U{T+MnZ|0uiS5GD zp<*`fn|OW&Tk%bh?&?go{TrXorQsZR|3bi-@PFjY1h;*3e<D?()%cXkI9PQ11rEBO z2c7`J*{yFnBdc#spuH3Fu*M>9mQ!O29zep?_$H{fc{h&<c3kp<D)Aiy%)$rDS!qq~ zGu04TVV&p3m4xDn$V2=3;Ds<+T_vuWwjw^0KD=(RlLu%mScXPt;a#+l<YiMQh~683 zhi^AE9umev4*T-USkmEs_&xh@PuFD__zL?*tNyuVZ13S0`Kio&qz@m%#vJkUwm~@y zU?VYv+KF+MjV(FSE9|pSr3pBgejb9RU!cb-97RpiiK)uX%*Rt%(~*vDHpDbD>d)+I zvtVViYXh(YOsoyn-7>Q!wd38^PHCS%G3;(_Un6E(ruZns_;#I;((|)dSbANtCwM~n zJ!3hH049pw^a?vv=j{uwByJ=v&Y2_a-=Mq|7EA9=W;g57dSa52zlLYuXhH0`Rf*m! 
zF{@vkUf&MOQlVd^3B9ihy<W`5*H7e&*^&BU|LJrzbfQt611Fk_n%6P|B~?ymLyx|K zrFZCPs;*N7laGcO#sn$qaEG}dr~OzG-+{e&?CPNT97oS+Xvk&3n0Z0@d^3kWDvngP z5#<Zc>B)L8$kEXUGhmw1i7tZmLDQ_Sh~o&3bnivB;&?#J#TUsQ;*-~UKgHEwggD-Y z&q>v73ES0#B6jXLU0k>|?Ca)>etb=Tl4cu;+kJeajd!63VA5q1W>K8<{v;XzH|i;4 zcN(5cd=3n2lD@=M>Y*eVs0X+iz_TCzmV_ZDo6=B1j(7wG(gV~`Usie|rVDuYF9-p$ zaQ~_xpAul_N+PA&iBvq5AzcQkw0}!ScJ@RPU(O6CPxx;sY@YzVLGGo^vc*Nz-<iYy zJUQGw6}&b_w!VNpdrC0;Qb0Lklp+h*r>A0FKLN)!J`8t)z1c6PX6O$1u+fcyyotTq z7;gCO1(bk=pnjFQsj;sY4p@Tq@5d!>QnrpXVfgq(*4h}&`>@c{{rCtr;dBu1z)DZ| zH+FmuO>V#3c4m#IXZe-=_q%Q5L^l4+M<Hf7qtL4!e<IT2ONV~*zg`=qXIc2SaZ~EM zD^HO7(WdlFxG`q@(T^7zf@kN6)U?Awjq+(fS{9@*@Bsq;)Zy>PI1HP-Iw&I1HFD%^ zkaFZ)<@aLz#<v>{>G)9ThME4PNX*TXz8S@yKI;`@{R)l7By&}#|CR}KhX#Lg9NybW zK30{_mlm@}-}(i<JnFxs?{h>6(ij$hHc1yYfr`ZyhvDUCX*zuF$(EiC)cukVwzs1) z*vIo(#(6(SWnO(sd@pwI>|otsRYYOO=CcXsO>E}5FlTtu_#EpP{vzT1b78uM@$8#( zaqy)6JQu4g8c&5fC<Zo{rJoPsyRrQ9>q?iHX=!oXDh)SlxLd<U4S&$^wubhhYCeAr zducdK!y*kAYPed%9U2~1v6MTjF|KKNPeX?=wSr&`V>Ns)O69+yVWGyKt>F?4-_~%Q zhPyO8tl@bDp|RgIh6BE)r=JK7lQhiHaFT{|HC&?MIt}+~*refY4W01y8vXcc7^Pt% zA?oM)YK$Qo7HU|mA^w6?Dc~&)*J=2fhWj-<tYM>uKWRvR>p(w`HT1wALeNhzkm~1R zHO3$f3pJdp;R+46Xt-a)Mh$<{@Q#L@HZ*P;25Q({Lww~y)pEn4c(^S1DbyGXHT+CN zB@w@J*v?Cxg0A)C(kY|2pyGvRRg9Pid^6O`F#89Uakv){e_0&j)AonSuuohrf*D`t znp>*|T^{0GYJR9n)9yq+)eqD-omc25UyIX{p&t(|j!i)M+3}Am@2SP}v^Xq?`lH2R zl9Zoyf2;Da@AOm3#cB-fHp)*^t195H#f!CgfEMSpIJxNbv*W%hAFM{XA}t=G#n;_a z)6H5uR*Q!!@n>K)|5OELYXW91K1YjlT70e+Z@R0>Kh)wEw79Ir_f|Du@7JM9Xm!(h zelqi~k-fUJ>|HOsylYJ76*Kj0;y>d{J-wVbuR=G@E76tn8fI)XRJ#^mNx+2}N{C0l zMj-6?K`m}H=xP6o^A0-BAzl~D8|^t`q624)cP;h|bm0Q=H)(;1?p)vu-`hS--i;lq zJ(MzOs@Opno@S0ark3f<@h&>fCEg*Hje20#IX%zjK8R=O4}E>95tO00Gd4JcTUsM3 ztL^!YrA-|hP1T-Cy^cz~ZQ?h(VzWkYx~jIB_jcmE3lWApa^7g8cf3cjr#FyziQpmL zme&*8ml>fwuMvGJMEP3Zl>QFW<$3zC-4A@8YEx5W4~{#45T;dm4D{81#s6C~zmDa& zqX<q{+4~QDeA-&DDGp03p5xL`R8BlQ_HbB-wxr+saGXmj$Bntlx<3jIb9$cRn)FbS z(mxmU&l&wepN4r9^Y%Pv{{kEKsDH1v++8v_?j?j-S{t27q(8^Cr8gt(CISzw-OOOe z9{HEHB`q3&2dqqvdt1xu>0-~hKqDBm_L=f*7FH+1E={Hc^m_<r+VXhWV}?0$Uh&5E zJP)7ZxXuHg<VgWN13}Bf3Lg87ZEM3^d~}hXji&v-G=I+^>>3CjO)C0zbL9dGpXLIF z=WzjvL%9Hc5U0*Vs2rM0$Mj}59{Wa7q*#wBsdUP*y;G!z8Bb%35aOFyjO?!`(Il4k zFd(O`RTpq2xEOvyJZhyMZ9U|9YU`p+Zop`cdk!JDNfG1t4)$D!L<861E!RBOAp2Fm zi7AcSbhK%+9O>ih!ueuF`r?lweG@?sQ(C^#m5*zm4DU_lxRVG)O)H1+z1rDpXK>sp z1h7}}!C_RtvW_hnc!Wh-{eREoxaElNY+8Aq@6*20hQg}MM40@e+RVyPJf9QhsVo9# zECMIaI7~GTq@Cx>F;oa+u2%lQ^D*sh_}hm6L;{DehNen?9Up}yUD;p5mvwwEim%u4 z@s&I5c>kC}E`%S=dE9nya%*&{b}DwvcgRymh(0Ri|25NA7W3ZibyV!+xUUe3nks|s z_%H_=8gszPX}nLn$ccUM-w+@&4K-1<OYQimV82*bE+p||)cf9lskibcJHC7S%-?l_ z<Gw{OX$tZ5bK(3f?wnttf%8jr<NRirn!Fl4tK0N^`XtA-BE)L)vqAeca$H_peoqI^ z6C;Yh8-<m1$#0t<n;_GIpeepH$I}!@^q`@?-J!{&(Y@NO*frlJk5|UR6=UJT8E3c_ zlP!FyGFi_D1d<A3F^80_g~$$j;EN&ijrVS>EY|bBrOH&t$nha=T!_V!3n}z~CX8Ij z6DytP7jNLqmRC7*;VjNPyo56+S~xRb%=szyRvj7o!*9jHz<gW?LV?;(U-H^+a~z)I z+F0Bu@YdZ?qLq#gyyu&-_W$eO{;z-g{{#Pa&rv7-Ti!W~f5tKc|FZWScj0^L*}C(y zc6;z2I_A8kI$kUOzd$&~6ZQ=8<$toT%nRauvom#kI<28}4O2Bt(lA!TXbsI8dT7XL z_-EzLAbx1)!@OD?o*$HA4`_&wKb80n4cAqM2J;;!oYkWEyKE(SzJ?YJi!{vFFi*pD z4U;rX)G%5@4-L7>MX087b1)yxH&z}E=0p76;nb?iG%V6^RHbtWKi*T4Rb@A6*jV{W z2;Xt)J6hCNlU}05XKPrbVV;KR8b)jAp`pEo4<D=5NE%jaxS6oh{4yVi;U8K#%FJKr z)?}x)xY$X>+s-P+rfKnhDqhnAF}^jG<}g0pxh>mIdtM6TJMqq=+*C24^4)NLdZ|6k zX@du@$UU)#;`{SNV3ZeJLGYu2p1838Gmz)Jfu#uZL6-qHBCLQ6;VRrS#e#kZxELoJ zU(kdTacC(5T?CwgK=Lbq+lj^@Y)1eNKsI>yZzHk-_%lKyXnR}&rXdiW4t!Rli-A=d z{W<VggbR?7fT6)0ch;ZhB7om&^jTnmnd7cO<~`sy2vp`_ARmfDq*o~_1CfP52?DTL zqi+CpxMddb;jTUKJp}3j;jjo*Tg5=SaSz1bla2*$LdXQY19%1@5A<1}D^3%mK)V5} z5Q;&64m@3oUt{qH$!CFmVyHztmkNA<K$SfN?!wsu&wSkHK(}si{HY+|L4+pI)xbX8 
zF-(*fcprg=vK9Cb4h4yz9|C8^sv4RN`~krNerXekIdK2qL1aE~8G<rSz)NuEs=+6x z;bKpYs|StqH;1FOszzW70?FS14(+Y-^MH#q`Zn-H9}EHVHUc~0Ji(#EvA}t082@NQ z=BIJoHUuhY2QZ{B)DFHG*cX8`v;?>rfn*5lHJZ@9pIWgQn2AsX`8+~|V$j9FR}d_q z%YcgzW`kbbuM`eOI%YK@gv${~rJI4vGEf=#D}ZMyU~&=OM>qg}E6@aogc>9O=Vsz; z1OD5<Is}?eXMukskjz8i&MXX}4~7z$^c2c0MWIBDMW76X%W)z}htLY3Cr%;6$8`rc zQlm!!U&uypAX5mOhd>p~2Y!w~4JEvx(S%`xRQ?X&9|#+f*E|?YdO<G66Ol4t4FVN( z80bGlr2~Px5vb6;z|5hlh6od%#w{lD65c{^!hh9s8(20BdGY@{5dJls<GO<13OxTT zh7~mKY|tB}d|ZHx;<!(qQ)lc}pxtP-$M(QE2xZ7HANcebtOC%(fqlku+*Z)^-N)i_ z=m2QK^$0XqHUb@=SIZ$EG!lV277b(?|83wO&tv?rz@o*D=eUIkO(>`gXqbSWfi?qO zUO;Q{?Y$c?sz9wc8dy@uarWTP1|FNJcAx?H`xMaN-v+u)gO(^SFdl&#kqG<}ftKij zQV`2t!Q~f(Rsf@Bs0q<P$C>Co_)fq@2$W$lu#p1h1mOV-8U&eo;O!EOIcUPgv((Dg z0VlqS4#0>N0oUT1hqSd07&!;yPqRH5#5DvOW5P{yajZp#&A_V&R9O=+be>u$?%KFi zjV5$kpw0=xas(=832^U1br}=BUZxtcCBTJ?F#Az{*&?ie%WJBMCG2G7xO^nU0zX5T z0(u89VX;al0{3b30bpjiy4eukLRbv>+rW2h&<be6mzSZ(pr-)eN1*x90CZbUoq_!~ zgBbo6#v4L}HUt`{9l%ixY6YJ#e<e&TXu|6Vq!kIcXO&9t1-2qkUcztQg%+WS!@$w2 z(PPkKf&15>!=MiU`_PwN#fYQ=W7c83L3ahdgFt%$;rb6%{zjmFy;_->5Mc@An}C}( zpkmO~z><x~4|+E6AObbC8tC~EHZkzMfuA7IHeI?E#JnB2a6`g;pyy6?{Jeqp5t_hn z1&-PWm0?X2_TG>E9&{2=M4$@Z0Jb1d?{5%&0IG(J{a0AV2VrhOF9se(AluLY^!gg( zPZ960Ic_il6_f*9i$E=12kcX=wlo#E>Nt7^`FDU>4eF#5fOilMfd413=!Dt`Lc>X1 z3V@#kOmF14hoA{Np2h^mviCh*3KbzjOQIS$?;MsQ66OQ%BapY_b{?w$ftFtraME`w zT?FJVa@-q`Av}q&8FV9%{wqAv1a}E#Uxs$UC%k|_6Ob_O3U=30BqV~U`5rqO2?3LT zP>oV5@KuBx;Nw=CTaBPhOd#E5&PJmN>6(&gLb^xniiQ%><sk71>2{C!gkv;5VJY3t zkq{wWw-HT9*J?x)p3rEzCZp>x;uG36sk8?$UZV+fHJY$UqX`#jG~p*IE#O9oE^gF< z2u(k#EhUUapvnjbYjhrvZY_vUxLBpD-aMbnZ*q#|hjpl5-?;w5`lj`*>$wd@8;Un9 S*;?uQ8XvPK^ELi}-TwiK!oS`C diff --git a/pipenv/patched/notpip/_vendor/distlib/wheel.py b/pipenv/patched/notpip/_vendor/distlib/wheel.py index b04bfaefe9..0c8efad9ae 100644 --- a/pipenv/patched/notpip/_vendor/distlib/wheel.py +++ b/pipenv/patched/notpip/_vendor/distlib/wheel.py @@ -433,6 +433,22 @@ def build(self, paths, tags=None, wheel_version=None): self.build_zip(pathname, archive_paths) return pathname + def skip_entry(self, arcname): + """ + Determine whether an archive entry should be skipped when verifying + or installing. + """ + # The signature file won't be in RECORD, + # and we don't currently don't do anything with it + # We also skip directories, as they won't be in RECORD + # either. See: + # + # https://github.com/pypa/wheel/issues/294 + # https://github.com/pypa/wheel/issues/287 + # https://github.com/pypa/wheel/pull/289 + # + return arcname.endswith(('/', '/RECORD.jws')) + def install(self, paths, maker, **kwargs): """ Install a wheel to the specified paths. If kwarg ``warner`` is @@ -514,9 +530,7 @@ def install(self, paths, maker, **kwargs): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') - # The signature file won't be in RECORD, - # and we don't currently don't do anything with it - if u_arcname.endswith('/RECORD.jws'): + if self.skip_entry(u_arcname): continue row = records[u_arcname] if row[2] and str(zinfo.file_size) != row[2]: @@ -786,13 +800,15 @@ def verify(self): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') - if '..' in u_arcname: + # See issue #115: some wheels have .. in their entries, but + # in the filename ... e.g. __main__..py ! So the check is + # updated to look for .. in the directory portions + p = u_arcname.split('/') + if '..' 
diff --git a/pipenv/patched/notpip/_vendor/distro.py b/pipenv/patched/notpip/_vendor/distro.py
index aa4defc3bd..33061633ef 100644
--- a/pipenv/patched/notpip/_vendor/distro.py
+++ b/pipenv/patched/notpip/_vendor/distro.py
@@ -17,12 +17,12 @@
 information about the Linux distribution it runs on, such as a reliable
 machine-readable distro ID, or version information.
 
-It is a renewed alternative implementation for Python's original
+It is the recommended replacement for Python's original
 :py:func:`platform.linux_distribution` function, but it provides much more
 functionality. An alternative implementation became necessary because Python
-3.5 deprecated this function, and Python 3.7 is expected to remove it
-altogether. Its predecessor function :py:func:`platform.dist` was already
-deprecated since Python 2.6 and is also expected to be removed in Python 3.7.
+3.5 deprecated this function, and Python 3.8 will remove it altogether.
+Its predecessor function :py:func:`platform.dist` was already
+deprecated since Python 2.6 and will also be removed in Python 3.8.
 Still, there are many cases in which access to OS distribution information
 is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
 more information.
@@ -48,7 +48,9 @@
 #: with blanks translated to underscores.
 #:
 #: * Value: Normalized value.
-NORMALIZED_OS_ID = {}
+NORMALIZED_OS_ID = {
+    'ol': 'oracle',  # Oracle Enterprise Linux
+}
 
 #: Translation table for normalizing the "Distributor ID" attribute returned by
 #: the lsb_release command, for use by the :func:`distro.id` method.
@@ -812,10 +814,14 @@ def codename(self):
 
         For details, see :func:`distro.codename`.
         """
-        return self.os_release_attr('codename') \
-            or self.lsb_release_attr('codename') \
-            or self.distro_release_attr('codename') \
-            or ''
+        try:
+            # Handle os_release specially since distros might purposefully set
+            # this to empty string to have no codename
+            return self._os_release_info['codename']
+        except KeyError:
+            return self.lsb_release_attr('codename') \
+                or self.distro_release_attr('codename') \
+                or ''
 
     def info(self, pretty=False, best=False):
         """
@@ -872,6 +878,7 @@ def uname_info(self):
 
         For details, see :func:`distro.uname_info`.
         """
+        return self._uname_info
 
     def os_release_attr(self, attribute):
         """
@@ -963,23 +970,30 @@ def _parse_os_release_content(lines):
                 if isinstance(v, bytes):
                     v = v.decode('utf-8')
                 props[k.lower()] = v
-                if k == 'VERSION':
-                    # this handles cases in which the codename is in
-                    # the `(CODENAME)` (rhel, centos, fedora) format
-                    # or in the `, CODENAME` format (Ubuntu).
-                    codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
-                    if codename:
-                        codename = codename.group()
-                        codename = codename.strip('()')
-                        codename = codename.strip(',')
-                        codename = codename.strip()
-                        # codename appears within paranthese.
-                        props['codename'] = codename
-                    else:
-                        props['codename'] = ''
             else:
                 # Ignore any tokens that are not variable assignments
                 pass
+
+        if 'version_codename' in props:
+            # os-release added a version_codename field. Use that in
+            # preference to anything else. Note that some distros purposefully
+            # do not have code names. They should be setting
+            # version_codename=""
+            props['codename'] = props['version_codename']
+        elif 'ubuntu_codename' in props:
+            # Same as above but a non-standard field name used on older Ubuntus
+            props['codename'] = props['ubuntu_codename']
+        elif 'version' in props:
+            # If there is no version_codename, parse it from the version
+            codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version'])
+            if codename:
+                codename = codename.group()
+                codename = codename.strip('()')
+                codename = codename.strip(',')
+                codename = codename.strip()
+                # codename appears within parentheses.
+                props['codename'] = codename
+
         return props
 
     @cached_property
@@ -1072,7 +1086,10 @@ def _distro_release_info(self):
             # file), because we want to use what was specified as best as
             # possible.
             match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
-            if match:
+            if 'name' in distro_info \
+                    and 'cloudlinux' in distro_info['name'].lower():
+                distro_info['id'] = 'cloudlinux'
+            elif match:
                 distro_info['id'] = match.group(1)
             return distro_info
         else:
@@ -1113,6 +1130,8 @@ def _distro_release_info(self):
                 # The name is always present if the pattern matches
                 self.distro_release_file = filepath
                 distro_info['id'] = match.group(1)
+                if 'cloudlinux' in distro_info['name'].lower():
+                    distro_info['id'] = 'cloudlinux'
                 return distro_info
 
     return {}
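Note on the distro.py change above: the codename fallback added to
_parse_os_release_content() is easiest to follow on concrete VERSION strings.
A standalone sketch using the same regex follows; parse_codename is a
hypothetical helper named only for illustration, and the sample strings mimic
the `(CODENAME)` and `, CODENAME` formats rather than quoting real files:

# Sketch of the VERSION-based codename fallback shown in the diff above.
import re

def parse_codename(version):
    # Matches either the `(CODENAME)` format (rhel, centos, fedora) or
    # the `, CODENAME` format used by older Ubuntu releases.
    match = re.search(r'(\(\D+\))|,(\s+)?\D+', version)
    if not match:
        return ''
    return match.group().strip('()').strip(',').strip()

print(parse_codename('16.04.6 LTS (Xenial Xerus)'))  # Xenial Xerus
print(parse_codename('14.04.3 LTS, Trusty Tahr'))    # Trusty Tahr
print(parse_codename('10'))                          # '' (no codename)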
diff --git a/pipenv/patched/notpip/_vendor/html5lib/_trie/_base.py b/pipenv/patched/notpip/_vendor/html5lib/_trie/_base.py
index a1158bbbfa..6b71975f08 100644
--- a/pipenv/patched/notpip/_vendor/html5lib/_trie/_base.py
+++ b/pipenv/patched/notpip/_vendor/html5lib/_trie/_base.py
@@ -1,6 +1,9 @@
 from __future__ import absolute_import, division, unicode_literals
 
-from collections import Mapping
+try:
+    from collections.abc import Mapping
+except ImportError:  # Python 2.7
+    from collections import Mapping
 
 
 class Trie(Mapping):
diff --git a/pipenv/patched/notpip/_vendor/html5lib/treebuilders/dom.py b/pipenv/patched/notpip/_vendor/html5lib/treebuilders/dom.py
index dcfac220bf..d8b5300465 100644
--- a/pipenv/patched/notpip/_vendor/html5lib/treebuilders/dom.py
+++ b/pipenv/patched/notpip/_vendor/html5lib/treebuilders/dom.py
@@ -1,7 +1,10 @@
 from __future__ import absolute_import, division, unicode_literals
 
-from collections import MutableMapping
+try:
+    from collections.abc import MutableMapping
+except ImportError:  # Python 2.7
+    from collections import MutableMapping
 from xml.dom import minidom, Node
 import weakref
 
diff --git a/pipenv/patched/notpip/_vendor/lockfile/LICENSE b/pipenv/patched/notpip/_vendor/lockfile/LICENSE
deleted file mode 100644
index 610c0793f7..0000000000
--- a/pipenv/patched/notpip/_vendor/lockfile/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-This is the MIT license: http://www.opensource.org/licenses/mit-license.php
-
-Copyright (c) 2007 Skip Montanaro.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. diff --git a/pipenv/patched/notpip/_vendor/lockfile/__init__.py b/pipenv/patched/notpip/_vendor/lockfile/__init__.py deleted file mode 100644 index a6f44a55c6..0000000000 --- a/pipenv/patched/notpip/_vendor/lockfile/__init__.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -lockfile.py - Platform-independent advisory file locks. - -Requires Python 2.5 unless you apply 2.4.diff -Locking is done on a per-thread basis instead of a per-process basis. - -Usage: - ->>> lock = LockFile('somefile') ->>> try: -... lock.acquire() -... except AlreadyLocked: -... print 'somefile', 'is locked already.' -... except LockFailed: -... print 'somefile', 'can\\'t be locked.' -... else: -... print 'got lock' -got lock ->>> print lock.is_locked() -True ->>> lock.release() - ->>> lock = LockFile('somefile') ->>> print lock.is_locked() -False ->>> with lock: -... print lock.is_locked() -True ->>> print lock.is_locked() -False - ->>> lock = LockFile('somefile') ->>> # It is okay to lock twice from the same thread... ->>> with lock: -... lock.acquire() -... ->>> # Though no counter is kept, so you can't unlock multiple times... ->>> print lock.is_locked() -False - -Exceptions: - - Error - base class for other exceptions - LockError - base class for all locking exceptions - AlreadyLocked - Another thread or process already holds the lock - LockFailed - Lock failed for some other reason - UnlockError - base class for all unlocking exceptions - AlreadyUnlocked - File was not locked. - NotMyLock - File was locked but not by the current thread/process -""" - -from __future__ import absolute_import - -import functools -import os -import socket -import threading -import warnings - -# Work with PEP8 and non-PEP8 versions of threading module. -if not hasattr(threading, "current_thread"): - threading.current_thread = threading.currentThread -if not hasattr(threading.Thread, "get_name"): - threading.Thread.get_name = threading.Thread.getName - -__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked', - 'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock', - 'LinkFileLock', 'MkdirFileLock', 'SQLiteFileLock', - 'LockBase', 'locked'] - - -class Error(Exception): - """ - Base class for other exceptions. - - >>> try: - ... raise Error - ... except Exception: - ... pass - """ - pass - - -class LockError(Error): - """ - Base class for error arising from attempts to acquire the lock. - - >>> try: - ... raise LockError - ... except Error: - ... pass - """ - pass - - -class LockTimeout(LockError): - """Raised when lock creation fails within a user-defined period of time. - - >>> try: - ... raise LockTimeout - ... except LockError: - ... pass - """ - pass - - -class AlreadyLocked(LockError): - """Some other thread/process is locking the file. - - >>> try: - ... raise AlreadyLocked - ... except LockError: - ... pass - """ - pass - - -class LockFailed(LockError): - """Lock file creation failed for some other reason. - - >>> try: - ... raise LockFailed - ... except LockError: - ... 
pass - """ - pass - - -class UnlockError(Error): - """ - Base class for errors arising from attempts to release the lock. - - >>> try: - ... raise UnlockError - ... except Error: - ... pass - """ - pass - - -class NotLocked(UnlockError): - """Raised when an attempt is made to unlock an unlocked file. - - >>> try: - ... raise NotLocked - ... except UnlockError: - ... pass - """ - pass - - -class NotMyLock(UnlockError): - """Raised when an attempt is made to unlock a file someone else locked. - - >>> try: - ... raise NotMyLock - ... except UnlockError: - ... pass - """ - pass - - -class _SharedBase(object): - def __init__(self, path): - self.path = path - - def acquire(self, timeout=None): - """ - Acquire the lock. - - * If timeout is omitted (or None), wait forever trying to lock the - file. - - * If timeout > 0, try to acquire the lock for that many seconds. If - the lock period expires and the file is still locked, raise - LockTimeout. - - * If timeout <= 0, raise AlreadyLocked immediately if the file is - already locked. - """ - raise NotImplemented("implement in subclass") - - def release(self): - """ - Release the lock. - - If the file is not locked, raise NotLocked. - """ - raise NotImplemented("implement in subclass") - - def __enter__(self): - """ - Context manager support. - """ - self.acquire() - return self - - def __exit__(self, *_exc): - """ - Context manager support. - """ - self.release() - - def __repr__(self): - return "<%s: %r>" % (self.__class__.__name__, self.path) - - -class LockBase(_SharedBase): - """Base class for platform-specific lock classes.""" - def __init__(self, path, threaded=True, timeout=None): - """ - >>> lock = LockBase('somefile') - >>> lock = LockBase('somefile', threaded=False) - """ - super(LockBase, self).__init__(path) - self.lock_file = os.path.abspath(path) + ".lock" - self.hostname = socket.gethostname() - self.pid = os.getpid() - if threaded: - t = threading.current_thread() - # Thread objects in Python 2.4 and earlier do not have ident - # attrs. Worm around that. - ident = getattr(t, "ident", hash(t)) - self.tname = "-%x" % (ident & 0xffffffff) - else: - self.tname = "" - dirname = os.path.dirname(self.lock_file) - - # unique name is mostly about the current process, but must - # also contain the path -- otherwise, two adjacent locked - # files conflict (one file gets locked, creating lock-file and - # unique file, the other one gets locked, creating lock-file - # and overwriting the already existing lock-file, then one - # gets unlocked, deleting both lock-file and unique file, - # finally the last lock errors out upon releasing. - self.unique_name = os.path.join(dirname, - "%s%s.%s%s" % (self.hostname, - self.tname, - self.pid, - hash(self.path))) - self.timeout = timeout - - def is_locked(self): - """ - Tell whether or not the file is locked. - """ - raise NotImplemented("implement in subclass") - - def i_am_locking(self): - """ - Return True if this object is locking the file. - """ - raise NotImplemented("implement in subclass") - - def break_lock(self): - """ - Remove a lock. Useful if a locking thread failed to unlock. - """ - raise NotImplemented("implement in subclass") - - def __repr__(self): - return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name, - self.path) - - -def _fl_helper(cls, mod, *args, **kwds): - warnings.warn("Import from %s module instead of lockfile package" % mod, - DeprecationWarning, stacklevel=2) - # This is a bit funky, but it's only for awhile. 
The way the unit tests - # are constructed this function winds up as an unbound method, so it - # actually takes three args, not two. We want to toss out self. - if not isinstance(args[0], str): - # We are testing, avoid the first arg - args = args[1:] - if len(args) == 1 and not kwds: - kwds["threaded"] = True - return cls(*args, **kwds) - - -def LinkFileLock(*args, **kwds): - """Factory function provided for backwards compatibility. - - Do not use in new code. Instead, import LinkLockFile from the - lockfile.linklockfile module. - """ - from . import linklockfile - return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile", - *args, **kwds) - - -def MkdirFileLock(*args, **kwds): - """Factory function provided for backwards compatibility. - - Do not use in new code. Instead, import MkdirLockFile from the - lockfile.mkdirlockfile module. - """ - from . import mkdirlockfile - return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile", - *args, **kwds) - - -def SQLiteFileLock(*args, **kwds): - """Factory function provided for backwards compatibility. - - Do not use in new code. Instead, import SQLiteLockFile from the - lockfile.mkdirlockfile module. - """ - from . import sqlitelockfile - return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile", - *args, **kwds) - - -def locked(path, timeout=None): - """Decorator which enables locks for decorated function. - - Arguments: - - path: path for lockfile. - - timeout (optional): Timeout for acquiring lock. - - Usage: - @locked('/var/run/myname', timeout=0) - def myname(...): - ... - """ - def decor(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - lock = FileLock(path, timeout=timeout) - lock.acquire() - try: - return func(*args, **kwargs) - finally: - lock.release() - return wrapper - return decor - - -if hasattr(os, "link"): - from . import linklockfile as _llf - LockFile = _llf.LinkLockFile -else: - from . import mkdirlockfile as _mlf - LockFile = _mlf.MkdirLockFile - -FileLock = LockFile diff --git a/pipenv/patched/notpip/_vendor/lockfile/linklockfile.py b/pipenv/patched/notpip/_vendor/lockfile/linklockfile.py deleted file mode 100644 index 2ca9be0423..0000000000 --- a/pipenv/patched/notpip/_vendor/lockfile/linklockfile.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import absolute_import - -import time -import os - -from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, - AlreadyLocked) - - -class LinkLockFile(LockBase): - """Lock access to a file using atomic property of link(2). - - >>> lock = LinkLockFile('somefile') - >>> lock = LinkLockFile('somefile', threaded=False) - """ - - def acquire(self, timeout=None): - try: - open(self.unique_name, "wb").close() - except IOError: - raise LockFailed("failed to create %s" % self.unique_name) - - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - while True: - # Try and create a hard link to it. - try: - os.link(self.unique_name, self.lock_file) - except OSError: - # Link creation failed. Maybe we've double-locked? - nlinks = os.stat(self.unique_name).st_nlink - if nlinks == 2: - # The original link plus the one I created == 2. We're - # good to go. - return - else: - # Otherwise the lock creation failed. 
- if timeout is not None and time.time() > end_time: - os.unlink(self.unique_name) - if timeout > 0: - raise LockTimeout("Timeout waiting to acquire" - " lock for %s" % - self.path) - else: - raise AlreadyLocked("%s is already locked" % - self.path) - time.sleep(timeout is not None and timeout / 10 or 0.1) - else: - # Link creation succeeded. We're good to go. - return - - def release(self): - if not self.is_locked(): - raise NotLocked("%s is not locked" % self.path) - elif not os.path.exists(self.unique_name): - raise NotMyLock("%s is locked, but not by me" % self.path) - os.unlink(self.unique_name) - os.unlink(self.lock_file) - - def is_locked(self): - return os.path.exists(self.lock_file) - - def i_am_locking(self): - return (self.is_locked() and - os.path.exists(self.unique_name) and - os.stat(self.unique_name).st_nlink == 2) - - def break_lock(self): - if os.path.exists(self.lock_file): - os.unlink(self.lock_file) diff --git a/pipenv/patched/notpip/_vendor/lockfile/mkdirlockfile.py b/pipenv/patched/notpip/_vendor/lockfile/mkdirlockfile.py deleted file mode 100644 index 05a8c96ca5..0000000000 --- a/pipenv/patched/notpip/_vendor/lockfile/mkdirlockfile.py +++ /dev/null @@ -1,84 +0,0 @@ -from __future__ import absolute_import, division - -import time -import os -import sys -import errno - -from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, - AlreadyLocked) - - -class MkdirLockFile(LockBase): - """Lock file by creating a directory.""" - def __init__(self, path, threaded=True, timeout=None): - """ - >>> lock = MkdirLockFile('somefile') - >>> lock = MkdirLockFile('somefile', threaded=False) - """ - LockBase.__init__(self, path, threaded, timeout) - # Lock file itself is a directory. Place the unique file name into - # it. - self.unique_name = os.path.join(self.lock_file, - "%s.%s%s" % (self.hostname, - self.tname, - self.pid)) - - def acquire(self, timeout=None): - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - if timeout is None: - wait = 0.1 - else: - wait = max(0, timeout / 10) - - while True: - try: - os.mkdir(self.lock_file) - except OSError: - err = sys.exc_info()[1] - if err.errno == errno.EEXIST: - # Already locked. - if os.path.exists(self.unique_name): - # Already locked by me. - return - if timeout is not None and time.time() > end_time: - if timeout > 0: - raise LockTimeout("Timeout waiting to acquire" - " lock for %s" % - self.path) - else: - # Someone else has the lock. 
- raise AlreadyLocked("%s is already locked" % - self.path) - time.sleep(wait) - else: - # Couldn't create the lock for some other reason - raise LockFailed("failed to create %s" % self.lock_file) - else: - open(self.unique_name, "wb").close() - return - - def release(self): - if not self.is_locked(): - raise NotLocked("%s is not locked" % self.path) - elif not os.path.exists(self.unique_name): - raise NotMyLock("%s is locked, but not by me" % self.path) - os.unlink(self.unique_name) - os.rmdir(self.lock_file) - - def is_locked(self): - return os.path.exists(self.lock_file) - - def i_am_locking(self): - return (self.is_locked() and - os.path.exists(self.unique_name)) - - def break_lock(self): - if os.path.exists(self.lock_file): - for name in os.listdir(self.lock_file): - os.unlink(os.path.join(self.lock_file, name)) - os.rmdir(self.lock_file) diff --git a/pipenv/patched/notpip/_vendor/lockfile/pidlockfile.py b/pipenv/patched/notpip/_vendor/lockfile/pidlockfile.py deleted file mode 100644 index 069e85b15b..0000000000 --- a/pipenv/patched/notpip/_vendor/lockfile/pidlockfile.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- - -# pidlockfile.py -# -# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au> -# -# This is free software: you may copy, modify, and/or distribute this work -# under the terms of the Python Software Foundation License, version 2 or -# later as published by the Python Software Foundation. -# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. - -""" Lockfile behaviour implemented via Unix PID files. - """ - -from __future__ import absolute_import - -import errno -import os -import time - -from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock, - LockTimeout) - - -class PIDLockFile(LockBase): - """ Lockfile implemented as a Unix PID file. - - The lock file is a normal file named by the attribute `path`. - A lock's PID file contains a single line of text, containing - the process ID (PID) of the process that acquired the lock. - - >>> lock = PIDLockFile('somefile') - >>> lock = PIDLockFile('somefile') - """ - - def __init__(self, path, threaded=False, timeout=None): - # pid lockfiles don't support threaded operation, so always force - # False as the threaded arg. - LockBase.__init__(self, path, False, timeout) - self.unique_name = self.path - - def read_pid(self): - """ Get the PID from the lock file. - """ - return read_pid_from_pidfile(self.path) - - def is_locked(self): - """ Test if the lock is currently held. - - The lock is held if the PID file for this lock exists. - - """ - return os.path.exists(self.path) - - def i_am_locking(self): - """ Test if the lock is held by the current process. - - Returns ``True`` if the current process ID matches the - number stored in the PID file. - """ - return self.is_locked() and os.getpid() == self.read_pid() - - def acquire(self, timeout=None): - """ Acquire the lock. - - Creates the PID file for this lock, or raises an error if - the lock could not be acquired. - """ - - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - while True: - try: - write_pid_to_pidfile(self.path) - except OSError as exc: - if exc.errno == errno.EEXIST: - # The lock creation failed. Maybe sleep a bit. 
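# write_pid_to_pidfile() opens the file with os.O_CREAT | os.O_EXCL,
# so EEXIST here reliably means another process created the PID file
# first rather than a partially written file of our own.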
-                    if timeout is not None and time.time() > end_time:
-                        if timeout > 0:
-                            raise LockTimeout("Timeout waiting to acquire"
-                                              " lock for %s" %
-                                              self.path)
-                        else:
-                            raise AlreadyLocked("%s is already locked" %
-                                                self.path)
-                    time.sleep(timeout is not None and timeout / 10 or 0.1)
-                else:
-                    raise LockFailed("failed to create %s" % self.path)
-            else:
-                return
-
-    def release(self):
-        """ Release the lock.
-
-            Removes the PID file to release the lock, or raises an
-            error if the current process does not hold the lock.
-
-            """
-        if not self.is_locked():
-            raise NotLocked("%s is not locked" % self.path)
-        if not self.i_am_locking():
-            raise NotMyLock("%s is locked, but not by me" % self.path)
-        remove_existing_pidfile(self.path)
-
-    def break_lock(self):
-        """ Break an existing lock.
-
-            Removes the PID file if it already exists, otherwise does
-            nothing.
-
-            """
-        remove_existing_pidfile(self.path)
-
-
-def read_pid_from_pidfile(pidfile_path):
-    """ Read the PID recorded in the named PID file.
-
-        Read and return the numeric PID recorded as text in the named
-        PID file.  If the PID file cannot be read, or if the content is
-        not a valid PID, return ``None``.
-
-        """
-    pid = None
-    try:
-        pidfile = open(pidfile_path, 'r')
-    except IOError:
-        pass
-    else:
-        # According to the FHS 2.3 section on PID files in /var/run:
-        #
-        #   The file must consist of the process identifier in
-        #   ASCII-encoded decimal, followed by a newline character.
-        #
-        #   Programs that read PID files should be somewhat flexible
-        #   in what they accept; i.e., they should ignore extra
-        #   whitespace, leading zeroes, absence of the trailing
-        #   newline, or additional lines in the PID file.
-
-        line = pidfile.readline().strip()
-        try:
-            pid = int(line)
-        except ValueError:
-            pass
-        pidfile.close()
-
-    return pid
-
-
-def write_pid_to_pidfile(pidfile_path):
-    """ Write the PID in the named PID file.
-
-        Get the numeric process ID (“PID”) of the current process
-        and write it to the named file as a line of text.
-
-        """
-    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
-    open_mode = 0o644
-    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
-    pidfile = os.fdopen(pidfile_fd, 'w')
-
-    # According to the FHS 2.3 section on PID files in /var/run:
-    #
-    #   The file must consist of the process identifier in
-    #   ASCII-encoded decimal, followed by a newline character.  For
-    #   example, if crond was process number 25, /var/run/crond.pid
-    #   would contain three characters: two, five, and newline.
-
-    pid = os.getpid()
-    pidfile.write("%s\n" % pid)
-    pidfile.close()
-
-
-def remove_existing_pidfile(pidfile_path):
-    """ Remove the named PID file if it exists.
-
-        Removing a PID file that doesn't already exist puts us in the
-        desired state, so we ignore the condition if the file does not
-        exist.
-
-        """
-    try:
-        os.remove(pidfile_path)
-    except OSError as exc:
-        if exc.errno == errno.ENOENT:
-            pass
-        else:
-            raise
diff --git a/pipenv/patched/notpip/_vendor/lockfile/sqlitelockfile.py b/pipenv/patched/notpip/_vendor/lockfile/sqlitelockfile.py
deleted file mode 100644
index f997e2444e..0000000000
--- a/pipenv/patched/notpip/_vendor/lockfile/sqlitelockfile.py
+++ /dev/null
@@ -1,156 +0,0 @@
-from __future__ import absolute_import, division
-
-import time
-import os
-
-try:
-    unicode
-except NameError:
-    unicode = str
-
-from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
-
-
-class SQLiteLockFile(LockBase):
-    "Demonstrate SQL-based locking."
- - testdb = None - - def __init__(self, path, threaded=True, timeout=None): - """ - >>> lock = SQLiteLockFile('somefile') - >>> lock = SQLiteLockFile('somefile', threaded=False) - """ - LockBase.__init__(self, path, threaded, timeout) - self.lock_file = unicode(self.lock_file) - self.unique_name = unicode(self.unique_name) - - if SQLiteLockFile.testdb is None: - import tempfile - _fd, testdb = tempfile.mkstemp() - os.close(_fd) - os.unlink(testdb) - del _fd, tempfile - SQLiteLockFile.testdb = testdb - - import sqlite3 - self.connection = sqlite3.connect(SQLiteLockFile.testdb) - - c = self.connection.cursor() - try: - c.execute("create table locks" - "(" - " lock_file varchar(32)," - " unique_name varchar(32)" - ")") - except sqlite3.OperationalError: - pass - else: - self.connection.commit() - import atexit - atexit.register(os.unlink, SQLiteLockFile.testdb) - - def acquire(self, timeout=None): - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - if timeout is None: - wait = 0.1 - elif timeout <= 0: - wait = 0 - else: - wait = timeout / 10 - - cursor = self.connection.cursor() - - while True: - if not self.is_locked(): - # Not locked. Try to lock it. - cursor.execute("insert into locks" - " (lock_file, unique_name)" - " values" - " (?, ?)", - (self.lock_file, self.unique_name)) - self.connection.commit() - - # Check to see if we are the only lock holder. - cursor.execute("select * from locks" - " where unique_name = ?", - (self.unique_name,)) - rows = cursor.fetchall() - if len(rows) > 1: - # Nope. Someone else got there. Remove our lock. - cursor.execute("delete from locks" - " where unique_name = ?", - (self.unique_name,)) - self.connection.commit() - else: - # Yup. We're done, so go home. - return - else: - # Check to see if we are the only lock holder. - cursor.execute("select * from locks" - " where unique_name = ?", - (self.unique_name,)) - rows = cursor.fetchall() - if len(rows) == 1: - # We're the locker, so go home. - return - - # Maybe we should wait a bit longer. - if timeout is not None and time.time() > end_time: - if timeout > 0: - # No more waiting. - raise LockTimeout("Timeout waiting to acquire" - " lock for %s" % - self.path) - else: - # Someone else has the lock and we are impatient.. - raise AlreadyLocked("%s is already locked" % self.path) - - # Well, okay. We'll give it a bit longer. - time.sleep(wait) - - def release(self): - if not self.is_locked(): - raise NotLocked("%s is not locked" % self.path) - if not self.i_am_locking(): - raise NotMyLock("%s is locked, but not by me (by %s)" % - (self.unique_name, self._who_is_locking())) - cursor = self.connection.cursor() - cursor.execute("delete from locks" - " where unique_name = ?", - (self.unique_name,)) - self.connection.commit() - - def _who_is_locking(self): - cursor = self.connection.cursor() - cursor.execute("select unique_name from locks" - " where lock_file = ?", - (self.lock_file,)) - return cursor.fetchone()[0] - - def is_locked(self): - cursor = self.connection.cursor() - cursor.execute("select * from locks" - " where lock_file = ?", - (self.lock_file,)) - rows = cursor.fetchall() - return not not rows - - def i_am_locking(self): - cursor = self.connection.cursor() - cursor.execute("select * from locks" - " where lock_file = ?" 
- " and unique_name = ?", - (self.lock_file, self.unique_name)) - return not not cursor.fetchall() - - def break_lock(self): - cursor = self.connection.cursor() - cursor.execute("delete from locks" - " where lock_file = ?", - (self.lock_file,)) - self.connection.commit() diff --git a/pipenv/patched/notpip/_vendor/lockfile/symlinklockfile.py b/pipenv/patched/notpip/_vendor/lockfile/symlinklockfile.py deleted file mode 100644 index 23b41f582b..0000000000 --- a/pipenv/patched/notpip/_vendor/lockfile/symlinklockfile.py +++ /dev/null @@ -1,70 +0,0 @@ -from __future__ import absolute_import - -import os -import time - -from . import (LockBase, NotLocked, NotMyLock, LockTimeout, - AlreadyLocked) - - -class SymlinkLockFile(LockBase): - """Lock access to a file using symlink(2).""" - - def __init__(self, path, threaded=True, timeout=None): - # super(SymlinkLockFile).__init(...) - LockBase.__init__(self, path, threaded, timeout) - # split it back! - self.unique_name = os.path.split(self.unique_name)[1] - - def acquire(self, timeout=None): - # Hopefully unnecessary for symlink. - # try: - # open(self.unique_name, "wb").close() - # except IOError: - # raise LockFailed("failed to create %s" % self.unique_name) - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - while True: - # Try and create a symbolic link to it. - try: - os.symlink(self.unique_name, self.lock_file) - except OSError: - # Link creation failed. Maybe we've double-locked? - if self.i_am_locking(): - # Linked to out unique name. Proceed. - return - else: - # Otherwise the lock creation failed. - if timeout is not None and time.time() > end_time: - if timeout > 0: - raise LockTimeout("Timeout waiting to acquire" - " lock for %s" % - self.path) - else: - raise AlreadyLocked("%s is already locked" % - self.path) - time.sleep(timeout / 10 if timeout is not None else 0.1) - else: - # Link creation succeeded. We're good to go. 
- return - - def release(self): - if not self.is_locked(): - raise NotLocked("%s is not locked" % self.path) - elif not self.i_am_locking(): - raise NotMyLock("%s is locked, but not by me" % self.path) - os.unlink(self.lock_file) - - def is_locked(self): - return os.path.islink(self.lock_file) - - def i_am_locking(self): - return (os.path.islink(self.lock_file) - and os.readlink(self.lock_file) == self.unique_name) - - def break_lock(self): - if os.path.islink(self.lock_file): # exists && link - os.unlink(self.lock_file) diff --git a/pipenv/patched/notpip/_vendor/msgpack/__init__.py b/pipenv/patched/notpip/_vendor/msgpack/__init__.py index a15e57698b..4ad9c1a5e1 100644 --- a/pipenv/patched/notpip/_vendor/msgpack/__init__.py +++ b/pipenv/patched/notpip/_vendor/msgpack/__init__.py @@ -1,6 +1,6 @@ # coding: utf-8 -from pipenv.patched.notpip._vendor.msgpack._version import version -from pipenv.patched.notpip._vendor.msgpack.exceptions import * +from ._version import version +from .exceptions import * from collections import namedtuple @@ -19,13 +19,12 @@ def __new__(cls, code, data): import os if os.environ.get('MSGPACK_PUREPYTHON'): - from pipenv.patched.notpip._vendor.msgpack.fallback import Packer, unpackb, Unpacker + from .fallback import Packer, unpackb, Unpacker else: try: - from pipenv.patched.notpip._vendor.msgpack._packer import Packer - from pipenv.patched.notpip._vendor.msgpack._unpacker import unpackb, Unpacker + from ._cmsgpack import Packer, unpackb, Unpacker except ImportError: - from pipenv.patched.notpip._vendor.msgpack.fallback import Packer, unpackb, Unpacker + from .fallback import Packer, unpackb, Unpacker def pack(o, stream, **kwargs): diff --git a/pipenv/patched/notpip/_vendor/msgpack/_version.py b/pipenv/patched/notpip/_vendor/msgpack/_version.py index d28f0deb86..1e73a00f63 100644 --- a/pipenv/patched/notpip/_vendor/msgpack/_version.py +++ b/pipenv/patched/notpip/_vendor/msgpack/_version.py @@ -1 +1 @@ -version = (0, 5, 6) +version = (0, 6, 2) diff --git a/pipenv/patched/notpip/_vendor/msgpack/exceptions.py b/pipenv/patched/notpip/_vendor/msgpack/exceptions.py index 97668814f2..d6d2615cfd 100644 --- a/pipenv/patched/notpip/_vendor/msgpack/exceptions.py +++ b/pipenv/patched/notpip/_vendor/msgpack/exceptions.py @@ -1,5 +1,10 @@ class UnpackException(Exception): - """Deprecated. Use Exception instead to catch all exception during unpacking.""" + """Base class for some exceptions raised while unpacking. + + NOTE: unpack may raise exception other than subclass of + UnpackException. If you want to catch all error, catch + Exception instead. + """ class BufferFull(UnpackException): @@ -10,32 +15,34 @@ class OutOfData(UnpackException): pass -class UnpackValueError(UnpackException, ValueError): - """Deprecated. Use ValueError instead.""" +class FormatError(ValueError, UnpackException): + """Invalid msgpack format""" -class ExtraData(UnpackValueError): - def __init__(self, unpacked, extra): - self.unpacked = unpacked - self.extra = extra - - def __str__(self): - return "unpack(b) received extra data." +class StackError(ValueError, UnpackException): + """Too nested""" -class PackException(Exception): - """Deprecated. Use Exception instead to catch all exception during packing.""" +# Deprecated. Use ValueError instead +UnpackValueError = ValueError -class PackValueError(PackException, ValueError): - """PackValueError is raised when type of input data is supported but it's value is unsupported. 
+class ExtraData(UnpackValueError): + """ExtraData is raised when there is trailing data. - Deprecated. Use ValueError instead. + This exception is raised while only one-shot (not streaming) + unpack. """ + def __init__(self, unpacked, extra): + self.unpacked = unpacked + self.extra = extra + + def __str__(self): + return "unpack(b) received extra data." -class PackOverflowError(PackValueError, OverflowError): - """PackOverflowError is raised when integer value is out of range of msgpack support [-2**31, 2**32). - Deprecated. Use ValueError instead. - """ +# Deprecated. Use Exception instead to catch all exception during packing. +PackException = Exception +PackValueError = ValueError +PackOverflowError = OverflowError diff --git a/pipenv/patched/notpip/_vendor/msgpack/fallback.py b/pipenv/patched/notpip/_vendor/msgpack/fallback.py index d3a7d55807..3836e830b8 100644 --- a/pipenv/patched/notpip/_vendor/msgpack/fallback.py +++ b/pipenv/patched/notpip/_vendor/msgpack/fallback.py @@ -4,20 +4,30 @@ import struct import warnings -if sys.version_info[0] == 3: - PY3 = True + +if sys.version_info[0] == 2: + PY2 = True + int_types = (int, long) + def dict_iteritems(d): + return d.iteritems() +else: + PY2 = False int_types = int - Unicode = str + unicode = str xrange = range def dict_iteritems(d): return d.items() -else: - PY3 = False - int_types = (int, long) - Unicode = unicode - def dict_iteritems(d): - return d.iteritems() +if sys.version_info < (3, 5): + # Ugly hack... + RecursionError = RuntimeError + + def _is_recursionerror(e): + return len(e.args) == 1 and isinstance(e.args[0], str) and \ + e.args[0].startswith('maximum recursion depth exceeded') +else: + def _is_recursionerror(e): + return True if hasattr(sys, 'pypy_version_info'): # cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own @@ -49,15 +59,15 @@ def getvalue(self): newlist_hint = lambda size: [] -from pipenv.patched.notpip._vendor.msgpack.exceptions import ( +from .exceptions import ( BufferFull, OutOfData, - UnpackValueError, - PackValueError, - PackOverflowError, - ExtraData) + ExtraData, + FormatError, + StackError, +) -from pipenv.patched.notpip._vendor.msgpack import ExtType +from . import ExtType EX_SKIP = 0 @@ -87,12 +97,12 @@ def _get_data_from_buffer(obj): view = memoryview(obj) except TypeError: # try to use legacy buffer protocol if 2.7, otherwise re-raise - if not PY3: + if PY2: view = memoryview(buffer(obj)) warnings.warn("using old buffer interface to unpack %s; " "this leads to unpacking errors if slicing is used and " "will be removed in a future version" % type(obj), - RuntimeWarning) + RuntimeWarning, stacklevel=3) else: raise if view.itemsize != 1: @@ -103,7 +113,7 @@ def _get_data_from_buffer(obj): def unpack(stream, **kwargs): warnings.warn( "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", - PendingDeprecationWarning) + DeprecationWarning, stacklevel=2) data = stream.read() return unpackb(data, **kwargs) @@ -112,20 +122,37 @@ def unpackb(packed, **kwargs): """ Unpack an object from `packed`. - Raises `ExtraData` when `packed` contains extra bytes. + Raises ``ExtraData`` when *packed* contains extra bytes. + Raises ``ValueError`` when *packed* is incomplete. + Raises ``FormatError`` when *packed* is not valid msgpack. + Raises ``StackError`` when *packed* contains too nested. + Other exceptions can be raised during unpacking. + See :class:`Unpacker` for options. 
""" - unpacker = Unpacker(None, **kwargs) + unpacker = Unpacker(None, max_buffer_size=len(packed), **kwargs) unpacker.feed(packed) try: ret = unpacker._unpack() except OutOfData: - raise UnpackValueError("Data is not enough.") + raise ValueError("Unpack failed: incomplete input") + except RecursionError as e: + if _is_recursionerror(e): + raise StackError + raise if unpacker._got_extradata(): raise ExtraData(ret, unpacker._get_extradata()) return ret +if sys.version_info < (2, 7, 6): + def _unpack_from(f, b, o=0): + """Explicit typcast for legacy struct.unpack_from""" + return struct.unpack_from(f, bytes(b), o) +else: + _unpack_from = struct.unpack_from + + class Unpacker(object): """Streaming unpacker. @@ -152,6 +179,11 @@ class Unpacker(object): *encoding* option which is deprecated overrides this option. + :param bool strict_map_key: + If true, only str or bytes are accepted for map (dict) keys. + It's False by default for backward-compatibility. + But it will be True from msgpack 1.0. + :param callable object_hook: When specified, it should be callable. Unpacker calls it with a dict argument after unpacking msgpack map. @@ -176,27 +208,34 @@ class Unpacker(object): You should set this parameter when unpacking data from untrusted source. :param int max_str_len: - Limits max length of str. (default: 2**31-1) + Deprecated, use *max_buffer_size* instead. + Limits max length of str. (default: max_buffer_size or 1024*1024) :param int max_bin_len: - Limits max length of bin. (default: 2**31-1) + Deprecated, use *max_buffer_size* instead. + Limits max length of bin. (default: max_buffer_size or 1024*1024) :param int max_array_len: - Limits max length of array. (default: 2**31-1) + Limits max length of array. + (default: max_buffer_size or 128*1024) :param int max_map_len: - Limits max length of map. (default: 2**31-1) + Limits max length of map. + (default: max_buffer_size//2 or 32*1024) + :param int max_ext_len: + Deprecated, use *max_buffer_size* instead. + Limits max size of ext type. (default: max_buffer_size or 1024*1024) - example of streaming deserialize from file-like object:: + Example of streaming deserialize from file-like object:: - unpacker = Unpacker(file_like, raw=False) + unpacker = Unpacker(file_like, raw=False, max_buffer_size=10*1024*1024) for o in unpacker: process(o) - example of streaming deserialize from socket:: + Example of streaming deserialize from socket:: - unpacker = Unpacker(raw=False) + unpacker = Unpacker(raw=False, max_buffer_size=10*1024*1024) while True: buf = sock.recv(1024**2) if not buf: @@ -204,22 +243,27 @@ class Unpacker(object): unpacker.feed(buf) for o in unpacker: process(o) + + Raises ``ExtraData`` when *packed* contains extra bytes. + Raises ``OutOfData`` when *packed* is incomplete. + Raises ``FormatError`` when *packed* is not valid msgpack. + Raises ``StackError`` when *packed* contains too nested. + Other exceptions can be raised during unpacking. 
""" - def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, + def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, strict_map_key=False, object_hook=None, object_pairs_hook=None, list_hook=None, encoding=None, unicode_errors=None, max_buffer_size=0, ext_hook=ExtType, - max_str_len=2147483647, # 2**32-1 - max_bin_len=2147483647, - max_array_len=2147483647, - max_map_len=2147483647, - max_ext_len=2147483647): - + max_str_len=-1, + max_bin_len=-1, + max_array_len=-1, + max_map_len=-1, + max_ext_len=-1): if encoding is not None: warnings.warn( "encoding is deprecated, Use raw=False instead.", - PendingDeprecationWarning) + DeprecationWarning, stacklevel=2) if unicode_errors is None: unicode_errors = 'strict' @@ -234,12 +278,6 @@ def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, #: array of bytes fed. self._buffer = bytearray() - # Some very old pythons don't support `struct.unpack_from()` with a - # `bytearray`. So we wrap it in a `buffer()` there. - if sys.version_info < (2, 7, 6): - self._buffer_view = buffer(self._buffer) - else: - self._buffer_view = self._buffer #: Which position we currently reads self._buff_i = 0 @@ -252,11 +290,23 @@ def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, # state, which _buf_checkpoint records. self._buf_checkpoint = 0 + if max_str_len == -1: + max_str_len = max_buffer_size or 1024*1024 + if max_bin_len == -1: + max_bin_len = max_buffer_size or 1024*1024 + if max_array_len == -1: + max_array_len = max_buffer_size or 128*1024 + if max_map_len == -1: + max_map_len = max_buffer_size//2 or 32*1024 + if max_ext_len == -1: + max_ext_len = max_buffer_size or 1024*1024 + self._max_buffer_size = max_buffer_size or 2**31-1 if read_size > self._max_buffer_size: raise ValueError("read_size must be smaller than max_buffer_size") self._read_size = read_size or min(self._max_buffer_size, 16*1024) self._raw = bool(raw) + self._strict_map_key = bool(strict_map_key) self._encoding = encoding self._unicode_errors = unicode_errors self._use_list = use_list @@ -295,7 +345,8 @@ def feed(self, next_bytes): self._buff_i -= self._buf_checkpoint self._buf_checkpoint = 0 - self._buffer += view + # Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython + self._buffer.extend(view) def _consume(self): """ Gets rid of the used parts of the buffer. 
""" @@ -365,18 +416,18 @@ def _read_header(self, execute=EX_CONSTRUCT): n = b & 0b00011111 typ = TYPE_RAW if n > self._max_str_len: - raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._read(n) elif b & 0b11110000 == 0b10010000: n = b & 0b00001111 typ = TYPE_ARRAY if n > self._max_array_len: - raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) elif b & 0b11110000 == 0b10000000: n = b & 0b00001111 typ = TYPE_MAP if n > self._max_map_len: - raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) elif b == 0xc0: obj = None elif b == 0xc2: @@ -389,55 +440,55 @@ def _read_header(self, execute=EX_CONSTRUCT): n = self._buffer[self._buff_i] self._buff_i += 1 if n > self._max_bin_len: - raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._read(n) elif b == 0xc5: typ = TYPE_BIN self._reserve(2) - n = struct.unpack_from(">H", self._buffer_view, self._buff_i)[0] + n = _unpack_from(">H", self._buffer, self._buff_i)[0] self._buff_i += 2 if n > self._max_bin_len: - raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._read(n) elif b == 0xc6: typ = TYPE_BIN self._reserve(4) - n = struct.unpack_from(">I", self._buffer_view, self._buff_i)[0] + n = _unpack_from(">I", self._buffer, self._buff_i)[0] self._buff_i += 4 if n > self._max_bin_len: - raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._read(n) elif b == 0xc7: # ext 8 typ = TYPE_EXT self._reserve(2) - L, n = struct.unpack_from('Bb', self._buffer_view, self._buff_i) + L, n = _unpack_from('Bb', self._buffer, self._buff_i) self._buff_i += 2 if L > self._max_ext_len: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._read(L) elif b == 0xc8: # ext 16 typ = TYPE_EXT self._reserve(3) - L, n = struct.unpack_from('>Hb', self._buffer_view, self._buff_i) + L, n = _unpack_from('>Hb', self._buffer, self._buff_i) self._buff_i += 3 if L > self._max_ext_len: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._read(L) elif b == 0xc9: # ext 32 typ = TYPE_EXT self._reserve(5) - L, n = struct.unpack_from('>Ib', self._buffer_view, self._buff_i) + L, n = _unpack_from('>Ib', self._buffer, self._buff_i) self._buff_i += 5 if L > self._max_ext_len: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._read(L) elif b == 0xca: self._reserve(4) - obj = struct.unpack_from(">f", self._buffer_view, self._buff_i)[0] + obj = _unpack_from(">f", self._buffer, self._buff_i)[0] self._buff_i += 4 elif b == 0xcb: self._reserve(8) - obj = struct.unpack_from(">d", self._buffer_view, self._buff_i)[0] + obj = _unpack_from(">d", self._buffer, self._buff_i)[0] self._buff_i += 8 elif b == 0xcc: self._reserve(1) @@ -445,66 +496,66 @@ def 
_read_header(self, execute=EX_CONSTRUCT): self._buff_i += 1 elif b == 0xcd: self._reserve(2) - obj = struct.unpack_from(">H", self._buffer_view, self._buff_i)[0] + obj = _unpack_from(">H", self._buffer, self._buff_i)[0] self._buff_i += 2 elif b == 0xce: self._reserve(4) - obj = struct.unpack_from(">I", self._buffer_view, self._buff_i)[0] + obj = _unpack_from(">I", self._buffer, self._buff_i)[0] self._buff_i += 4 elif b == 0xcf: self._reserve(8) - obj = struct.unpack_from(">Q", self._buffer_view, self._buff_i)[0] + obj = _unpack_from(">Q", self._buffer, self._buff_i)[0] self._buff_i += 8 elif b == 0xd0: self._reserve(1) - obj = struct.unpack_from("b", self._buffer_view, self._buff_i)[0] + obj = _unpack_from("b", self._buffer, self._buff_i)[0] self._buff_i += 1 elif b == 0xd1: self._reserve(2) - obj = struct.unpack_from(">h", self._buffer_view, self._buff_i)[0] + obj = _unpack_from(">h", self._buffer, self._buff_i)[0] self._buff_i += 2 elif b == 0xd2: self._reserve(4) - obj = struct.unpack_from(">i", self._buffer_view, self._buff_i)[0] + obj = _unpack_from(">i", self._buffer, self._buff_i)[0] self._buff_i += 4 elif b == 0xd3: self._reserve(8) - obj = struct.unpack_from(">q", self._buffer_view, self._buff_i)[0] + obj = _unpack_from(">q", self._buffer, self._buff_i)[0] self._buff_i += 8 elif b == 0xd4: # fixext 1 typ = TYPE_EXT if self._max_ext_len < 1: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) + raise ValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) self._reserve(2) - n, obj = struct.unpack_from("b1s", self._buffer_view, self._buff_i) + n, obj = _unpack_from("b1s", self._buffer, self._buff_i) self._buff_i += 2 elif b == 0xd5: # fixext 2 typ = TYPE_EXT if self._max_ext_len < 2: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) + raise ValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) self._reserve(3) - n, obj = struct.unpack_from("b2s", self._buffer_view, self._buff_i) + n, obj = _unpack_from("b2s", self._buffer, self._buff_i) self._buff_i += 3 elif b == 0xd6: # fixext 4 typ = TYPE_EXT if self._max_ext_len < 4: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) + raise ValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) self._reserve(5) - n, obj = struct.unpack_from("b4s", self._buffer_view, self._buff_i) + n, obj = _unpack_from("b4s", self._buffer, self._buff_i) self._buff_i += 5 elif b == 0xd7: # fixext 8 typ = TYPE_EXT if self._max_ext_len < 8: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) + raise ValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) self._reserve(9) - n, obj = struct.unpack_from("b8s", self._buffer_view, self._buff_i) + n, obj = _unpack_from("b8s", self._buffer, self._buff_i) self._buff_i += 9 elif b == 0xd8: # fixext 16 typ = TYPE_EXT if self._max_ext_len < 16: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) + raise ValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) self._reserve(17) - n, obj = struct.unpack_from("b16s", self._buffer_view, self._buff_i) + n, obj = _unpack_from("b16s", self._buffer, self._buff_i) self._buff_i += 17 elif b == 0xd9: typ = TYPE_RAW @@ -512,54 +563,54 @@ def _read_header(self, execute=EX_CONSTRUCT): n = self._buffer[self._buff_i] self._buff_i += 1 if n > self._max_str_len: - raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + raise ValueError("%s exceeds max_str_len(%s)", n, 
self._max_str_len) obj = self._read(n) elif b == 0xda: typ = TYPE_RAW self._reserve(2) - n, = struct.unpack_from(">H", self._buffer_view, self._buff_i) + n, = _unpack_from(">H", self._buffer, self._buff_i) self._buff_i += 2 if n > self._max_str_len: - raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._read(n) elif b == 0xdb: typ = TYPE_RAW self._reserve(4) - n, = struct.unpack_from(">I", self._buffer_view, self._buff_i) + n, = _unpack_from(">I", self._buffer, self._buff_i) self._buff_i += 4 if n > self._max_str_len: - raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._read(n) elif b == 0xdc: typ = TYPE_ARRAY self._reserve(2) - n, = struct.unpack_from(">H", self._buffer_view, self._buff_i) + n, = _unpack_from(">H", self._buffer, self._buff_i) self._buff_i += 2 if n > self._max_array_len: - raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) elif b == 0xdd: typ = TYPE_ARRAY self._reserve(4) - n, = struct.unpack_from(">I", self._buffer_view, self._buff_i) + n, = _unpack_from(">I", self._buffer, self._buff_i) self._buff_i += 4 if n > self._max_array_len: - raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) elif b == 0xde: self._reserve(2) - n, = struct.unpack_from(">H", self._buffer_view, self._buff_i) + n, = _unpack_from(">H", self._buffer, self._buff_i) self._buff_i += 2 if n > self._max_map_len: - raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) typ = TYPE_MAP elif b == 0xdf: self._reserve(4) - n, = struct.unpack_from(">I", self._buffer_view, self._buff_i) + n, = _unpack_from(">I", self._buffer, self._buff_i) self._buff_i += 4 if n > self._max_map_len: - raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) typ = TYPE_MAP else: - raise UnpackValueError("Unknown header: 0x%x" % b) + raise FormatError("Unknown header: 0x%x" % b) return typ, n, obj def _unpack(self, execute=EX_CONSTRUCT): @@ -567,11 +618,11 @@ def _unpack(self, execute=EX_CONSTRUCT): if execute == EX_READ_ARRAY_HEADER: if typ != TYPE_ARRAY: - raise UnpackValueError("Expected array") + raise ValueError("Expected array") return n if execute == EX_READ_MAP_HEADER: if typ != TYPE_MAP: - raise UnpackValueError("Expected map") + raise ValueError("Expected map") return n # TODO should we eliminate the recursion? if typ == TYPE_ARRAY: @@ -603,6 +654,8 @@ def _unpack(self, execute=EX_CONSTRUCT): ret = {} for _ in xrange(n): key = self._unpack(EX_CONSTRUCT) + if self._strict_map_key and type(key) not in (unicode, bytes): + raise ValueError("%s is not allowed for map key" % str(type(key))) ret[key] = self._unpack(EX_CONSTRUCT) if self._object_hook is not None: ret = self._object_hook(ret) @@ -635,37 +688,30 @@ def __next__(self): except OutOfData: self._consume() raise StopIteration + except RecursionError: + raise StackError next = __next__ - def skip(self, write_bytes=None): + def skip(self): self._unpack(EX_SKIP) - if write_bytes is not None: - warnings.warn("`write_bytes` option is deprecated. 
Use `.tell()` instead.", DeprecationWarning) - write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) self._consume() - def unpack(self, write_bytes=None): - ret = self._unpack(EX_CONSTRUCT) - if write_bytes is not None: - warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) - write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + def unpack(self): + try: + ret = self._unpack(EX_CONSTRUCT) + except RecursionError: + raise StackError self._consume() return ret - def read_array_header(self, write_bytes=None): + def read_array_header(self): ret = self._unpack(EX_READ_ARRAY_HEADER) - if write_bytes is not None: - warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) - write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) self._consume() return ret - def read_map_header(self, write_bytes=None): + def read_map_header(self): ret = self._unpack(EX_READ_MAP_HEADER) - if write_bytes is not None: - warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) - write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) self._consume() return ret @@ -722,7 +768,7 @@ def __init__(self, default=None, encoding=None, unicode_errors=None, else: warnings.warn( "encoding is deprecated, Use raw=False instead.", - PendingDeprecationWarning) + DeprecationWarning, stacklevel=2) if unicode_errors is None: unicode_errors = 'strict' @@ -749,7 +795,7 @@ def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, list_types = (list, tuple) while True: if nest_limit < 0: - raise PackValueError("recursion limit exceeded") + raise ValueError("recursion limit exceeded") if obj is None: return self._buffer.write(b"\xc0") if check(obj, bool): @@ -781,14 +827,14 @@ def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, obj = self._default(obj) default_used = True continue - raise PackOverflowError("Integer value out of range") + raise OverflowError("Integer value out of range") if check(obj, (bytes, bytearray)): n = len(obj) if n >= 2**32: - raise PackValueError("%s is too large" % type(obj).__name__) + raise ValueError("%s is too large" % type(obj).__name__) self._pack_bin_header(n) return self._buffer.write(obj) - if check(obj, Unicode): + if check(obj, unicode): if self._encoding is None: raise TypeError( "Can't encode unicode string: " @@ -796,13 +842,13 @@ def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, obj = obj.encode(self._encoding, self._unicode_errors) n = len(obj) if n >= 2**32: - raise PackValueError("String is too large") + raise ValueError("String is too large") self._pack_raw_header(n) return self._buffer.write(obj) if check(obj, memoryview): n = len(obj) * obj.itemsize if n >= 2**32: - raise PackValueError("Memoryview is too large") + raise ValueError("Memoryview is too large") self._pack_bin_header(n) return self._buffer.write(obj) if check(obj, float): @@ -855,43 +901,35 @@ def pack(self, obj): except: self._buffer = StringIO() # force reset raise - ret = self._buffer.getvalue() if self._autoreset: + ret = self._buffer.getvalue() self._buffer = StringIO() - elif USING_STRINGBUILDER: - self._buffer = StringIO(ret) - return ret + return ret def pack_map_pairs(self, pairs): self._pack_map_pairs(len(pairs), pairs) - ret = self._buffer.getvalue() if self._autoreset: + ret = self._buffer.getvalue() self._buffer = StringIO() - elif USING_STRINGBUILDER: - self._buffer = StringIO(ret) - return ret + return ret def pack_array_header(self, n): if n >= 2**32: - raise 
PackValueError + raise ValueError self._pack_array_header(n) - ret = self._buffer.getvalue() if self._autoreset: + ret = self._buffer.getvalue() self._buffer = StringIO() - elif USING_STRINGBUILDER: - self._buffer = StringIO(ret) - return ret + return ret def pack_map_header(self, n): if n >= 2**32: - raise PackValueError + raise ValueError self._pack_map_header(n) - ret = self._buffer.getvalue() if self._autoreset: + ret = self._buffer.getvalue() self._buffer = StringIO() - elif USING_STRINGBUILDER: - self._buffer = StringIO(ret) - return ret + return ret def pack_ext_type(self, typecode, data): if not isinstance(typecode, int): @@ -902,7 +940,7 @@ def pack_ext_type(self, typecode, data): raise TypeError("data must have bytes type") L = len(data) if L > 0xffffffff: - raise PackValueError("Too large data") + raise ValueError("Too large data") if L == 1: self._buffer.write(b'\xd4') elif L == 2: @@ -929,7 +967,7 @@ def _pack_array_header(self, n): return self._buffer.write(struct.pack(">BH", 0xdc, n)) if n <= 0xffffffff: return self._buffer.write(struct.pack(">BI", 0xdd, n)) - raise PackValueError("Array is too large") + raise ValueError("Array is too large") def _pack_map_header(self, n): if n <= 0x0f: @@ -938,7 +976,7 @@ def _pack_map_header(self, n): return self._buffer.write(struct.pack(">BH", 0xde, n)) if n <= 0xffffffff: return self._buffer.write(struct.pack(">BI", 0xdf, n)) - raise PackValueError("Dict is too large") + raise ValueError("Dict is too large") def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT): self._pack_map_header(n) @@ -956,7 +994,7 @@ def _pack_raw_header(self, n): elif n <= 0xffffffff: self._buffer.write(struct.pack(">BI", 0xdb, n)) else: - raise PackValueError('Raw is too large') + raise ValueError('Raw is too large') def _pack_bin_header(self, n): if not self._use_bin_type: @@ -968,10 +1006,22 @@ def _pack_bin_header(self, n): elif n <= 0xffffffff: return self._buffer.write(struct.pack(">BI", 0xc6, n)) else: - raise PackValueError('Bin is too large') + raise ValueError('Bin is too large') def bytes(self): + """Return internal buffer contents as bytes object""" return self._buffer.getvalue() def reset(self): + """Reset internal buffer. + + This method is usaful only when autoreset=False. 
+ """ self._buffer = StringIO() + + def getbuffer(self): + """Return view of internal buffer.""" + if USING_STRINGBUILDER or PY2: + return memoryview(self.bytes()) + else: + return self._buffer.getbuffer() diff --git a/pipenv/patched/notpip/_vendor/packaging/__about__.py b/pipenv/patched/notpip/_vendor/packaging/__about__.py index 7481c9e298..dc95138d04 100644 --- a/pipenv/patched/notpip/_vendor/packaging/__about__.py +++ b/pipenv/patched/notpip/_vendor/packaging/__about__.py @@ -18,7 +18,7 @@ __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "19.0" +__version__ = "19.2" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/pipenv/patched/notpip/_vendor/packaging/markers.py b/pipenv/patched/notpip/_vendor/packaging/markers.py index 50a0809120..aef30331c2 100644 --- a/pipenv/patched/notpip/_vendor/packaging/markers.py +++ b/pipenv/patched/notpip/_vendor/packaging/markers.py @@ -259,7 +259,7 @@ def default_environment(): "platform_version": platform.version(), "python_full_version": platform.python_version(), "platform_python_implementation": platform.python_implementation(), - "python_version": platform.python_version()[:3], + "python_version": ".".join(platform.python_version_tuple()[:2]), "sys_platform": sys.platform, } diff --git a/pipenv/patched/notpip/_vendor/packaging/tags.py b/pipenv/patched/notpip/_vendor/packaging/tags.py new file mode 100644 index 0000000000..ec9942f0f6 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/packaging/tags.py @@ -0,0 +1,404 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import + +import distutils.util + +try: + from importlib.machinery import EXTENSION_SUFFIXES +except ImportError: # pragma: no cover + import imp + + EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()] + del imp +import platform +import re +import sys +import sysconfig +import warnings + + +INTERPRETER_SHORT_NAMES = { + "python": "py", # Generic. 
+ "cpython": "cp", + "pypy": "pp", + "ironpython": "ip", + "jython": "jy", +} + + +_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 + + +class Tag(object): + + __slots__ = ["_interpreter", "_abi", "_platform"] + + def __init__(self, interpreter, abi, platform): + self._interpreter = interpreter.lower() + self._abi = abi.lower() + self._platform = platform.lower() + + @property + def interpreter(self): + return self._interpreter + + @property + def abi(self): + return self._abi + + @property + def platform(self): + return self._platform + + def __eq__(self, other): + return ( + (self.platform == other.platform) + and (self.abi == other.abi) + and (self.interpreter == other.interpreter) + ) + + def __hash__(self): + return hash((self._interpreter, self._abi, self._platform)) + + def __str__(self): + return "{}-{}-{}".format(self._interpreter, self._abi, self._platform) + + def __repr__(self): + return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) + + +def parse_tag(tag): + tags = set() + interpreters, abis, platforms = tag.split("-") + for interpreter in interpreters.split("."): + for abi in abis.split("."): + for platform_ in platforms.split("."): + tags.add(Tag(interpreter, abi, platform_)) + return frozenset(tags) + + +def _normalize_string(string): + return string.replace(".", "_").replace("-", "_") + + +def _cpython_interpreter(py_version): + # TODO: Is using py_version_nodot for interpreter version critical? + return "cp{major}{minor}".format(major=py_version[0], minor=py_version[1]) + + +def _cpython_abis(py_version): + abis = [] + version = "{}{}".format(*py_version[:2]) + debug = pymalloc = ucs4 = "" + with_debug = sysconfig.get_config_var("Py_DEBUG") + has_refcount = hasattr(sys, "gettotalrefcount") + # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled + # extension modules is the best option. + # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 + has_ext = "_d.pyd" in EXTENSION_SUFFIXES + if with_debug or (with_debug is None and (has_refcount or has_ext)): + debug = "d" + if py_version < (3, 8): + with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC") + if with_pymalloc or with_pymalloc is None: + pymalloc = "m" + if py_version < (3, 3): + unicode_size = sysconfig.get_config_var("Py_UNICODE_SIZE") + if unicode_size == 4 or ( + unicode_size is None and sys.maxunicode == 0x10FFFF + ): + ucs4 = "u" + elif debug: + # Debug builds can also load "normal" extension modules. + # We can also assume no UCS-4 or pymalloc requirement. + abis.append("cp{version}".format(version=version)) + abis.insert( + 0, + "cp{version}{debug}{pymalloc}{ucs4}".format( + version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 + ), + ) + return abis + + +def _cpython_tags(py_version, interpreter, abis, platforms): + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms): + yield tag + for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms): + yield tag + # PEP 384 was first implemented in Python 3.2. 
+ for minor_version in range(py_version[1] - 1, 1, -1): + for platform_ in platforms: + interpreter = "cp{major}{minor}".format( + major=py_version[0], minor=minor_version + ) + yield Tag(interpreter, "abi3", platform_) + + +def _pypy_interpreter(): + return "pp{py_major}{pypy_major}{pypy_minor}".format( + py_major=sys.version_info[0], + pypy_major=sys.pypy_version_info.major, + pypy_minor=sys.pypy_version_info.minor, + ) + + +def _generic_abi(): + abi = sysconfig.get_config_var("SOABI") + if abi: + return _normalize_string(abi) + else: + return "none" + + +def _pypy_tags(py_version, interpreter, abi, platforms): + for tag in (Tag(interpreter, abi, platform) for platform in platforms): + yield tag + for tag in (Tag(interpreter, "none", platform) for platform in platforms): + yield tag + + +def _generic_tags(interpreter, py_version, abi, platforms): + for tag in (Tag(interpreter, abi, platform) for platform in platforms): + yield tag + if abi != "none": + tags = (Tag(interpreter, "none", platform_) for platform_ in platforms) + for tag in tags: + yield tag + + +def _py_interpreter_range(py_version): + """ + Yield Python versions in descending order. + + After the latest version, the major-only version will be yielded, and then + all following versions up to 'end'. + """ + yield "py{major}{minor}".format(major=py_version[0], minor=py_version[1]) + yield "py{major}".format(major=py_version[0]) + for minor in range(py_version[1] - 1, -1, -1): + yield "py{major}{minor}".format(major=py_version[0], minor=minor) + + +def _independent_tags(interpreter, py_version, platforms): + """ + Return the sequence of tags that are consistent across implementations. + + The tags consist of: + - py*-none-<platform> + - <interpreter>-none-any + - py*-none-any + """ + for version in _py_interpreter_range(py_version): + for platform_ in platforms: + yield Tag(version, "none", platform_) + yield Tag(interpreter, "none", "any") + for version in _py_interpreter_range(py_version): + yield Tag(version, "none", "any") + + +def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER): + if not is_32bit: + return arch + + if arch.startswith("ppc"): + return "ppc" + + return "i386" + + +def _mac_binary_formats(version, cpu_arch): + formats = [cpu_arch] + if cpu_arch == "x86_64": + if version < (10, 4): + return [] + formats.extend(["intel", "fat64", "fat32"]) + + elif cpu_arch == "i386": + if version < (10, 4): + return [] + formats.extend(["intel", "fat32", "fat"]) + + elif cpu_arch == "ppc64": + # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? + if version > (10, 5) or version < (10, 4): + return [] + formats.append("fat64") + + elif cpu_arch == "ppc": + if version > (10, 6): + return [] + formats.extend(["fat32", "fat"]) + + formats.append("universal") + return formats + + +def _mac_platforms(version=None, arch=None): + version_str, _, cpu_arch = platform.mac_ver() + if version is None: + version = tuple(map(int, version_str.split(".")[:2])) + if arch is None: + arch = _mac_arch(cpu_arch) + platforms = [] + for minor_version in range(version[1], -1, -1): + compat_version = version[0], minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + platforms.append( + "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + ) + return platforms + + +# From PEP 513. +def _is_manylinux_compatible(name, glibc_version): + # Check for presence of _manylinux module. 
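# Per PEP 513 (and PEPs 571/599 for the later profiles), a distribution
# may ship a _manylinux module whose <name>_compatible attribute opts the
# platform in or out explicitly, overriding the glibc heuristic below.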
+ try: + import _manylinux + + return bool(getattr(_manylinux, name + "_compatible")) + except (ImportError, AttributeError): + # Fall through to heuristic check below. + pass + + return _have_compatible_glibc(*glibc_version) + + +def _glibc_version_string(): + # Returns glibc version string, or None if not using glibc. + import ctypes + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + process_namespace = ctypes.CDLL(None) + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +# Separated out from have_compatible_glibc for easier unit testing. +def _check_glibc_version(version_str, required_major, minimum_minor): + # Parse string and check against requested version. + # + # We use a regexp instead of str.split because we want to discard any + # random junk that might come after the minor version -- this might happen + # in patched/forked versions of glibc (e.g. Linaro's version of glibc + # uses version strings like "2.20-2014.11"). See gh-3588. + m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) + if not m: + warnings.warn( + "Expected glibc version with 2 components major.minor," + " got: %s" % version_str, + RuntimeWarning, + ) + return False + return ( + int(m.group("major")) == required_major + and int(m.group("minor")) >= minimum_minor + ) + + +def _have_compatible_glibc(required_major, minimum_minor): + version_str = _glibc_version_string() + if version_str is None: + return False + return _check_glibc_version(version_str, required_major, minimum_minor) + + +def _linux_platforms(is_32bit=_32_BIT_INTERPRETER): + linux = _normalize_string(distutils.util.get_platform()) + if linux == "linux_x86_64" and is_32bit: + linux = "linux_i686" + manylinux_support = ( + ("manylinux2014", (2, 17)), # CentOS 7 w/ glibc 2.17 (PEP 599) + ("manylinux2010", (2, 12)), # CentOS 6 w/ glibc 2.12 (PEP 571) + ("manylinux1", (2, 5)), # CentOS 5 w/ glibc 2.5 (PEP 513) + ) + manylinux_support_iter = iter(manylinux_support) + for name, glibc_version in manylinux_support_iter: + if _is_manylinux_compatible(name, glibc_version): + platforms = [linux.replace("linux", name)] + break + else: + platforms = [] + # Support for a later manylinux implies support for an earlier version. + platforms += [linux.replace("linux", name) for name, _ in manylinux_support_iter] + platforms.append(linux) + return platforms + + +def _generic_platforms(): + platform = _normalize_string(distutils.util.get_platform()) + return [platform] + + +def _interpreter_name(): + name = platform.python_implementation().lower() + return INTERPRETER_SHORT_NAMES.get(name) or name + + +def _generic_interpreter(name, py_version): + version = sysconfig.get_config_var("py_version_nodot") + if not version: + version = "".join(map(str, py_version[:2])) + return "{name}{version}".format(name=name, version=version) + + +def sys_tags(): + """ + Returns the sequence of tag triples for the running interpreter. 
+ + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + py_version = sys.version_info[:2] + interpreter_name = _interpreter_name() + if platform.system() == "Darwin": + platforms = _mac_platforms() + elif platform.system() == "Linux": + platforms = _linux_platforms() + else: + platforms = _generic_platforms() + + if interpreter_name == "cp": + interpreter = _cpython_interpreter(py_version) + abis = _cpython_abis(py_version) + for tag in _cpython_tags(py_version, interpreter, abis, platforms): + yield tag + elif interpreter_name == "pp": + interpreter = _pypy_interpreter() + abi = _generic_abi() + for tag in _pypy_tags(py_version, interpreter, abi, platforms): + yield tag + else: + interpreter = _generic_interpreter(interpreter_name, py_version) + abi = _generic_abi() + for tag in _generic_tags(interpreter, py_version, abi, platforms): + yield tag + for tag in _independent_tags(interpreter, py_version, platforms): + yield tag diff --git a/pipenv/patched/notpip/_vendor/pep517/__init__.py b/pipenv/patched/notpip/_vendor/pep517/__init__.py index 9c1a098f78..38d8e63ca1 100644 --- a/pipenv/patched/notpip/_vendor/pep517/__init__.py +++ b/pipenv/patched/notpip/_vendor/pep517/__init__.py @@ -1,4 +1,4 @@ """Wrappers to build Python packages using PEP 517 hooks """ -__version__ = '0.5.0' +__version__ = '0.7.0' diff --git a/pipenv/patched/notpip/_vendor/pep517/_in_process.py b/pipenv/patched/notpip/_vendor/pep517/_in_process.py index d6524b660a..1589a6cac5 100644 --- a/pipenv/patched/notpip/_vendor/pep517/_in_process.py +++ b/pipenv/patched/notpip/_vendor/pep517/_in_process.py @@ -2,7 +2,9 @@ It expects: - Command line args: hook_name, control_dir -- Environment variable: PEP517_BUILD_BACKEND=entry.point:spec +- Environment variables: + PEP517_BUILD_BACKEND=entry.point:spec + PEP517_BACKEND_PATH=paths (separated with os.pathsep) - control_dir/input.json: - {"kwargs": {...}} @@ -13,10 +15,12 @@ from glob import glob from importlib import import_module import os +import os.path from os.path import join as pjoin import re import shutil import sys +import traceback # This is run as a script, not a module, so it can't do a relative import import compat @@ -24,16 +28,49 @@ class BackendUnavailable(Exception): """Raised if we cannot import the backend""" + def __init__(self, traceback): + self.traceback = traceback + + +class BackendInvalid(Exception): + """Raised if the backend is invalid""" + def __init__(self, message): + self.message = message + + +class HookMissing(Exception): + """Raised if a hook is missing and we are not executing the fallback""" + + +def contained_in(filename, directory): + """Test if a file is located within the given directory.""" + filename = os.path.normcase(os.path.abspath(filename)) + directory = os.path.normcase(os.path.abspath(directory)) + return os.path.commonprefix([filename, directory]) == directory def _build_backend(): """Find and load the build backend""" + # Add in-tree backend directories to the front of sys.path. 
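# PEP517_BACKEND_PATH carries the "backend-path" entries from
# pyproject.toml; prepending them to sys.path lets a project load a build
# backend shipped in its own source tree, and contained_in() below then
# verifies that the imported backend really came from one of those paths.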
+ backend_path = os.environ.get('PEP517_BACKEND_PATH') + if backend_path: + extra_pathitems = backend_path.split(os.pathsep) + sys.path[:0] = extra_pathitems + ep = os.environ['PEP517_BUILD_BACKEND'] mod_path, _, obj_path = ep.partition(':') try: obj = import_module(mod_path) except ImportError: - raise BackendUnavailable + raise BackendUnavailable(traceback.format_exc()) + + if backend_path: + if not any( + contained_in(obj.__file__, path) + for path in extra_pathitems + ): + raise BackendInvalid("Backend was not loaded from backend-path") + if obj_path: for path_part in obj_path.split('.'): obj = getattr(obj, path_part) @@ -54,15 +91,19 @@ def get_requires_for_build_wheel(config_settings): return hook(config_settings) -def prepare_metadata_for_build_wheel(metadata_directory, config_settings): +def prepare_metadata_for_build_wheel( + metadata_directory, config_settings, _allow_fallback): """Invoke optional prepare_metadata_for_build_wheel - Implements a fallback by building a wheel if the hook isn't defined. + Implements a fallback by building a wheel if the hook isn't defined, + unless _allow_fallback is False in which case HookMissing is raised. """ backend = _build_backend() try: hook = backend.prepare_metadata_for_build_wheel except AttributeError: + if not _allow_fallback: + raise HookMissing() return _get_wheel_metadata_from_wheel(backend, metadata_directory, config_settings) else: @@ -161,6 +202,8 @@ class _DummyException(Exception): class GotUnsupportedOperation(Exception): """For internal use when backend raises UnsupportedOperation""" + def __init__(self, traceback): + self.traceback = traceback def build_sdist(sdist_directory, config_settings): @@ -169,7 +212,7 @@ def build_sdist(sdist_directory, config_settings): try: return backend.build_sdist(sdist_directory, config_settings) except getattr(backend, 'UnsupportedOperation', _DummyException): - raise GotUnsupportedOperation + raise GotUnsupportedOperation(traceback.format_exc()) HOOK_NAMES = { @@ -195,10 +238,17 @@ def main(): json_out = {'unsupported': False, 'return_val': None} try: json_out['return_val'] = hook(**hook_input['kwargs']) - except BackendUnavailable: + except BackendUnavailable as e: json_out['no_backend'] = True - except GotUnsupportedOperation: + json_out['traceback'] = e.traceback + except BackendInvalid as e: + json_out['backend_invalid'] = True + json_out['backend_error'] = e.message + except GotUnsupportedOperation as e: json_out['unsupported'] = True + json_out['traceback'] = e.traceback + except HookMissing: + json_out['hook_missing'] = True compat.write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) diff --git a/pipenv/patched/notpip/_vendor/pep517/build.py b/pipenv/patched/notpip/_vendor/pep517/build.py index db9a0799d3..7618c78c19 100644 --- a/pipenv/patched/notpip/_vendor/pep517/build.py +++ b/pipenv/patched/notpip/_vendor/pep517/build.py @@ -3,25 +3,56 @@ import argparse import logging import os -import contextlib -from pipenv.patched.notpip._vendor import pytoml +import toml import shutil -import errno -import tempfile from .envbuild import BuildEnvironment from .wrappers import Pep517HookCaller +from .dirtools import tempdir, mkdir_p +from .compat import FileNotFoundError log = logging.getLogger(__name__) -@contextlib.contextmanager -def tempdir(): - td = tempfile.mkdtemp() +def validate_system(system): + """ + Ensure build system has the requisite fields. 
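[Aside: hook failures travel back to the frontend as flags in output.json rather than as exit codes. A condensed, illustrative sketch of the frontend-side dispatch (mirrored by _call_hook in wrappers.py further below):

    import json

    def result_from(output_json_text):
        data = json.loads(output_json_text)
        if data.get('no_backend'):
            raise RuntimeError('BackendUnavailable:\n' + data.get('traceback', ''))
        if data.get('backend_invalid'):
            raise RuntimeError('BackendInvalid: ' + data.get('backend_error', ''))
        if data.get('unsupported'):
            raise RuntimeError('UnsupportedOperation:\n' + data.get('traceback', ''))
        if data.get('hook_missing'):
            raise RuntimeError('HookMissing')
        return data['return_val']]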
+ """ + required = {'requires', 'build-backend'} + if not (required <= set(system)): + message = "Missing required fields: {missing}".format( + missing=required-set(system), + ) + raise ValueError(message) + + +def load_system(source_dir): + """ + Load the build system from a source dir (pyproject.toml). + """ + pyproject = os.path.join(source_dir, 'pyproject.toml') + with open(pyproject) as f: + pyproject_data = toml.load(f) + return pyproject_data['build-system'] + + +def compat_system(source_dir): + """ + Given a source dir, attempt to get a build system backend + and requirements from pyproject.toml. Fallback to + setuptools but only if the file was not found or a build + system was not indicated. + """ try: - yield td - finally: - shutil.rmtree(td) + system = load_system(source_dir) + except (FileNotFoundError, KeyError): + system = {} + system.setdefault( + 'build-backend', + 'setuptools.build_meta:__legacy__', + ) + system.setdefault('requires', ['setuptools', 'wheel']) + return system def _do_build(hooks, env, dist, dest): @@ -42,33 +73,18 @@ def _do_build(hooks, env, dist, dest): shutil.move(source, os.path.join(dest, os.path.basename(filename))) -def mkdir_p(*args, **kwargs): - """Like `mkdir`, but does not raise an exception if the - directory already exists. - """ - try: - return os.mkdir(*args, **kwargs) - except OSError as exc: - if exc.errno != errno.EEXIST: - raise - - -def build(source_dir, dist, dest=None): - pyproject = os.path.join(source_dir, 'pyproject.toml') +def build(source_dir, dist, dest=None, system=None): + system = system or load_system(source_dir) dest = os.path.join(source_dir, dest or 'dist') mkdir_p(dest) - with open(pyproject) as f: - pyproject_data = pytoml.load(f) - # Ensure the mandatory data can be loaded - buildsys = pyproject_data['build-system'] - requires = buildsys['requires'] - backend = buildsys['build-backend'] - - hooks = Pep517HookCaller(source_dir, backend) + validate_system(system) + hooks = Pep517HookCaller( + source_dir, system['build-backend'], system.get('backend-path') + ) with BuildEnvironment() as env: - env.pip_install(requires) + env.pip_install(system['requires']) _do_build(hooks, env, dist, dest) diff --git a/pipenv/patched/notpip/_vendor/pep517/check.py b/pipenv/patched/notpip/_vendor/pep517/check.py index 9d28ba4496..9e0c068209 100644 --- a/pipenv/patched/notpip/_vendor/pep517/check.py +++ b/pipenv/patched/notpip/_vendor/pep517/check.py @@ -4,7 +4,7 @@ import logging import os from os.path import isfile, join as pjoin -from pipenv.patched.notpip._vendor.pytoml import TomlError, load as toml_load +from toml import TomlDecodeError, load as toml_load import shutil from subprocess import CalledProcessError import sys @@ -147,12 +147,13 @@ def check(source_dir): buildsys = pyproject_data['build-system'] requires = buildsys['requires'] backend = buildsys['build-backend'] + backend_path = buildsys.get('backend-path') log.info('Loaded pyproject.toml') - except (TomlError, KeyError): + except (TomlDecodeError, KeyError): log.error("Invalid pyproject.toml", exc_info=True) return False - hooks = Pep517HookCaller(source_dir, backend) + hooks = Pep517HookCaller(source_dir, backend, backend_path) sdist_ok = check_build_sdist(hooks, requires) wheel_ok = check_build_wheel(hooks, requires) diff --git a/pipenv/patched/notpip/_vendor/pep517/compat.py b/pipenv/patched/notpip/_vendor/pep517/compat.py index 01c66fc7e4..8432acb732 100644 --- a/pipenv/patched/notpip/_vendor/pep517/compat.py +++ b/pipenv/patched/notpip/_vendor/pep517/compat.py @@ -1,7 
+1,10 @@ -"""Handle reading and writing JSON in UTF-8, on Python 3 and 2.""" +"""Python 2/3 compatibility""" import json import sys + +# Handle reading and writing JSON in UTF-8, on Python 3 and 2. + if sys.version_info[0] >= 3: # Python 3 def write_json(obj, path, **kwargs): @@ -21,3 +24,11 @@ def write_json(obj, path, **kwargs): def read_json(path): with open(path, 'rb') as f: return json.load(f) + + +# FileNotFoundError + +try: + FileNotFoundError = FileNotFoundError +except NameError: + FileNotFoundError = IOError diff --git a/pipenv/patched/notpip/_vendor/pep517/dirtools.py b/pipenv/patched/notpip/_vendor/pep517/dirtools.py new file mode 100644 index 0000000000..58c6ca0c56 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pep517/dirtools.py @@ -0,0 +1,44 @@ +import os +import io +import contextlib +import tempfile +import shutil +import errno +import zipfile + + +@contextlib.contextmanager +def tempdir(): + """Create a temporary directory in a context manager.""" + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + + +def mkdir_p(*args, **kwargs): + """Like `mkdir`, but does not raise an exception if the + directory already exists. + """ + try: + return os.mkdir(*args, **kwargs) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + +def dir_to_zipfile(root): + """Construct an in-memory zip file for a directory.""" + buffer = io.BytesIO() + zip_file = zipfile.ZipFile(buffer, 'w') + for root, dirs, files in os.walk(root): + for path in dirs: + fs_path = os.path.join(root, path) + rel_path = os.path.relpath(fs_path, root) + zip_file.writestr(rel_path + '/', '') + for path in files: + fs_path = os.path.join(root, path) + rel_path = os.path.relpath(fs_path, root) + zip_file.write(fs_path, rel_path) + return zip_file diff --git a/pipenv/patched/notpip/_vendor/pep517/envbuild.py b/pipenv/patched/notpip/_vendor/pep517/envbuild.py index 8a5ad4d729..cacd2b12c0 100644 --- a/pipenv/patched/notpip/_vendor/pep517/envbuild.py +++ b/pipenv/patched/notpip/_vendor/pep517/envbuild.py @@ -3,23 +3,27 @@ import os import logging -from pipenv.patched.notpip._vendor import pytoml +import toml import shutil from subprocess import check_call import sys from sysconfig import get_paths from tempfile import mkdtemp -from .wrappers import Pep517HookCaller +from .wrappers import Pep517HookCaller, LoggerWrapper log = logging.getLogger(__name__) def _load_pyproject(source_dir): with open(os.path.join(source_dir, 'pyproject.toml')) as f: - pyproject_data = pytoml.load(f) + pyproject_data = toml.load(f) buildsys = pyproject_data['build-system'] - return buildsys['requires'], buildsys['build-backend'] + return ( + buildsys['requires'], + buildsys['build-backend'], + buildsys.get('backend-path'), + ) class BuildEnvironment(object): @@ -90,9 +94,14 @@ def pip_install(self, reqs): if not reqs: return log.info('Calling pip to install %s', reqs) - check_call([ + cmd = [ sys.executable, '-m', 'pip', 'install', '--ignore-installed', - '--prefix', self.path] + list(reqs)) + '--prefix', self.path] + list(reqs) + check_call( + cmd, + stdout=LoggerWrapper(log, logging.INFO), + stderr=LoggerWrapper(log, logging.ERROR), + ) def __exit__(self, exc_type, exc_val, exc_tb): needs_cleanup = ( @@ -126,8 +135,8 @@ def build_wheel(source_dir, wheel_dir, config_settings=None): """ if config_settings is None: config_settings = {} - requires, backend = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend) + requires, backend, backend_path = _load_pyproject(source_dir) + hooks = 
Pep517HookCaller(source_dir, backend, backend_path) with BuildEnvironment() as env: env.pip_install(requires) @@ -148,8 +157,8 @@ def build_sdist(source_dir, sdist_dir, config_settings=None): """ if config_settings is None: config_settings = {} - requires, backend = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend) + requires, backend, backend_path = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend, backend_path) with BuildEnvironment() as env: env.pip_install(requires) diff --git a/pipenv/patched/notpip/_vendor/pep517/meta.py b/pipenv/patched/notpip/_vendor/pep517/meta.py new file mode 100644 index 0000000000..d525de5c6c --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pep517/meta.py @@ -0,0 +1,92 @@ +"""Build metadata for a project using PEP 517 hooks. +""" +import argparse +import logging +import os +import shutil +import functools + +try: + import importlib.metadata as imp_meta +except ImportError: + import importlib_metadata as imp_meta + +try: + from zipfile import Path +except ImportError: + from zipp import Path + +from .envbuild import BuildEnvironment +from .wrappers import Pep517HookCaller, quiet_subprocess_runner +from .dirtools import tempdir, mkdir_p, dir_to_zipfile +from .build import validate_system, load_system, compat_system + +log = logging.getLogger(__name__) + + +def _prep_meta(hooks, env, dest): + reqs = hooks.get_requires_for_build_wheel({}) + log.info('Got build requires: %s', reqs) + + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + + with tempdir() as td: + log.info('Trying to build metadata in %s', td) + filename = hooks.prepare_metadata_for_build_wheel(td, {}) + source = os.path.join(td, filename) + shutil.move(source, os.path.join(dest, os.path.basename(filename))) + + +def build(source_dir='.', dest=None, system=None): + system = system or load_system(source_dir) + dest = os.path.join(source_dir, dest or 'dist') + mkdir_p(dest) + validate_system(system) + hooks = Pep517HookCaller( + source_dir, system['build-backend'], system.get('backend-path') + ) + + with hooks.subprocess_runner(quiet_subprocess_runner): + with BuildEnvironment() as env: + env.pip_install(system['requires']) + _prep_meta(hooks, env, dest) + + +def build_as_zip(builder=build): + with tempdir() as out_dir: + builder(dest=out_dir) + return dir_to_zipfile(out_dir) + + +def load(root): + """ + Given a source directory (root) of a package, + return an importlib.metadata.Distribution object + with metadata build from that package. 
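[Aside: the new meta module lets a frontend read a project's metadata without installing it, by building only the *.dist-info in an isolated environment and zipping it in memory. A hedged usage sketch — the path is a placeholder, and on Python 2 this relies on the importlib_metadata and zipp backports:

    from pipenv.patched.notpip._vendor.pep517 import meta

    dist = meta.load('/path/to/some/project')   # hypothetical source tree
    print(dist.metadata['Name'], dist.version)  # importlib.metadata Distribution]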
+ """ + root = os.path.expanduser(root) + system = compat_system(root) + builder = functools.partial(build, source_dir=root, system=system) + path = Path(build_as_zip(builder)) + return imp_meta.PathDistribution(path) + + +parser = argparse.ArgumentParser() +parser.add_argument( + 'source_dir', + help="A directory containing pyproject.toml", +) +parser.add_argument( + '--out-dir', '-o', + help="Destination in which to save the builds relative to source dir", +) + + +def main(): + args = parser.parse_args() + build(args.source_dir, args.out_dir) + + +if __name__ == '__main__': + main() diff --git a/pipenv/patched/notpip/_vendor/pep517/wrappers.py b/pipenv/patched/notpip/_vendor/pep517/wrappers.py index b14b899150..ad9a4f8c32 100644 --- a/pipenv/patched/notpip/_vendor/pep517/wrappers.py +++ b/pipenv/patched/notpip/_vendor/pep517/wrappers.py @@ -1,8 +1,9 @@ +import threading from contextlib import contextmanager import os from os.path import dirname, abspath, join as pjoin import shutil -from subprocess import check_call +from subprocess import check_call, check_output, STDOUT import sys from tempfile import mkdtemp @@ -22,10 +23,29 @@ def tempdir(): class BackendUnavailable(Exception): """Will be raised if the backend cannot be imported in the hook process.""" + def __init__(self, traceback): + self.traceback = traceback + + +class BackendInvalid(Exception): + """Will be raised if the backend is invalid.""" + def __init__(self, backend_name, backend_path, message): + self.backend_name = backend_name + self.backend_path = backend_path + self.message = message + + +class HookMissing(Exception): + """Will be raised on missing hooks.""" + def __init__(self, hook_name): + super(HookMissing, self).__init__(hook_name) + self.hook_name = hook_name class UnsupportedOperation(Exception): """May be raised by build_sdist if the backend indicates that it can't.""" + def __init__(self, traceback): + self.traceback = traceback def default_subprocess_runner(cmd, cwd=None, extra_environ=None): @@ -37,21 +57,82 @@ def default_subprocess_runner(cmd, cwd=None, extra_environ=None): check_call(cmd, cwd=cwd, env=env) +def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None): + """A method of calling the wrapper subprocess while suppressing output.""" + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + + check_output(cmd, cwd=cwd, env=env, stderr=STDOUT) + + +def norm_and_check(source_tree, requested): + """Normalise and check a backend path. + + Ensure that the requested backend path is specified as a relative path, + and resolves to a location under the given source tree. + + Return an absolute version of the requested path. + """ + if os.path.isabs(requested): + raise ValueError("paths must be relative") + + abs_source = os.path.abspath(source_tree) + abs_requested = os.path.normpath(os.path.join(abs_source, requested)) + # We have to use commonprefix for Python 2.7 compatibility. So we + # normalise case to avoid problems because commonprefix is a character + # based comparison :-( + norm_source = os.path.normcase(abs_source) + norm_requested = os.path.normcase(abs_requested) + if os.path.commonprefix([norm_source, norm_requested]) != norm_source: + raise ValueError("paths must be inside source tree") + + return abs_requested + + class Pep517HookCaller(object): """A wrapper around a source directory to be built with a PEP 517 backend. source_dir : The path to the source directory, containing pyproject.toml. - backend : The build backend spec, as per PEP 517, from pyproject.toml. 
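[Aside: the invariant norm_and_check enforces is that every backend-path entry is relative and resolves under the source tree. Roughly:

    import os.path

    def is_contained(source_tree, requested):
        # Same commonprefix-plus-normcase test as norm_and_check above,
        # kept character-based for Python 2.7 compatibility.
        if os.path.isabs(requested):
            return False
        abs_source = os.path.abspath(source_tree)
        abs_requested = os.path.normpath(os.path.join(abs_source, requested))
        norm_source = os.path.normcase(abs_source)
        norm_requested = os.path.normcase(abs_requested)
        return os.path.commonprefix([norm_source, norm_requested]) == norm_source

    is_contained('pkg', 'backend')       # True
    is_contained('pkg', '../elsewhere')  # False -> ValueError in norm_and_check]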
+ build_backend : The build backend spec, as per PEP 517, from + pyproject.toml. + backend_path : The backend path, as per PEP 517, from pyproject.toml. + runner : A callable that invokes the wrapper subprocess. + + The 'runner', if provided, must expect the following: + cmd : a list of strings representing the command and arguments to + execute, as would be passed to e.g. 'subprocess.check_call'. + cwd : a string representing the working directory that must be + used for the subprocess. Corresponds to the provided source_dir. + extra_environ : a dict mapping environment variable names to values + which must be set for the subprocess execution. """ - def __init__(self, source_dir, build_backend): + def __init__( + self, + source_dir, + build_backend, + backend_path=None, + runner=None, + ): + if runner is None: + runner = default_subprocess_runner + self.source_dir = abspath(source_dir) self.build_backend = build_backend - self._subprocess_runner = default_subprocess_runner + if backend_path: + backend_path = [ + norm_and_check(self.source_dir, p) for p in backend_path + ] + self.backend_path = backend_path + self._subprocess_runner = runner # TODO: Is this over-engineered? Maybe frontends only need to # set this when creating the wrapper, not on every call. @contextmanager def subprocess_runner(self, runner): + """A context manager for temporarily overriding the default subprocess + runner. + """ prev = self._subprocess_runner self._subprocess_runner = runner yield @@ -72,18 +153,21 @@ def get_requires_for_build_wheel(self, config_settings=None): }) def prepare_metadata_for_build_wheel( - self, metadata_directory, config_settings=None): + self, metadata_directory, config_settings=None, + _allow_fallback=True): """Prepare a *.dist-info folder with metadata for this project. Returns the name of the newly created folder. If the build backend defines a hook with this name, it will be called in a subprocess. If not, the backend will be asked to build a wheel, - and the dist-info extracted from that. + and the dist-info extracted from that (unless _allow_fallback is + False). """ return self._call_hook('prepare_metadata_for_build_wheel', { 'metadata_directory': abspath(metadata_directory), 'config_settings': config_settings, + '_allow_fallback': _allow_fallback, }) def build_wheel( @@ -139,25 +223,76 @@ def _call_hook(self, hook_name, kwargs): # letters, digits and _, . and : characters, and will be used as a # Python identifier, so non-ASCII content is wrong on Python 2 in # any case). + # For backend_path, we use sys.getfilesystemencoding. 
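[Aside: with the runner injectable, a frontend can choose per-call verbosity instead of patching module state. A sketch using the quiet runner defined earlier — the source dir and backend spec are assumptions:

    from pipenv.patched.notpip._vendor.pep517.wrappers import (
        Pep517HookCaller, quiet_subprocess_runner,
    )

    hooks = Pep517HookCaller('.', 'setuptools.build_meta')  # hypothetical project
    with hooks.subprocess_runner(quiet_subprocess_runner):
        # Subprocess output is captured rather than written to the console.
        reqs = hooks.get_requires_for_build_wheel({})]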
if sys.version_info[0] == 2: build_backend = self.build_backend.encode('ASCII') else: build_backend = self.build_backend + extra_environ = {'PEP517_BUILD_BACKEND': build_backend} + + if self.backend_path: + backend_path = os.pathsep.join(self.backend_path) + if sys.version_info[0] == 2: + backend_path = backend_path.encode(sys.getfilesystemencoding()) + extra_environ['PEP517_BACKEND_PATH'] = backend_path with tempdir() as td: - compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'), + hook_input = {'kwargs': kwargs} + compat.write_json(hook_input, pjoin(td, 'input.json'), indent=2) # Run the hook in a subprocess self._subprocess_runner( [sys.executable, _in_proc_script, hook_name, td], cwd=self.source_dir, - extra_environ={'PEP517_BUILD_BACKEND': build_backend} + extra_environ=extra_environ ) data = compat.read_json(pjoin(td, 'output.json')) if data.get('unsupported'): - raise UnsupportedOperation + raise UnsupportedOperation(data.get('traceback', '')) if data.get('no_backend'): - raise BackendUnavailable + raise BackendUnavailable(data.get('traceback', '')) + if data.get('backend_invalid'): + raise BackendInvalid( + backend_name=self.build_backend, + backend_path=self.backend_path, + message=data.get('backend_error', '') + ) + if data.get('hook_missing'): + raise HookMissing(hook_name) return data['return_val'] + + +class LoggerWrapper(threading.Thread): + """ + Read messages from a pipe and redirect them + to a logger (see python's logging module). + """ + + def __init__(self, logger, level): + threading.Thread.__init__(self) + self.daemon = True + + self.logger = logger + self.level = level + + # create the pipe and reader + self.fd_read, self.fd_write = os.pipe() + self.reader = os.fdopen(self.fd_read) + + self.start() + + def fileno(self): + return self.fd_write + + @staticmethod + def remove_newline(msg): + return msg[:-1] if msg.endswith(os.linesep) else msg + + def run(self): + for line in self.reader: + self._write(self.remove_newline(line)) + + def _write(self, message): + self.logger.log(self.level, message) diff --git a/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py b/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py index 0459a7dad5..c13e11c159 100644 --- a/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py +++ b/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py @@ -39,6 +39,8 @@ import textwrap import itertools import inspect +import ntpath +import posixpath from pkgutil import get_importer try: @@ -1401,14 +1403,30 @@ def get_resource_string(self, manager, resource_name): def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) + def _get_metadata_path(self, name): + return self._fn(self.egg_info, name) + def has_metadata(self, name): - return self.egg_info and self._has(self._fn(self.egg_info, name)) + if not self.egg_info: + return self.egg_info + + path = self._get_metadata_path(name) + return self._has(path) def get_metadata(self, name): if not self.egg_info: return "" - value = self._get(self._fn(self.egg_info, name)) - return value.decode('utf-8') if six.PY3 else value + path = self._get_metadata_path(name) + value = self._get(path) + if six.PY2: + return value + try: + return value.decode('utf-8') + except UnicodeDecodeError as exc: + # Include the path in the error message to simplify + # troubleshooting, and without changing the exception type. 
+ exc.reason += ' in {} file at path: {}'.format(name, path) + raise def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) @@ -1466,10 +1484,86 @@ def _listdir(self, path): ) def _fn(self, base, resource_name): + self._validate_resource_path(resource_name) if resource_name: return os.path.join(base, *resource_name.split('/')) return base + @staticmethod + def _validate_resource_path(path): + """ + Validate the resource paths according to the docs. + https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access + + >>> warned = getfixture('recwarn') + >>> warnings.simplefilter('always') + >>> vrp = NullProvider._validate_resource_path + >>> vrp('foo/bar.txt') + >>> bool(warned) + False + >>> vrp('../foo/bar.txt') + >>> bool(warned) + True + >>> warned.clear() + >>> vrp('/foo/bar.txt') + >>> bool(warned) + True + >>> vrp('foo/../../bar.txt') + >>> bool(warned) + True + >>> warned.clear() + >>> vrp('foo/f../bar.txt') + >>> bool(warned) + False + + Windows path separators are straight-up disallowed. + >>> vrp(r'\\foo/bar.txt') + Traceback (most recent call last): + ... + ValueError: Use of .. or absolute path in a resource path \ +is not allowed. + + >>> vrp(r'C:\\foo/bar.txt') + Traceback (most recent call last): + ... + ValueError: Use of .. or absolute path in a resource path \ +is not allowed. + + Blank values are allowed + + >>> vrp('') + >>> bool(warned) + False + + Non-string values are not. + + >>> vrp(None) + Traceback (most recent call last): + ... + AttributeError: ... + """ + invalid = ( + os.path.pardir in path.split(posixpath.sep) or + posixpath.isabs(path) or + ntpath.isabs(path) + ) + if not invalid: + return + + msg = "Use of .. or absolute path in a resource path is not allowed." + + # Aggressively disallow Windows absolute paths + if ntpath.isabs(path) and not posixpath.isabs(path): + raise ValueError(msg) + + # for compatibility, warn; in future + # raise ValueError(msg) + warnings.warn( + msg[:-1] + " and will raise exceptions in a future release.", + DeprecationWarning, + stacklevel=4, + ) + def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) @@ -1790,6 +1884,9 @@ class FileMetadata(EmptyProvider): def __init__(self, path): self.path = path + def _get_metadata_path(self, name): + return self.path + def has_metadata(self, name): return name == 'PKG-INFO' and os.path.isfile(self.path) @@ -1888,7 +1985,7 @@ def find_eggs_in_zip(importer, path_item, only=False): if only: # don't yield nested distros return - for subitem in metadata.resource_listdir('/'): + for subitem in metadata.resource_listdir(''): if _is_egg_path(subitem): subpath = os.path.join(path_item, subitem) dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) @@ -2583,10 +2680,14 @@ def version(self): try: return self._version except AttributeError: - version = _version_from_file(self._get_metadata(self.PKG_INFO)) + version = self._get_version() if version is None: - tmpl = "Missing 'Version:' header and/or %s file" - raise ValueError(tmpl % self.PKG_INFO, self) + path = self._get_metadata_path_for_display(self.PKG_INFO) + msg = ( + "Missing 'Version:' header and/or {} file at path: {}" + ).format(self.PKG_INFO, path) + raise ValueError(msg, self) + return version @property @@ -2644,11 +2745,34 @@ def requires(self, extras=()): ) return deps + def _get_metadata_path_for_display(self, name): + """ + Return the path to the given metadata file, if available. 
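[Aside: the doctests above are the contract; in caller terms it currently behaves like this (vendored import path shown; illustrative only):

    import warnings
    from pipenv.patched.notpip._vendor.pkg_resources import NullProvider

    vrp = NullProvider._validate_resource_path
    vrp('foo/bar.txt')               # ordinary relative path: silent
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        vrp('../foo/bar.txt')        # parent traversal: DeprecationWarning, for now
    try:
        vrp(r'C:\foo/bar.txt')       # Windows absolute path: rejected outright
    except ValueError:
        pass]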
+ """ + try: + # We need to access _get_metadata_path() on the provider object + # directly rather than through this class's __getattr__() + # since _get_metadata_path() is marked private. + path = self._provider._get_metadata_path(name) + + # Handle exceptions e.g. in case the distribution's metadata + # provider doesn't support _get_metadata_path(). + except Exception: + return '[could not detect]' + + return path + def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line + def _get_version(self): + lines = self._get_metadata(self.PKG_INFO) + version = _version_from_file(lines) + + return version + def activate(self, path=None, replace=False): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: @@ -2867,7 +2991,7 @@ def _reload_version(self): take an extra step and try to get the version number from the metadata file itself instead of the filename. """ - md_version = _version_from_file(self._get_metadata(self.PKG_INFO)) + md_version = self._get_version() if md_version: self._version = md_version return self diff --git a/pipenv/patched/notpip/_vendor/progress/__init__.py b/pipenv/patched/notpip/_vendor/progress/__init__.py index a41f65dc59..e434c257fe 100644 --- a/pipenv/patched/notpip/_vendor/progress/__init__.py +++ b/pipenv/patched/notpip/_vendor/progress/__init__.py @@ -12,31 +12,49 @@ # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -from __future__ import division +from __future__ import division, print_function from collections import deque from datetime import timedelta from math import ceil from sys import stderr -from time import time +try: + from time import monotonic +except ImportError: + from time import time as monotonic -__version__ = '1.4' +__version__ = '1.5' + +HIDE_CURSOR = '\x1b[?25l' +SHOW_CURSOR = '\x1b[?25h' class Infinite(object): file = stderr sma_window = 10 # Simple Moving Average window + check_tty = True + hide_cursor = True - def __init__(self, *args, **kwargs): + def __init__(self, message='', **kwargs): self.index = 0 - self.start_ts = time() + self.start_ts = monotonic() self.avg = 0 + self._avg_update_ts = self.start_ts self._ts = self.start_ts self._xput = deque(maxlen=self.sma_window) for key, val in kwargs.items(): setattr(self, key, val) + self._width = 0 + self.message = message + + if self.file and self.is_tty(): + if self.hide_cursor: + print(HIDE_CURSOR, end='', file=self.file) + print(self.message, end='', file=self.file) + self.file.flush() + def __getitem__(self, key): if key.startswith('_'): return None @@ -44,7 +62,7 @@ def __getitem__(self, key): @property def elapsed(self): - return int(time() - self.start_ts) + return int(monotonic() - self.start_ts) @property def elapsed_td(self): @@ -52,8 +70,14 @@ def elapsed_td(self): def update_avg(self, n, dt): if n > 0: + xput_len = len(self._xput) self._xput.append(dt / n) - self.avg = sum(self._xput) / len(self._xput) + now = monotonic() + # update when we're still filling _xput, then after every second + if (xput_len < self.sma_window or + now - self._avg_update_ts > 1): + self.avg = sum(self._xput) / len(self._xput) + self._avg_update_ts = now def update(self): pass @@ -61,11 +85,34 @@ def update(self): def start(self): pass + def clearln(self): + if self.file and self.is_tty(): + print('\r\x1b[K', end='', file=self.file) + + def write(self, s): + if self.file and self.is_tty(): + line = self.message + 
s.ljust(self._width) + print('\r' + line, end='', file=self.file) + self._width = max(self._width, len(s)) + self.file.flush() + + def writeln(self, line): + if self.file and self.is_tty(): + self.clearln() + print(line, end='', file=self.file) + self.file.flush() + def finish(self): - pass + if self.file and self.is_tty(): + print(file=self.file) + if self.hide_cursor: + print(SHOW_CURSOR, end='', file=self.file) + + def is_tty(self): + return self.file.isatty() if self.check_tty else True def next(self, n=1): - now = time() + now = monotonic() dt = now - self._ts self.update_avg(n, dt) self._ts = now @@ -73,12 +120,17 @@ def next(self, n=1): self.update() def iter(self, it): - try: + with self: for x in it: yield x self.next() - finally: - self.finish() + + def __enter__(self): + self.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.finish() class Progress(Infinite): @@ -119,9 +171,7 @@ def iter(self, it): except TypeError: pass - try: + with self: for x in it: yield x self.next() - finally: - self.finish() diff --git a/pipenv/patched/notpip/_vendor/progress/bar.py b/pipenv/patched/notpip/_vendor/progress/bar.py index 025e61c452..8819efda65 100644 --- a/pipenv/patched/notpip/_vendor/progress/bar.py +++ b/pipenv/patched/notpip/_vendor/progress/bar.py @@ -19,18 +19,15 @@ import sys from . import Progress -from .helpers import WritelnMixin -class Bar(WritelnMixin, Progress): +class Bar(Progress): width = 32 - message = '' suffix = '%(index)d/%(max)d' bar_prefix = ' |' bar_suffix = '| ' empty_fill = ' ' fill = '#' - hide_cursor = True def update(self): filled_length = int(self.width * self.progress) diff --git a/pipenv/patched/notpip/_vendor/progress/counter.py b/pipenv/patched/notpip/_vendor/progress/counter.py index 6b45a1ec60..d955ca4771 100644 --- a/pipenv/patched/notpip/_vendor/progress/counter.py +++ b/pipenv/patched/notpip/_vendor/progress/counter.py @@ -16,27 +16,20 @@ from __future__ import unicode_literals from . import Infinite, Progress -from .helpers import WriteMixin -class Counter(WriteMixin, Infinite): - message = '' - hide_cursor = True - +class Counter(Infinite): def update(self): self.write(str(self.index)) -class Countdown(WriteMixin, Progress): - hide_cursor = True - +class Countdown(Progress): def update(self): self.write(str(self.remaining)) -class Stack(WriteMixin, Progress): +class Stack(Progress): phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█') - hide_cursor = True def update(self): nphases = len(self.phases) diff --git a/pipenv/patched/notpip/_vendor/progress/helpers.py b/pipenv/patched/notpip/_vendor/progress/helpers.py deleted file mode 100644 index 0cde44ec27..0000000000 --- a/pipenv/patched/notpip/_vendor/progress/helpers.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
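[Aside: since Infinite now implements __enter__/__exit__, the mixins deleted below are redundant and any indicator can be driven as a context manager, which guarantees finish() — and cursor restoration — on exit. A small usage sketch against progress 1.5:

    from pipenv.patched.notpip._vendor.progress.bar import Bar

    with Bar('Processing', max=20) as bar:
        for _ in range(20):
            bar.next()]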
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -from __future__ import print_function - - -HIDE_CURSOR = '\x1b[?25l' -SHOW_CURSOR = '\x1b[?25h' - - -class WriteMixin(object): - hide_cursor = False - - def __init__(self, message=None, **kwargs): - super(WriteMixin, self).__init__(**kwargs) - self._width = 0 - if message: - self.message = message - - if self.file and self.file.isatty(): - if self.hide_cursor: - print(HIDE_CURSOR, end='', file=self.file) - print(self.message, end='', file=self.file) - self.file.flush() - - def write(self, s): - if self.file and self.file.isatty(): - b = '\b' * self._width - c = s.ljust(self._width) - print(b + c, end='', file=self.file) - self._width = max(self._width, len(s)) - self.file.flush() - - def finish(self): - if self.file and self.file.isatty() and self.hide_cursor: - print(SHOW_CURSOR, end='', file=self.file) - - -class WritelnMixin(object): - hide_cursor = False - - def __init__(self, message=None, **kwargs): - super(WritelnMixin, self).__init__(**kwargs) - if message: - self.message = message - - if self.file and self.file.isatty() and self.hide_cursor: - print(HIDE_CURSOR, end='', file=self.file) - - def clearln(self): - if self.file and self.file.isatty(): - print('\r\x1b[K', end='', file=self.file) - - def writeln(self, line): - if self.file and self.file.isatty(): - self.clearln() - print(line, end='', file=self.file) - self.file.flush() - - def finish(self): - if self.file and self.file.isatty(): - print(file=self.file) - if self.hide_cursor: - print(SHOW_CURSOR, end='', file=self.file) - - -from signal import signal, SIGINT -from sys import exit - - -class SigIntMixin(object): - """Registers a signal handler that calls finish on SIGINT""" - - def __init__(self, *args, **kwargs): - super(SigIntMixin, self).__init__(*args, **kwargs) - signal(SIGINT, self._sigint_handler) - - def _sigint_handler(self, signum, frame): - self.finish() - exit(0) diff --git a/pipenv/patched/notpip/_vendor/progress/spinner.py b/pipenv/patched/notpip/_vendor/progress/spinner.py index 464c7b2750..4e100cabb9 100644 --- a/pipenv/patched/notpip/_vendor/progress/spinner.py +++ b/pipenv/patched/notpip/_vendor/progress/spinner.py @@ -16,11 +16,9 @@ from __future__ import unicode_literals from . import Infinite -from .helpers import WriteMixin -class Spinner(WriteMixin, Infinite): - message = '' +class Spinner(Infinite): phases = ('-', '\\', '|', '/') hide_cursor = True @@ -40,5 +38,6 @@ class MoonSpinner(Spinner): class LineSpinner(Spinner): phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻'] + class PixelSpinner(Spinner): - phases = ['⣾','⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽'] + phases = ['⣾', '⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽'] diff --git a/pipenv/patched/notpip/_vendor/pyparsing.py b/pipenv/patched/notpip/_vendor/pyparsing.py index 3972b370ac..1d47c4601b 100644 --- a/pipenv/patched/notpip/_vendor/pyparsing.py +++ b/pipenv/patched/notpip/_vendor/pyparsing.py @@ -1,4 +1,4 @@ -#-*- coding: utf-8 -*- +# -*- coding: utf-8 -*- # module pyparsing.py # # Copyright (c) 2003-2019 Paul T. 
McGuire @@ -87,14 +87,16 @@ more complex ones - associate names with your parsed results using :class:`ParserElement.setResultsName` + - access the parsed data, which is returned as a :class:`ParseResults` + object - find some helpful expression short-cuts like :class:`delimitedList` and :class:`oneOf` - find more useful common expressions in the :class:`pyparsing_common` namespace class """ -__version__ = "2.3.1" -__versionTime__ = "09 Jan 2019 23:26 UTC" +__version__ = "2.4.2" +__versionTime__ = "29 Jul 2019 02:58 UTC" __author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" import string @@ -109,6 +111,9 @@ import traceback import types from datetime import datetime +from operator import itemgetter +import itertools +from functools import wraps try: # Python 3 @@ -124,11 +129,11 @@ try: # Python 3 from collections.abc import Iterable - from collections.abc import MutableMapping + from collections.abc import MutableMapping, Mapping except ImportError: # Python 2.7 from collections import Iterable - from collections import MutableMapping + from collections import MutableMapping, Mapping try: from collections import OrderedDict as _OrderedDict @@ -143,29 +148,66 @@ except ImportError: class SimpleNamespace: pass - -#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) - -__all__ = [ -'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', -'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', -'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', -'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', -'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', -'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', -'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', -'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', -'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', -'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', -'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', -'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', -'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', -'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', -'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', -'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', -'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', -] +# version compatibility configuration +__compat__ = SimpleNamespace() +__compat__.__doc__ = """ + A cross-version compatibility configuration for pyparsing features that will be + released in a future version. By setting values in this configuration to True, + those features can be enabled in prior versions for compatibility development + and testing. 
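[Aside: both namespaces are plain attribute bags, so opting in is a single assignment. A sketch using the vendored import path; the Forward example is illustrative:

    from pipenv.patched.notpip._vendor import pyparsing as pp

    pp.__diag__.warn_name_set_on_empty_Forward = True
    body = pp.Forward()('body')  # should now warn: results name set on an empty Forward]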
+ + - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping + of results names when an And expression is nested within an Or or MatchFirst; set to + True to enable bugfix released in pyparsing 2.3.0, or False to preserve + pre-2.3.0 handling of named results +""" +__compat__.collect_all_And_tokens = True + +__diag__ = SimpleNamespace() +__diag__.__doc__ = """ +Diagnostic configuration (all default to False) + - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results + name is defined on a MatchFirst or Or expression with one or more And subexpressions + (only warns if __compat__.collect_all_And_tokens is False) + - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results + name is defined on a containing expression with ungrouped subexpressions that also + have results names + - warn_name_set_on_empty_Forward - flag to enable warnings whan a Forward is defined + with a results name, but has no contents defined + - warn_on_multiple_string_args_to_oneof - flag to enable warnings whan oneOf is + incorrectly called with multiple str arguments + - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent + calls to ParserElement.setName() +""" +__diag__.warn_multiple_tokens_in_named_alternation = False +__diag__.warn_ungrouped_named_tokens_in_collection = False +__diag__.warn_name_set_on_empty_Forward = False +__diag__.warn_on_multiple_string_args_to_oneof = False +__diag__.enable_debug_on_named_expressions = False + +# ~ sys.stderr.write("testing pyparsing module, version %s, %s\n" % (__version__, __versionTime__)) + +__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__', + 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', + 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', + 'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', + 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', + 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', + 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', + 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', + 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', + 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', + 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', + 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', + 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', + 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', + 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', + 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', + 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', + 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation', 'locatedExpr', 'withClass', + 'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', + 'conditionAsParseAction', + ] system_version = tuple(sys.version_info)[:3] PY_3 = system_version[0] == 3 @@ -190,7 +232,7 @@ def _ustr(obj): < returns the unicode object | encodes it with 
the default encoding | ... >. """ - if isinstance(obj,unicode): + if isinstance(obj, unicode): return obj try: @@ -208,9 +250,10 @@ def _ustr(obj): # build list of single arg builtins, tolerant of Python version, that can be used as parse actions singleArgBuiltins = [] import __builtin__ + for fname in "sum len sorted reversed list tuple set any all min max".split(): try: - singleArgBuiltins.append(getattr(__builtin__,fname)) + singleArgBuiltins.append(getattr(__builtin__, fname)) except AttributeError: continue @@ -221,23 +264,36 @@ def _xml_escape(data): # ampersand must be replaced first from_symbols = '&><"\'' - to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) - for from_,to_ in zip(from_symbols, to_symbols): + to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split()) + for from_, to_ in zip(from_symbols, to_symbols): data = data.replace(from_, to_) return data -alphas = string.ascii_uppercase + string.ascii_lowercase -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) +alphas = string.ascii_uppercase + string.ascii_lowercase +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +_bslash = chr(92) printables = "".join(c for c in string.printable if c not in string.whitespace) + +def conditionAsParseAction(fn, message=None, fatal=False): + msg = message if message is not None else "failed user-defined condition" + exc_type = ParseFatalException if fatal else ParseException + fn = _trim_arity(fn) + + @wraps(fn) + def pa(s, l, t): + if not bool(fn(s, l, t)): + raise exc_type(s, l, msg) + + return pa + class ParseBaseException(Exception): """base exception class for all parsing runtime exceptions""" # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible - def __init__( self, pstr, loc=0, msg=None, elem=None ): + def __init__(self, pstr, loc=0, msg=None, elem=None): self.loc = loc if msg is None: self.msg = pstr @@ -256,27 +312,34 @@ def _from_exception(cls, pe): """ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - def __getattr__( self, aname ): + def __getattr__(self, aname): """supported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text """ - if( aname == "lineno" ): - return lineno( self.loc, self.pstr ) - elif( aname in ("col", "column") ): - return col( self.loc, self.pstr ) - elif( aname == "line" ): - return line( self.loc, self.pstr ) + if aname == "lineno": + return lineno(self.loc, self.pstr) + elif aname in ("col", "column"): + return col(self.loc, self.pstr) + elif aname == "line": + return line(self.loc, self.pstr) else: raise AttributeError(aname) - def __str__( self ): - return "%s (at char %d), (line:%d, col:%d)" % \ - ( self.msg, self.loc, self.lineno, self.column ) - def __repr__( self ): + def __str__(self): + if self.pstr: + if self.loc >= len(self.pstr): + foundstr = ', found end of text' + else: + foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\') + else: + foundstr = '' + return ("%s%s (at char %d), (line:%d, col:%d)" % + (self.msg, foundstr, self.loc, self.lineno, self.column)) + def __repr__(self): return _ustr(self) - def markInputline( self, markerString = ">!<" ): + def markInputline(self, markerString=">!<"): """Extracts the exception line from the input string, and marks the location of the exception with a special symbol. 
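[Aside: the enriched __str__ now reports what was actually found at the failure point, and markInputline shows where. For example:

    from pipenv.patched.notpip._vendor import pyparsing as pp

    try:
        pp.Word(pp.nums).parseString('abc')
    except pp.ParseException as err:
        print(err)
        # e.g. "Expected W:(0123456789), found 'a'  (at char 0), (line:1, col:1)"
        print(err.markInputline())  # ">!<abc"]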
""" @@ -350,7 +413,7 @@ def explain(exc, depth=16): callers = inspect.getinnerframes(exc.__traceback__, context=depth) seen = set() for i, ff in enumerate(callers[-depth:]): - frm = ff.frame + frm = ff[0] f_self = frm.f_locals.get('self', None) if isinstance(f_self, ParserElement): @@ -412,21 +475,21 @@ class RecursiveGrammarException(Exception): """exception thrown by :class:`ParserElement.validate` if the grammar could be improperly recursive """ - def __init__( self, parseElementList ): + def __init__(self, parseElementList): self.parseElementTrace = parseElementList - def __str__( self ): + def __str__(self): return "RecursiveGrammarException: %s" % self.parseElementTrace class _ParseResultsWithOffset(object): - def __init__(self,p1,p2): - self.tup = (p1,p2) - def __getitem__(self,i): + def __init__(self, p1, p2): + self.tup = (p1, p2) + def __getitem__(self, i): return self.tup[i] def __repr__(self): return repr(self.tup[0]) - def setOffset(self,i): - self.tup = (self.tup[0],i) + def setOffset(self, i): + self.tup = (self.tup[0], i) class ParseResults(object): """Structured parse results, to provide multiple means of access to @@ -471,7 +534,7 @@ def test(s, fn=repr): - month: 12 - year: 1999 """ - def __new__(cls, toklist=None, name=None, asList=True, modal=True ): + def __new__(cls, toklist=None, name=None, asList=True, modal=True): if isinstance(toklist, cls): return toklist retobj = object.__new__(cls) @@ -480,7 +543,7 @@ def __new__(cls, toklist=None, name=None, asList=True, modal=True ): # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible - def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): + def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance): if self.__doinit: self.__doinit = False self.__name = None @@ -501,85 +564,93 @@ def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance if name is not None and name: if not modal: self.__accumNames[name] = 0 - if isinstance(name,int): - name = _ustr(name) # will always return a str, but use _ustr for consistency + if isinstance(name, int): + name = _ustr(name) # will always return a str, but use _ustr for consistency self.__name = name - if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): - if isinstance(toklist,basestring): - toklist = [ toklist ] + if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])): + if isinstance(toklist, basestring): + toklist = [toklist] if asList: - if isinstance(toklist,ParseResults): + if isinstance(toklist, ParseResults): self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0) else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) self[name].__name = name else: try: self[name] = toklist[0] - except (KeyError,TypeError,IndexError): + except (KeyError, TypeError, IndexError): self[name] = toklist - def __getitem__( self, i ): - if isinstance( i, (int,slice) ): + def __getitem__(self, i): + if isinstance(i, (int, slice)): return self.__toklist[i] else: if i not in self.__accumNames: return self.__tokdict[i][-1][0] else: - return ParseResults([ v[0] for v in self.__tokdict[i] ]) + return ParseResults([v[0] for v in self.__tokdict[i]]) - def __setitem__( self, k, v, isinstance=isinstance ): - if isinstance(v,_ParseResultsWithOffset): - self.__tokdict[k] = 
self.__tokdict.get(k,list()) + [v] + def __setitem__(self, k, v, isinstance=isinstance): + if isinstance(v, _ParseResultsWithOffset): + self.__tokdict[k] = self.__tokdict.get(k, list()) + [v] sub = v[0] - elif isinstance(k,(int,slice)): + elif isinstance(k, (int, slice)): self.__toklist[k] = v sub = v else: - self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] + self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)] sub = v - if isinstance(sub,ParseResults): + if isinstance(sub, ParseResults): sub.__parent = wkref(self) - def __delitem__( self, i ): - if isinstance(i,(int,slice)): - mylen = len( self.__toklist ) + def __delitem__(self, i): + if isinstance(i, (int, slice)): + mylen = len(self.__toklist) del self.__toklist[i] # convert int to slice if isinstance(i, int): if i < 0: i += mylen - i = slice(i, i+1) + i = slice(i, i + 1) # get removed indices removed = list(range(*i.indices(mylen))) removed.reverse() # fixup indices in token dictionary - for name,occurrences in self.__tokdict.items(): + for name, occurrences in self.__tokdict.items(): for j in removed: for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) else: del self.__tokdict[i] - def __contains__( self, k ): + def __contains__(self, k): return k in self.__tokdict - def __len__( self ): return len( self.__toklist ) - def __bool__(self): return ( not not self.__toklist ) + def __len__(self): + return len(self.__toklist) + + def __bool__(self): + return (not not self.__toklist) __nonzero__ = __bool__ - def __iter__( self ): return iter( self.__toklist ) - def __reversed__( self ): return iter( self.__toklist[::-1] ) - def _iterkeys( self ): + + def __iter__(self): + return iter(self.__toklist) + + def __reversed__(self): + return iter(self.__toklist[::-1]) + + def _iterkeys(self): if hasattr(self.__tokdict, "iterkeys"): return self.__tokdict.iterkeys() else: return iter(self.__tokdict) - def _itervalues( self ): + def _itervalues(self): return (self[k] for k in self._iterkeys()) - def _iteritems( self ): + def _iteritems(self): return ((k, self[k]) for k in self._iterkeys()) if PY_3: @@ -602,24 +673,24 @@ def _iteritems( self ): iteritems = _iteritems """Returns an iterator of all named result key-value tuples (Python 2.x only).""" - def keys( self ): + def keys(self): """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.iterkeys()) - def values( self ): + def values(self): """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.itervalues()) - def items( self ): + def items(self): """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" return list(self.iteritems()) - def haskeys( self ): + def haskeys(self): """Since keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined results names.""" return bool(self.__tokdict) - def pop( self, *args, **kwargs): + def pop(self, *args, **kwargs): """ Removes and returns item at specified index (default= ``last``). Supports both ``list`` and ``dict`` semantics for ``pop()``. 
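[Aside: an integer (or no) argument gives list behavior, a string key gives dict behavior. Illustratively:

    from pipenv.patched.notpip._vendor import pyparsing as pp

    result = pp.OneOrMore(pp.Word(pp.alphas)).parseString('a b c')
    result.pop()                          # list semantics: removes and returns 'c'
    result.pop(0)                         # removes and returns 'a'
    result.pop('missing', default='n/a')  # dict semantics: returns the default]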
If @@ -658,14 +729,14 @@ def remove_LABEL(tokens): """ if not args: args = [-1] - for k,v in kwargs.items(): + for k, v in kwargs.items(): if k == 'default': args = (args[0], v) else: raise TypeError("pop() got an unexpected keyword argument '%s'" % k) - if (isinstance(args[0], int) or - len(args) == 1 or - args[0] in self): + if (isinstance(args[0], int) + or len(args) == 1 + or args[0] in self): index = args[0] ret = self[index] del self[index] @@ -697,7 +768,7 @@ def get(self, key, defaultValue=None): else: return defaultValue - def insert( self, index, insStr ): + def insert(self, index, insStr): """ Inserts new element at location index in the list of parsed tokens. @@ -714,11 +785,11 @@ def insert_locn(locn, tokens): """ self.__toklist.insert(index, insStr) # fixup indices in token dictionary - for name,occurrences in self.__tokdict.items(): + for name, occurrences in self.__tokdict.items(): for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - def append( self, item ): + def append(self, item): """ Add single element to end of ParseResults list of elements. @@ -733,7 +804,7 @@ def append_sum(tokens): """ self.__toklist.append(item) - def extend( self, itemseq ): + def extend(self, itemseq): """ Add sequence of elements to end of ParseResults list of elements. @@ -748,78 +819,70 @@ def make_palindrome(tokens): print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' """ if isinstance(itemseq, ParseResults): - self += itemseq + self.__iadd__(itemseq) else: self.__toklist.extend(itemseq) - def clear( self ): + def clear(self): """ Clear all elements and results names. """ del self.__toklist[:] self.__tokdict.clear() - def __getattr__( self, name ): + def __getattr__(self, name): try: return self[name] except KeyError: return "" - if name in self.__tokdict: - if name not in self.__accumNames: - return self.__tokdict[name][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[name] ]) - else: - return "" - - def __add__( self, other ): + def __add__(self, other): ret = self.copy() ret += other return ret - def __iadd__( self, other ): + def __iadd__(self, other): if other.__tokdict: offset = len(self.__toklist) - addoffset = lambda a: offset if a<0 else a+offset + addoffset = lambda a: offset if a < 0 else a + offset otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) - for (k,vlist) in otheritems for v in vlist] - for k,v in otherdictitems: + otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) + for k, vlist in otheritems for v in vlist] + for k, v in otherdictitems: self[k] = v - if isinstance(v[0],ParseResults): + if isinstance(v[0], ParseResults): v[0].__parent = wkref(self) self.__toklist += other.__toklist - self.__accumNames.update( other.__accumNames ) + self.__accumNames.update(other.__accumNames) return self def __radd__(self, other): - if isinstance(other,int) and other == 0: + if isinstance(other, int) and other == 0: # useful for merging many ParseResults using sum() builtin return self.copy() else: # this may raise a TypeError - so be it return other + self - def __repr__( self ): - return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) + def __repr__(self): + return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict)) - def __str__( self ): + def __str__(self): return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else 
repr(i) for i in self.__toklist) + ']' - def _asStringList( self, sep='' ): + def _asStringList(self, sep=''): out = [] for item in self.__toklist: if out and sep: out.append(sep) - if isinstance( item, ParseResults ): + if isinstance(item, ParseResults): out += item._asStringList() else: - out.append( _ustr(item) ) + out.append(_ustr(item)) return out - def asList( self ): + def asList(self): """ Returns the parse results as a nested list of matching tokens, all converted to strings. @@ -834,9 +897,9 @@ def asList( self ): result_list = result.asList() print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] """ - return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] + return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist] - def asDict( self ): + def asDict(self): """ Returns the named parse results as a nested dictionary. @@ -870,27 +933,27 @@ def toItem(obj): else: return obj - return dict((k,toItem(v)) for k,v in item_fn()) + return dict((k, toItem(v)) for k, v in item_fn()) - def copy( self ): + def copy(self): """ Returns a new copy of a :class:`ParseResults` object. """ - ret = ParseResults( self.__toklist ) + ret = ParseResults(self.__toklist) ret.__tokdict = dict(self.__tokdict.items()) ret.__parent = self.__parent - ret.__accumNames.update( self.__accumNames ) + ret.__accumNames.update(self.__accumNames) ret.__name = self.__name return ret - def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): + def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True): """ (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. """ nl = "\n" out = [] - namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() - for v in vlist) + namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items() + for v in vlist) nextLevelIndent = indent + " " # collapse out indents if formatting is not desired @@ -912,20 +975,20 @@ def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): else: selfTag = "ITEM" - out += [ nl, indent, "<", selfTag, ">" ] + out += [nl, indent, "<", selfTag, ">"] - for i,res in enumerate(self.__toklist): - if isinstance(res,ParseResults): + for i, res in enumerate(self.__toklist): + if isinstance(res, ParseResults): if i in namedItems: - out += [ res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] + out += [res.asXML(namedItems[i], + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] else: - out += [ res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] + out += [res.asXML(None, + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] else: # individual token, see if there is a name for it resTag = None @@ -937,16 +1000,16 @@ def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): else: resTag = "ITEM" xmlBodyText = _xml_escape(_ustr(res)) - out += [ nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "</", resTag, ">" ] + out += [nl, nextLevelIndent, "<", resTag, ">", + xmlBodyText, + "</", resTag, ">"] - out += [ nl, indent, "</", selfTag, ">" ] + out += [nl, indent, "</", selfTag, ">"] return "".join(out) - def __lookup(self,sub): - for k,vlist in self.__tokdict.items(): - for v,loc in vlist: + def __lookup(self, sub): + for k, vlist in self.__tokdict.items(): + for v, loc in vlist: if sub is v: return k 
return None @@ -984,14 +1047,14 @@ def getName(self): return par.__lookup(self) else: return None - elif (len(self) == 1 and - len(self.__tokdict) == 1 and - next(iter(self.__tokdict.values()))[0][1] in (0,-1)): + elif (len(self) == 1 + and len(self.__tokdict) == 1 + and next(iter(self.__tokdict.values()))[0][1] in (0, -1)): return next(iter(self.__tokdict.keys())) else: return None - def dump(self, indent='', depth=0, full=True): + def dump(self, indent='', full=True, include_list=True, _depth=0): """ Diagnostic method for listing out the contents of a :class:`ParseResults`. Accepts an optional ``indent`` argument so @@ -1014,28 +1077,45 @@ def dump(self, indent='', depth=0, full=True): """ out = [] NL = '\n' - out.append( indent+_ustr(self.asList()) ) + if include_list: + out.append(indent + _ustr(self.asList())) + else: + out.append('') + if full: if self.haskeys(): - items = sorted((str(k), v) for k,v in self.items()) - for k,v in items: + items = sorted((str(k), v) for k, v in self.items()) + for k, v in items: if out: out.append(NL) - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): + out.append("%s%s- %s: " % (indent, (' ' * _depth), k)) + if isinstance(v, ParseResults): if v: - out.append( v.dump(indent,depth+1) ) + out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1)) else: out.append(_ustr(v)) else: out.append(repr(v)) - elif any(isinstance(vv,ParseResults) for vv in self): + elif any(isinstance(vv, ParseResults) for vv in self): v = self - for i,vv in enumerate(v): - if isinstance(vv,ParseResults): - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) + for i, vv in enumerate(v): + if isinstance(vv, ParseResults): + out.append("\n%s%s[%d]:\n%s%s%s" % (indent, + (' ' * (_depth)), + i, + indent, + (' ' * (_depth + 1)), + vv.dump(indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1))) else: - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) + out.append("\n%s%s[%d]:\n%s%s%s" % (indent, + (' ' * (_depth)), + i, + indent, + (' ' * (_depth + 1)), + _ustr(vv))) return "".join(out) @@ -1068,18 +1148,15 @@ def pprint(self, *args, **kwargs): # add support for pickle protocol def __getstate__(self): - return ( self.__toklist, - ( self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name ) ) + return (self.__toklist, + (self.__tokdict.copy(), + self.__parent is not None and self.__parent() or None, + self.__accumNames, + self.__name)) - def __setstate__(self,state): + def __setstate__(self, state): self.__toklist = state[0] - (self.__tokdict, - par, - inAccumNames, - self.__name) = state[1] + self.__tokdict, par, inAccumNames, self.__name = state[1] self.__accumNames = {} self.__accumNames.update(inAccumNames) if par is not None: @@ -1091,11 +1168,39 @@ def __getnewargs__(self): return self.__toklist, self.__name, self.__asList, self.__modal def __dir__(self): - return (dir(type(self)) + list(self.keys())) + return dir(type(self)) + list(self.keys()) + + @classmethod + def from_dict(cls, other, name=None): + """ + Helper classmethod to construct a ParseResults from a dict, preserving the + name-value relations as results names. 
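# --- Editor's note: illustrative sketch, not part of the vendored patch. ---
# The reworked dump() above threads indent/full/include_list through nested
# results; include_list=False suppresses the leading token list when only
# the named results matter. Assuming the public pyparsing 2.4.x API:
#
#     from pyparsing import Word, nums
#     date = Word(nums)("year") + "/" + Word(nums)("month") + "/" + Word(nums)("day")
#     print(date.parseString("1999/12/31").dump())
#     # ['1999', '/', '12', '/', '31']
#     # - day: '31'
#     # - month: '12'
#     # - year: '1999'
#     # with include_list=False the first line (the token list) is omitted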
If an optional 'name' argument is + given, a nested ParseResults will be returned + """ + def is_iterable(obj): + try: + iter(obj) + except Exception: + return False + else: + if PY_3: + return not isinstance(obj, (str, bytes)) + else: + return not isinstance(obj, basestring) + + ret = cls([]) + for k, v in other.items(): + if isinstance(v, Mapping): + ret += cls.from_dict(v, name=k) + else: + ret += cls([v], name=k, asList=is_iterable(v)) + if name is not None: + ret = cls([ret], name=name) + return ret MutableMapping.register(ParseResults) -def col (loc,strg): +def col (loc, strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. @@ -1107,9 +1212,9 @@ def col (loc,strg): location, and line and column positions within the parsed string. """ s = strg - return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) + return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) -def lineno(loc,strg): +def lineno(loc, strg): """Returns current line number within a string, counting newlines as line separators. The first line is number 1. @@ -1119,26 +1224,26 @@ def lineno(loc,strg): suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ - return strg.count("\n",0,loc) + 1 + return strg.count("\n", 0, loc) + 1 -def line( loc, strg ): +def line(loc, strg): """Returns the line of text containing loc within a string, counting newlines as line separators. """ lastCR = strg.rfind("\n", 0, loc) nextCR = strg.find("\n", loc) if nextCR >= 0: - return strg[lastCR+1:nextCR] + return strg[lastCR + 1:nextCR] else: - return strg[lastCR+1:] + return strg[lastCR + 1:] -def _defaultStartDebugAction( instring, loc, expr ): - print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))) +def _defaultStartDebugAction(instring, loc, expr): + print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring)))) -def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): - print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) +def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks): + print("Matched " + _ustr(expr) + " -> " + str(toks.asList())) -def _defaultExceptionDebugAction( instring, loc, expr, exc ): - print ("Exception raised:" + _ustr(exc)) +def _defaultExceptionDebugAction(instring, loc, expr, exc): + print("Exception raised:" + _ustr(exc)) def nullDebugAction(*args): """'Do-nothing' debug action, to suppress debugging output during parsing.""" @@ -1169,16 +1274,16 @@ def nullDebugAction(*args): 'decorator to trim function calls to match the arity of the target' def _trim_arity(func, maxargs=2): if func in singleArgBuiltins: - return lambda s,l,t: func(t) + return lambda s, l, t: func(t) limit = [0] foundArity = [False] # traceback return data structure changed in Py3.5 - normalize back to plain tuples - if system_version[:2] >= (3,5): + if system_version[:2] >= (3, 5): def extract_stack(limit=0): # special handling for Python 3.5.0 - extra deep call stack by 1 - offset = -3 if system_version == (3,5,0) else -2 - frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] + offset = -3 if system_version == (3, 5, 0) else -2 + frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset] return [frame_summary[:2]] def extract_tb(tb, limit=0): frames = 
traceback.extract_tb(tb, limit=limit) @@ -1195,7 +1300,7 @@ def extract_tb(tb, limit=0): # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! this_line = extract_stack(limit=2)[-1] - pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) + pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF) def wrapper(*args): while 1: @@ -1213,7 +1318,10 @@ def wrapper(*args): if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: raise finally: - del tb + try: + del tb + except NameError: + pass if limit[0] <= maxargs: limit[0] += 1 @@ -1231,13 +1339,14 @@ def wrapper(*args): return wrapper + class ParserElement(object): """Abstract base level parser element class.""" DEFAULT_WHITE_CHARS = " \n\t\r" verbose_stacktrace = False @staticmethod - def setDefaultWhitespaceChars( chars ): + def setDefaultWhitespaceChars(chars): r""" Overrides the default whitespace chars @@ -1274,10 +1383,10 @@ def inlineLiteralsUsing(cls): """ ParserElement._literalStringClass = cls - def __init__( self, savelist=False ): + def __init__(self, savelist=False): self.parseAction = list() self.failAction = None - #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall + # ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall self.strRepr = None self.resultsName = None self.saveAsList = savelist @@ -1292,12 +1401,12 @@ def __init__( self, savelist=False ): self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index self.errmsg = "" self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = ( None, None, None ) #custom debug actions + self.debugActions = (None, None, None) # custom debug actions self.re = None self.callPreparse = True # used to avoid redundant calls to preParse self.callDuringTry = False - def copy( self ): + def copy(self): """ Make a copy of this :class:`ParserElement`. Useful for defining different parse actions for the same parsing pattern, using copies of @@ -1306,8 +1415,8 @@ def copy( self ): Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") - integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K") + integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) @@ -1317,16 +1426,16 @@ def copy( self ): Equivalent form of ``expr.copy()`` is just ``expr()``:: - integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") """ - cpy = copy.copy( self ) + cpy = copy.copy(self) cpy.parseAction = self.parseAction[:] cpy.ignoreExprs = self.ignoreExprs[:] if self.copyDefaultWhiteChars: cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS return cpy - def setName( self, name ): + def setName(self, name): """ Define name for this expression, makes debugging and exception messages clearer. 
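# --- Editor's note: illustrative sketch, not part of the vendored patch. ---
# _trim_arity above adapts a user parse action to the (s, loc, toks) calling
# convention by retrying with fewer arguments, so actions may declare 0-3
# parameters interchangeably. Assuming the public pyparsing API:
#
#     from pyparsing import Word, nums
#     integer = Word(nums)
#     integer.setParseAction(lambda toks: int(toks[0]))   # fn(toks)
#     # equivalent: lambda s, loc, toks: int(toks[0])     # fn(s, loc, toks)
#     integer.parseString("42")   # -> [42]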
@@ -1337,11 +1446,11 @@ def setName( self, name ): """ self.name = name self.errmsg = "Expected " + self.name - if hasattr(self,"exception"): - self.exception.msg = self.errmsg + if __diag__.enable_debug_on_named_expressions: + self.setDebug() return self - def setResultsName( self, name, listAllMatches=False ): + def setResultsName(self, name, listAllMatches=False): """ Define name for referencing matching tokens as a nested attribute of the returned parse results. @@ -1362,15 +1471,18 @@ def setResultsName( self, name, listAllMatches=False ): # equivalent form: date_str = integer("year") + '/' + integer("month") + '/' + integer("day") """ + return self._setResultsName(name, listAllMatches) + + def _setResultsName(self, name, listAllMatches=False): newself = self.copy() if name.endswith("*"): name = name[:-1] - listAllMatches=True + listAllMatches = True newself.resultsName = name newself.modalResults = not listAllMatches return newself - def setBreak(self,breakFlag = True): + def setBreak(self, breakFlag=True): """Method to invoke the Python pdb debugger when this element is about to be parsed. Set ``breakFlag`` to True to enable, False to disable. @@ -1379,20 +1491,21 @@ def setBreak(self,breakFlag = True): _parseMethod = self._parse def breaker(instring, loc, doActions=True, callPreParse=True): import pdb + # this call to pdb.set_trace() is intentional, not a checkin error pdb.set_trace() - return _parseMethod( instring, loc, doActions, callPreParse ) + return _parseMethod(instring, loc, doActions, callPreParse) breaker._originalParseMethod = _parseMethod self._parse = breaker else: - if hasattr(self._parse,"_originalParseMethod"): + if hasattr(self._parse, "_originalParseMethod"): self._parse = self._parse._originalParseMethod return self - def setParseAction( self, *fns, **kwargs ): + def setParseAction(self, *fns, **kwargs): """ Define one or more actions to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as ``fn(s,loc,toks)`` , - ``fn(loc,toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` , + ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - s = the original string being parsed (see note below) - loc = the location of the matching substring @@ -1402,8 +1515,11 @@ def setParseAction( self, *fns, **kwargs ): value from fn, and the modified list of tokens will replace the original. Otherwise, fn does not need to return any value. + If None is passed as the parse action, all previously added parse actions for this + expression are cleared. + Optional keyword arguments: - - callDuringTry = (default= ``False`` ) indicate if parse action should be run during lookaheads and alternate testing + - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. 
See :class:`parseString for more @@ -1425,11 +1541,16 @@ def setParseAction( self, *fns, **kwargs ): # note that integer fields are now ints, not strings date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] """ - self.parseAction = list(map(_trim_arity, list(fns))) - self.callDuringTry = kwargs.get("callDuringTry", False) + if list(fns) == [None,]: + self.parseAction = [] + else: + if not all(callable(fn) for fn in fns): + raise TypeError("parse actions must be callable") + self.parseAction = list(map(_trim_arity, list(fns))) + self.callDuringTry = kwargs.get("callDuringTry", False) return self - def addParseAction( self, *fns, **kwargs ): + def addParseAction(self, *fns, **kwargs): """ Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`. @@ -1457,21 +1578,17 @@ def addCondition(self, *fns, **kwargs): result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) """ - msg = kwargs.get("message", "failed user-defined condition") - exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException for fn in fns: - fn = _trim_arity(fn) - def pa(s,l,t): - if not bool(fn(s,l,t)): - raise exc_type(s,l,msg) - self.parseAction.append(pa) + self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'), + fatal=kwargs.get('fatal', False))) + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) return self - def setFailAction( self, fn ): + def setFailAction(self, fn): """Define action to perform if parsing fails at this expression. Fail acton fn is a callable function that takes the arguments - ``fn(s,loc,expr,err)`` where: + ``fn(s, loc, expr, err)`` where: - s = string being parsed - loc = location where expression match was attempted and failed - expr = the parse expression that failed @@ -1481,22 +1598,22 @@ def setFailAction( self, fn ): self.failAction = fn return self - def _skipIgnorables( self, instring, loc ): + def _skipIgnorables(self, instring, loc): exprsFound = True while exprsFound: exprsFound = False for e in self.ignoreExprs: try: while 1: - loc,dummy = e._parse( instring, loc ) + loc, dummy = e._parse(instring, loc) exprsFound = True except ParseException: pass return loc - def preParse( self, instring, loc ): + def preParse(self, instring, loc): if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) + loc = self._skipIgnorables(instring, loc) if self.skipWhitespace: wt = self.whiteChars @@ -1506,101 +1623,105 @@ def preParse( self, instring, loc ): return loc - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): return loc, [] - def postParse( self, instring, loc, tokenlist ): + def postParse(self, instring, loc, tokenlist): return tokenlist - #~ @profile - def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): - debugging = ( self.debug ) #and doActions ) + # ~ @profile + def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True): + TRY, MATCH, FAIL = 0, 1, 2 + debugging = (self.debug) # and doActions) if debugging or self.failAction: - #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - if (self.debugActions[0] ): - self.debugActions[0]( instring, loc, self ) - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc + # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, 
instring), col(loc, instring))) + if self.debugActions[TRY]: + self.debugActions[TRY](instring, loc, self) try: - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - except ParseBaseException as err: - #~ print ("Exception raised:", err) - if self.debugActions[2]: - self.debugActions[2]( instring, tokensStart, self, err ) + if callPreParse and self.callPreparse: + preloc = self.preParse(instring, loc) + else: + preloc = loc + tokensStart = preloc + if self.mayIndexError or preloc >= len(instring): + try: + loc, tokens = self.parseImpl(instring, preloc, doActions) + except IndexError: + raise ParseException(instring, len(instring), self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, preloc, doActions) + except Exception as err: + # ~ print ("Exception raised:", err) + if self.debugActions[FAIL]: + self.debugActions[FAIL](instring, tokensStart, self, err) if self.failAction: - self.failAction( instring, tokensStart, self, err ) + self.failAction(instring, tokensStart, self, err) raise else: if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) + preloc = self.preParse(instring, loc) else: preloc = loc tokensStart = preloc if self.mayIndexError or preloc >= len(instring): try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) + loc, tokens = self.parseImpl(instring, preloc, doActions) except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) + raise ParseException(instring, len(instring), self.errmsg, self) else: - loc,tokens = self.parseImpl( instring, preloc, doActions ) + loc, tokens = self.parseImpl(instring, preloc, doActions) - tokens = self.postParse( instring, loc, tokens ) + tokens = self.postParse(instring, loc, tokens) - retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) + retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults) if self.parseAction and (doActions or self.callDuringTry): if debugging: try: for fn in self.parseAction: try: - tokens = fn( instring, tokensStart, retTokens ) + tokens = fn(instring, tokensStart, retTokens) except IndexError as parse_action_exc: exc = ParseException("exception raised in parse action") exc.__cause__ = parse_action_exc raise exc if tokens is not None and tokens is not retTokens: - retTokens = ParseResults( tokens, + retTokens = ParseResults(tokens, self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - except ParseBaseException as err: - #~ print "Exception raised in user parse action:", err - if (self.debugActions[2] ): - self.debugActions[2]( instring, tokensStart, self, err ) + asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults) + except Exception as err: + # ~ print "Exception raised in user parse action:", err + if self.debugActions[FAIL]: + self.debugActions[FAIL](instring, tokensStart, self, err) raise else: for fn in self.parseAction: try: - tokens = fn( instring, tokensStart, retTokens ) + tokens = fn(instring, tokensStart, retTokens) except IndexError as parse_action_exc: exc = ParseException("exception raised in parse action") exc.__cause__ = parse_action_exc raise exc if tokens is not None and tokens is not retTokens: - retTokens = ParseResults( tokens, + retTokens = ParseResults(tokens, self.resultsName, - asList=self.saveAsList and 
isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) + asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults) if debugging: - #~ print ("Matched",self,"->",retTokens.asList()) - if (self.debugActions[1] ): - self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) + # ~ print ("Matched", self, "->", retTokens.asList()) + if self.debugActions[MATCH]: + self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens) return loc, retTokens - def tryParse( self, instring, loc ): + def tryParse(self, instring, loc): try: - return self._parse( instring, loc, doActions=False )[0] + return self._parse(instring, loc, doActions=False)[0] except ParseFatalException: - raise ParseException( instring, loc, self.errmsg, self) + raise ParseException(instring, loc, self.errmsg, self) def canParseNext(self, instring, loc): try: @@ -1697,7 +1818,7 @@ def cache_len(self): # this method gets repeatedly called during backtracking with the same arguments - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): + def _parseCache(self, instring, loc, doActions=True, callPreParse=True): HIT, MISS = 0, 1 lookup = (self, instring, loc, callPreParse, doActions) with ParserElement.packrat_cache_lock: @@ -1718,7 +1839,7 @@ def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): ParserElement.packrat_cache_stats[HIT] += 1 if isinstance(value, Exception): raise value - return (value[0], value[1].copy()) + return value[0], value[1].copy() _parse = _parseNoCache @@ -1763,12 +1884,16 @@ def enablePackrat(cache_size_limit=128): ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) ParserElement._parse = ParserElement._parseCache - def parseString( self, instring, parseAll=False ): + def parseString(self, instring, parseAll=False): """ Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built. + Returns the parsed data as a :class:`ParseResults` object, which may be + accessed as a list, or as a dict or object with attributes if the given parser + includes results names. + If you want the grammar to require that the entire input string be successfully parsed, then set ``parseAll`` to True (equivalent to ending the grammar with ``StringEnd()``). 
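# --- Editor's note: illustrative sketch, not part of the vendored patch. ---
# _parseCache above memoizes parse results keyed on
# (expression, string, location, callPreParse, doActions), which pays off in
# grammars with heavy backtracking. Packrat mode is opt-in and is typically
# enabled right after import, before any parsing:
#
#     import pyparsing
#     pyparsing.ParserElement.enablePackrat()   # default cache size is 128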
@@ -1782,7 +1907,7 @@ def parseString( self, instring, parseAll=False ): - calling ``parseWithTabs`` on your grammar before calling ``parseString`` (see :class:`parseWithTabs`) - - define your parse action using the full ``(s,loc,toks)`` signature, and + - define your parse action using the full ``(s, loc, toks)`` signature, and reference the input string using the parse action's ``s`` argument - explictly expand the tabs in your input string before calling ``parseString`` @@ -1795,17 +1920,17 @@ def parseString( self, instring, parseAll=False ): ParserElement.resetCache() if not self.streamlined: self.streamline() - #~ self.saveAsList = True + # ~ self.saveAsList = True for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = instring.expandtabs() try: - loc, tokens = self._parse( instring, 0 ) + loc, tokens = self._parse(instring, 0) if parseAll: - loc = self.preParse( instring, loc ) + loc = self.preParse(instring, loc) se = Empty() + StringEnd() - se._parse( instring, loc ) + se._parse(instring, loc) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise @@ -1815,7 +1940,7 @@ def parseString( self, instring, parseAll=False ): else: return tokens - def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): + def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): """ Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional @@ -1830,7 +1955,7 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) - for tokens,start,end in Word(alphas).scanString(source): + for tokens, start, end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) @@ -1862,16 +1987,16 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): try: while loc <= instrlen and matches < maxMatches: try: - preloc = preparseFn( instring, loc ) - nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) + preloc = preparseFn(instring, loc) + nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) except ParseException: - loc = preloc+1 + loc = preloc + 1 else: if nextLoc > loc: matches += 1 yield tokens, preloc, nextLoc if overlap: - nextloc = preparseFn( instring, loc ) + nextloc = preparseFn(instring, loc) if nextloc > loc: loc = nextLoc else: @@ -1879,7 +2004,7 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): else: loc = nextLoc else: - loc = preloc+1 + loc = preloc + 1 except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise @@ -1887,7 +2012,7 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc - def transformString( self, instring ): + def transformString(self, instring): """ Extension to :class:`scanString`, to modify matching text with modified tokens that may be returned from a parse action. 
To use ``transformString``, define a grammar and @@ -1913,19 +2038,19 @@ def transformString( self, instring ): # keep string locs straight between transformString and scanString self.keepTabs = True try: - for t,s,e in self.scanString( instring ): - out.append( instring[lastE:s] ) + for t, s, e in self.scanString(instring): + out.append(instring[lastE:s]) if t: - if isinstance(t,ParseResults): + if isinstance(t, ParseResults): out += t.asList() - elif isinstance(t,list): + elif isinstance(t, list): out += t else: out.append(t) lastE = e out.append(instring[lastE:]) out = [o for o in out if o] - return "".join(map(_ustr,_flatten(out))) + return "".join(map(_ustr, _flatten(out))) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise @@ -1933,7 +2058,7 @@ def transformString( self, instring ): # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc - def searchString( self, instring, maxMatches=_MAX_INT ): + def searchString(self, instring, maxMatches=_MAX_INT): """ Another extension to :class:`scanString`, simplifying the access to the tokens found to match the given parse expression. May be called with optional @@ -1955,7 +2080,7 @@ def searchString( self, instring, maxMatches=_MAX_INT ): ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] """ try: - return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) + return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)]) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise @@ -1981,14 +2106,14 @@ def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): """ splits = 0 last = 0 - for t,s,e in self.scanString(instring, maxMatches=maxsplit): + for t, s, e in self.scanString(instring, maxMatches=maxsplit): yield instring[last:s] if includeSeparators: yield t[0] last = e yield instring[last:] - def __add__(self, other ): + def __add__(self, other): """ Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement converts them to :class:`Literal`s by default. @@ -2002,24 +2127,42 @@ def __add__(self, other ): prints:: Hello, World! -> ['Hello', ',', 'World', '!'] + + ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. + + Literal('start') + ... + Literal('end') + + is equivalent to: + + Literal('start') + SkipTo('end')("_skipped*") + Literal('end') + + Note that the skipped text is returned with '_skipped' as a results name, + and to support having multiple skips in the same parser, the value returned is + a list of all skipped text. 
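# --- Editor's note: illustrative sketch, not part of the vendored patch. ---
# The '...' shorthand described above inserts a SkipTo for the elided region
# and collects the skipped text under the '_skipped' results name. Assuming
# the public pyparsing 2.4.x API (exact whitespace in the skipped text
# follows SkipTo's behavior):
#
#     from pyparsing import Literal
#     expr = Literal("start") + ... + Literal("end")
#     result = expr.parseString("start middle end")
#     # result          -> ['start', 'middle ', 'end']
#     # result._skipped -> ['middle ']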
""" - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if other is Ellipsis: + return _PendingSkip(self) + + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None - return And( [ self, other ] ) + return And([self, other]) - def __radd__(self, other ): + def __radd__(self, other): """ Implementation of + operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if other is Ellipsis: + return SkipTo(self)("_skipped*") + self + + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other + self @@ -2027,64 +2170,70 @@ def __sub__(self, other): """ Implementation of - operator, returns :class:`And` with error stop """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return self + And._ErrorStop() + other - def __rsub__(self, other ): + def __rsub__(self, other): """ Implementation of - operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other - self - def __mul__(self,other): + def __mul__(self, other): """ Implementation of * operator, allows use of ``expr * 3`` in place of ``expr + expr + expr``. Expressions may also me multiplied by a 2-integer - tuple, similar to ``{min,max}`` multipliers in regular expressions. Tuples + tuple, similar to ``{min, max}`` multipliers in regular expressions. 
Tuples may also include ``None`` as in: - - ``expr*(n,None)`` or ``expr*(n,)`` is equivalent + - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent to ``expr*n + ZeroOrMore(expr)`` (read as "at least n instances of ``expr``") - - ``expr*(None,n)`` is equivalent to ``expr*(0,n)`` + - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` (read as "0 to n instances of ``expr``") - - ``expr*(None,None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1,None)`` is equivalent to ``OneOrMore(expr)`` + - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - Note that ``expr*(None,n)`` does not raise an exception if + Note that ``expr*(None, n)`` does not raise an exception if more than n exprs exist in the input stream; that is, - ``expr*(None,n)`` does not enforce a maximum number of expr + ``expr*(None, n)`` does not enforce a maximum number of expr occurrences. If this behavior is desired, then write - ``expr*(None,n) + ~expr`` + ``expr*(None, n) + ~expr`` """ - if isinstance(other,int): - minElements, optElements = other,0 - elif isinstance(other,tuple): + if other is Ellipsis: + other = (0, None) + elif isinstance(other, tuple) and other[:1] == (Ellipsis,): + other = ((0, ) + other[1:] + (None,))[:2] + + if isinstance(other, int): + minElements, optElements = other, 0 + elif isinstance(other, tuple): + other = tuple(o if o is not Ellipsis else None for o in other) other = (other + (None, None))[:2] if other[0] is None: other = (0, other[1]) - if isinstance(other[0],int) and other[1] is None: + if isinstance(other[0], int) and other[1] is None: if other[0] == 0: return ZeroOrMore(self) if other[0] == 1: return OneOrMore(self) else: - return self*other[0] + ZeroOrMore(self) - elif isinstance(other[0],int) and isinstance(other[1],int): + return self * other[0] + ZeroOrMore(self) + elif isinstance(other[0], int) and isinstance(other[1], int): minElements, optElements = other optElements -= minElements else: - raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) + raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1])) else: raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) @@ -2093,108 +2242,152 @@ def __mul__(self,other): if optElements < 0: raise ValueError("second tuple value must be greater or equal to first tuple value") if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0,0)") + raise ValueError("cannot multiply ParserElement by 0 or (0, 0)") - if (optElements): + if optElements: def makeOptionalList(n): - if n>1: - return Optional(self + makeOptionalList(n-1)) + if n > 1: + return Optional(self + makeOptionalList(n - 1)) else: return Optional(self) if minElements: if minElements == 1: ret = self + makeOptionalList(optElements) else: - ret = And([self]*minElements) + makeOptionalList(optElements) + ret = And([self] * minElements) + makeOptionalList(optElements) else: ret = makeOptionalList(optElements) else: if minElements == 1: ret = self else: - ret = And([self]*minElements) + ret = And([self] * minElements) return ret def __rmul__(self, other): return self.__mul__(other) - def __or__(self, other ): + def __or__(self, other): """ Implementation of | operator - returns :class:`MatchFirst` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if other is 
Ellipsis: + return _PendingSkip(self, must_skip=True) + + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None - return MatchFirst( [ self, other ] ) + return MatchFirst([self, other]) - def __ror__(self, other ): + def __ror__(self, other): """ Implementation of | operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other | self - def __xor__(self, other ): + def __xor__(self, other): """ Implementation of ^ operator - returns :class:`Or` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None - return Or( [ self, other ] ) + return Or([self, other]) - def __rxor__(self, other ): + def __rxor__(self, other): """ Implementation of ^ operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other ^ self - def __and__(self, other ): + def __and__(self, other): """ Implementation of & operator - returns :class:`Each` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None - return Each( [ self, other ] ) + return Each([self, other]) - def __rand__(self, other ): + def __rand__(self, other): """ Implementation of & operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other & self - def __invert__( self ): + def __invert__(self): """ Implementation of ~ operator - returns :class:`NotAny` """ - return NotAny( self ) + return NotAny(self) + + def __iter__(self): + # must implement __iter__ to 
override legacy use of sequential access to __getitem__ to + # iterate over a sequence + raise TypeError('%r object is not iterable' % self.__class__.__name__) + + def __getitem__(self, key): + """ + use ``[]`` indexing notation as a short form for expression repetition: + - ``expr[n]`` is equivalent to ``expr*n`` + - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` + - ``expr[n, ...]`` or ``expr[n,]`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` + - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` + ``None`` may be used in place of ``...``. + + Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception + if more than ``n`` ``expr``s exist in the input stream. If this behavior is + desired, then write ``expr[..., n] + ~expr``. + """ + + # convert single arg keys to tuples + try: + if isinstance(key, str): + key = (key,) + iter(key) + except TypeError: + key = (key, key) + + if len(key) > 2: + warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5], + '... [{0}]'.format(len(key)) + if len(key) > 5 else '')) + + # clip to 2 elements + ret = self * tuple(key[:2]) + return ret def __call__(self, name=None): """ @@ -2208,22 +2401,22 @@ def __call__(self, name=None): Example:: # these are equivalent - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno") + userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") """ if name is not None: - return self.setResultsName(name) + return self._setResultsName(name) else: return self.copy() - def suppress( self ): + def suppress(self): """ Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from cluttering up returned output. """ - return Suppress( self ) + return Suppress(self) - def leaveWhitespace( self ): + def leaveWhitespace(self): """ Disables the skipping of whitespace before matching the characters in the :class:`ParserElement`'s defined pattern. This is normally only used internally by @@ -2232,7 +2425,7 @@ def leaveWhitespace( self ): self.skipWhitespace = False return self - def setWhitespaceChars( self, chars ): + def setWhitespaceChars(self, chars): """ Overrides the default whitespace chars """ @@ -2241,7 +2434,7 @@ def setWhitespaceChars( self, chars ): self.copyDefaultWhiteChars = False return self - def parseWithTabs( self ): + def parseWithTabs(self): """ Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string. 
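# --- Editor's note: illustrative sketch, not part of the vendored patch. ---
# The new __getitem__ above makes bracket notation a shorthand for
# repetition, mirroring the '*' operator:
#
#     from pyparsing import Word, alphas
#     word = Word(alphas)
#     word[2].parseString("a b")         # exactly two: like word * 2
#     word[1, ...].parseString("a b c")  # like OneOrMore(word)
#     word[...].parseString("")          # like ZeroOrMore(word)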
Must be called before ``parseString`` when the input grammar contains elements that @@ -2250,7 +2443,7 @@ def parseWithTabs( self ): self.keepTabs = True return self - def ignore( self, other ): + def ignore(self, other): """ Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other @@ -2267,14 +2460,14 @@ def ignore( self, other ): if isinstance(other, basestring): other = Suppress(other) - if isinstance( other, Suppress ): + if isinstance(other, Suppress): if other not in self.ignoreExprs: self.ignoreExprs.append(other) else: - self.ignoreExprs.append( Suppress( other.copy() ) ) + self.ignoreExprs.append(Suppress(other.copy())) return self - def setDebugActions( self, startAction, successAction, exceptionAction ): + def setDebugActions(self, startAction, successAction, exceptionAction): """ Enable display of debugging messages while doing pattern matching. """ @@ -2284,7 +2477,7 @@ def setDebugActions( self, startAction, successAction, exceptionAction ): self.debug = True return self - def setDebug( self, flag=True ): + def setDebug(self, flag=True): """ Enable display of debugging messages while doing pattern matching. Set ``flag`` to True to enable, False to disable. @@ -2322,32 +2515,32 @@ def setDebug( self, flag=True ): name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``. """ if flag: - self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) + self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction) else: self.debug = False return self - def __str__( self ): + def __str__(self): return self.name - def __repr__( self ): + def __repr__(self): return _ustr(self) - def streamline( self ): + def streamline(self): self.streamlined = True self.strRepr = None return self - def checkRecursion( self, parseElementList ): + def checkRecursion(self, parseElementList): pass - def validate( self, validateTrace=[] ): + def validate(self, validateTrace=None): """ Check defined expressions for valid structure, check for infinite recursive definitions. """ - self.checkRecursion( [] ) + self.checkRecursion([]) - def parseFile( self, file_or_filename, parseAll=False ): + def parseFile(self, file_or_filename, parseAll=False): """ Execute the parse expression on the given file or filename. 
If a filename is specified (instead of a file object), @@ -2367,24 +2560,27 @@ def parseFile( self, file_or_filename, parseAll=False ): # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc - def __eq__(self,other): + def __eq__(self, other): if isinstance(other, ParserElement): - return self is other or vars(self) == vars(other) + if PY_3: + self is other or super(ParserElement, self).__eq__(other) + else: + return self is other or vars(self) == vars(other) elif isinstance(other, basestring): return self.matches(other) else: - return super(ParserElement,self)==other + return super(ParserElement, self) == other - def __ne__(self,other): + def __ne__(self, other): return not (self == other) def __hash__(self): - return hash(id(self)) + return id(self) - def __req__(self,other): + def __req__(self, other): return self == other - def __rne__(self,other): + def __rne__(self, other): return not (self == other) def matches(self, testString, parseAll=True): @@ -2408,7 +2604,8 @@ def matches(self, testString, parseAll=True): return False def runTests(self, tests, parseAll=True, comment='#', - fullDump=True, printResults=True, failureTests=False, postParse=None): + fullDump=True, printResults=True, failureTests=False, postParse=None, + file=None): """ Execute the parse expression on a series of test strings, showing each test, the parsed results or where the parse failed. Quick and easy way to @@ -2425,6 +2622,8 @@ def runTests(self, tests, parseAll=True, comment='#', - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing - postParse - (default= ``None``) optional callback for successful parse results; called as `fn(test_string, parse_results)` and returns a string to be added to the test output + - file - (default=``None``) optional file-like object to which test output will be written; + if None, will default to ``sys.stdout`` Returns: a (success, results) tuple, where success indicates that all tests succeeded (or failed if ``failureTests`` is True), and the results contain a list of lines of each @@ -2504,9 +2703,15 @@ def runTests(self, tests, parseAll=True, comment='#', tests = list(map(str.strip, tests.rstrip().splitlines())) if isinstance(comment, basestring): comment = Literal(comment) + if file is None: + file = sys.stdout + print_ = file.write + allResults = [] comments = [] success = True + NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString) + BOM = u'\ufeff' for t in tests: if comment is not None and comment.matches(t, False) or comments and not t: comments.append(t) @@ -2517,24 +2722,15 @@ def runTests(self, tests, parseAll=True, comment='#', comments = [] try: # convert newline marks to actual newlines, and strip leading BOM if present - t = t.replace(r'\n','\n').lstrip('\ufeff') + t = NL.transformString(t.lstrip(BOM)) result = self.parseString(t, parseAll=parseAll) - out.append(result.dump(full=fullDump)) - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - out.append(str(pp_value)) - except Exception as e: - out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) except ParseBaseException as pe: fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" if '\n' in t: out.append(line(pe.loc, t)) - out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) + out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal) else: - out.append(' '*pe.loc + '^' + fatal) + out.append(' ' * pe.loc 
+ '^' + fatal) out.append("FAIL: " + str(pe)) success = success and failureTests result = pe @@ -2542,30 +2738,80 @@ def runTests(self, tests, parseAll=True, comment='#', out.append("FAIL-EXCEPTION: " + str(exc)) success = success and failureTests result = exc + else: + success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + if isinstance(pp_value, ParseResults): + out.append(pp_value.dump()) + else: + out.append(str(pp_value)) + else: + out.append(result.dump()) + except Exception as e: + out.append(result.dump(full=fullDump)) + out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) + else: + out.append(result.dump(full=fullDump)) if printResults: if fullDump: out.append('') - print('\n'.join(out)) + print_('\n'.join(out)) allResults.append((t, result)) return success, allResults +class _PendingSkip(ParserElement): + # internal placeholder class to hold a place were '...' is added to a parser element, + # once another ParserElement is added, this placeholder will be replaced with a SkipTo + def __init__(self, expr, must_skip=False): + super(_PendingSkip, self).__init__() + self.strRepr = str(expr + Empty()).replace('Empty', '...') + self.name = self.strRepr + self.anchor = expr + self.must_skip = must_skip + + def __add__(self, other): + skipper = SkipTo(other).setName("...")("_skipped*") + if self.must_skip: + def must_skip(t): + if not t._skipped or t._skipped.asList() == ['']: + del t[0] + t.pop("_skipped", None) + def show_skip(t): + if t._skipped.asList()[-1:] == ['']: + skipped = t.pop('_skipped') + t['_skipped'] = 'missing <' + repr(self.anchor) + '>' + return (self.anchor + skipper().addParseAction(must_skip) + | skipper().addParseAction(show_skip)) + other + + return self.anchor + skipper + other + + def __repr__(self): + return self.strRepr + + def parseImpl(self, *args): + raise Exception("use of `...` expression without following SkipTo target expression") + + class Token(ParserElement): """Abstract :class:`ParserElement` subclass, for defining atomic matching patterns. """ - def __init__( self ): - super(Token,self).__init__( savelist=False ) + def __init__(self): + super(Token, self).__init__(savelist=False) class Empty(Token): """An empty token, will always match. """ - def __init__( self ): - super(Empty,self).__init__() + def __init__(self): + super(Empty, self).__init__() self.name = "Empty" self.mayReturnEmpty = True self.mayIndexError = False @@ -2574,14 +2820,14 @@ def __init__( self ): class NoMatch(Token): """A token that will never match. """ - def __init__( self ): - super(NoMatch,self).__init__() + def __init__(self): + super(NoMatch, self).__init__() self.name = "NoMatch" self.mayReturnEmpty = True self.mayIndexError = False self.errmsg = "Unmatchable token" - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): raise ParseException(instring, loc, self.errmsg, self) @@ -2599,8 +2845,8 @@ class Literal(Token): For keyword matching (force word break before and after the matched string), use :class:`Keyword` or :class:`CaselessKeyword`. 
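# --- Editor's note: illustrative sketch, not part of the vendored patch. ---
# The word-break requirement that distinguishes Keyword from Literal, as the
# docstring above advises. Assuming the public pyparsing API:
#
#     from pyparsing import Keyword, Literal
#     Literal("if").searchString("ifdef endif")  # [['if'], ['if']] -- substring hits
#     Keyword("if").searchString("ifdef endif")  # [] -- needs word breaks around 'if'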
""" - def __init__( self, matchString ): - super(Literal,self).__init__() + def __init__(self, matchString): + super(Literal, self).__init__() self.match = matchString self.matchLen = len(matchString) try: @@ -2614,15 +2860,22 @@ def __init__( self, matchString ): self.mayReturnEmpty = False self.mayIndexError = False - # Performance tuning: this routine gets called a *lot* - # if this is a single character match string and the first character matches, - # short-circuit as quickly as possible, and avoid calling startswith - #~ @profile - def parseImpl( self, instring, loc, doActions=True ): - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) ): - return loc+self.matchLen, self.match + # Performance tuning: modify __class__ to select + # a parseImpl optimized for single-character check + if self.matchLen == 1 and type(self) is Literal: + self.__class__ = _SingleCharLiteral + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + +class _SingleCharLiteral(Literal): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar: + return loc + 1, self.match raise ParseException(instring, loc, self.errmsg, self) + _L = Literal ParserElement._literalStringClass = Literal @@ -2651,10 +2904,10 @@ class Keyword(Token): For case-insensitive matching, use :class:`CaselessKeyword`. """ - DEFAULT_KEYWORD_CHARS = alphanums+"_$" + DEFAULT_KEYWORD_CHARS = alphanums + "_$" - def __init__( self, matchString, identChars=None, caseless=False ): - super(Keyword,self).__init__() + def __init__(self, matchString, identChars=None, caseless=False): + super(Keyword, self).__init__() if identChars is None: identChars = Keyword.DEFAULT_KEYWORD_CHARS self.match = matchString @@ -2663,7 +2916,7 @@ def __init__( self, matchString, identChars=None, caseless=False ): self.firstMatchChar = matchString[0] except IndexError: warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) self.name = '"%s"' % self.match self.errmsg = "Expected " + self.name self.mayReturnEmpty = False @@ -2674,27 +2927,32 @@ def __init__( self, matchString, identChars=None, caseless=False ): identChars = identChars.upper() self.identChars = set(identChars) - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if self.caseless: - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and - (loc == 0 or instring[loc-1].upper() not in self.identChars) ): - return loc+self.matchLen, self.match + if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch) + and (loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen].upper() not in self.identChars) + and (loc == 0 + or instring[loc - 1].upper() not in self.identChars)): + return loc + self.matchLen, self.match + else: - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and - (loc == 0 or instring[loc-1] not in self.identChars) ): - return loc+self.matchLen, self.match + if instring[loc] == self.firstMatchChar: + if 
((self.matchLen == 1 or instring.startswith(self.match, loc)) + and (loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen] not in self.identChars) + and (loc == 0 or instring[loc - 1] not in self.identChars)): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) def copy(self): - c = super(Keyword,self).copy() + c = super(Keyword, self).copy() c.identChars = Keyword.DEFAULT_KEYWORD_CHARS return c @staticmethod - def setDefaultKeywordChars( chars ): + def setDefaultKeywordChars(chars): """Overrides the default Keyword chars """ Keyword.DEFAULT_KEYWORD_CHARS = chars @@ -2710,16 +2968,16 @@ class CaselessLiteral(Literal): (Contrast with example for :class:`CaselessKeyword`.) """ - def __init__( self, matchString ): - super(CaselessLiteral,self).__init__( matchString.upper() ) + def __init__(self, matchString): + super(CaselessLiteral, self).__init__(matchString.upper()) # Preserve the defining literal. self.returnString = matchString self.name = "'%s'" % self.returnString self.errmsg = "Expected " + self.name - def parseImpl( self, instring, loc, doActions=True ): - if instring[ loc:loc+self.matchLen ].upper() == self.match: - return loc+self.matchLen, self.returnString + def parseImpl(self, instring, loc, doActions=True): + if instring[loc:loc + self.matchLen].upper() == self.match: + return loc + self.matchLen, self.returnString raise ParseException(instring, loc, self.errmsg, self) class CaselessKeyword(Keyword): @@ -2732,8 +2990,8 @@ class CaselessKeyword(Keyword): (Contrast with example for :class:`CaselessLiteral`.) """ - def __init__( self, matchString, identChars=None ): - super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) + def __init__(self, matchString, identChars=None): + super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True) class CloseMatch(Token): """A variation on :class:`Literal` which matches "close" matches, @@ -2769,7 +3027,7 @@ class CloseMatch(Token): patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) """ def __init__(self, match_string, maxMismatches=1): - super(CloseMatch,self).__init__() + super(CloseMatch, self).__init__() self.name = match_string self.match_string = match_string self.maxMismatches = maxMismatches @@ -2777,7 +3035,7 @@ def __init__(self, match_string, maxMismatches=1): self.mayIndexError = False self.mayReturnEmpty = False - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): start = loc instrlen = len(instring) maxloc = start + len(self.match_string) @@ -2788,8 +3046,8 @@ def parseImpl( self, instring, loc, doActions=True ): mismatches = [] maxMismatches = self.maxMismatches - for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): - src,mat = s_m + for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)): + src, mat = s_m if src != mat: mismatches.append(match_stringloc) if len(mismatches) > maxMismatches: @@ -2797,7 +3055,7 @@ def parseImpl( self, instring, loc, doActions=True ): else: loc = match_stringloc + 1 results = ParseResults([instring[start:loc]]) - results['original'] = self.match_string + results['original'] = match_string results['mismatches'] = mismatches return loc, results @@ -2849,7 +3107,7 @@ class Word(Token): capital_word = Word(alphas.upper(), alphas.lower()) # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, 
alphanums+'-') + hostname = Word(alphas, alphanums + '-') # roman numeral (not a strict parser, accepts invalid mix of characters) roman = Word("IVXLCDM") @@ -2857,15 +3115,16 @@ class Word(Token): # any string of non-whitespace characters, except for ',' csv_value = Word(printables, excludeChars=",") """ - def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): - super(Word,self).__init__() + def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None): + super(Word, self).__init__() if excludeChars: + excludeChars = set(excludeChars) initChars = ''.join(c for c in initChars if c not in excludeChars) if bodyChars: bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) self.initCharsOrig = initChars self.initChars = set(initChars) - if bodyChars : + if bodyChars: self.bodyCharsOrig = bodyChars self.bodyChars = set(bodyChars) else: @@ -2893,34 +3152,28 @@ def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword= self.mayIndexError = False self.asKeyword = asKeyword - if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): + if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0): if self.bodyCharsOrig == self.initCharsOrig: self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) elif len(self.initCharsOrig) == 1: - self.reString = "%s[%s]*" % \ - (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) + self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) else: - self.reString = "[%s][%s]*" % \ - (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) + self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) if self.asKeyword: - self.reString = r"\b"+self.reString+r"\b" + self.reString = r"\b" + self.reString + r"\b" + try: - self.re = re.compile( self.reString ) + self.re = re.compile(self.reString) except Exception: self.re = None + else: + self.re_match = self.re.match + self.__class__ = _WordRegex - def parseImpl( self, instring, loc, doActions=True ): - if self.re: - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - if not(instring[ loc ] in self.initChars): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.initChars: raise ParseException(instring, loc, self.errmsg, self) start = loc @@ -2928,17 +3181,18 @@ def parseImpl( self, instring, loc, doActions=True ): instrlen = len(instring) bodychars = self.bodyChars maxloc = start + self.maxLen - maxloc = min( maxloc, instrlen ) + maxloc = min(maxloc, instrlen) while loc < maxloc and instring[loc] in bodychars: loc += 1 throwException = False if loc - start < self.minLen: throwException = True - if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: + elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: throwException = True - if self.asKeyword: - if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars): + elif self.asKeyword: + if (start > 0 and instring[start - 1] in bodychars + or loc < instrlen and instring[loc] in bodychars): throwException = True if throwException: @@ -2946,38 +3200,49 @@ def parseImpl( self, instring, 
loc, doActions=True ): return loc, instring[start:loc] - def __str__( self ): + def __str__(self): try: - return super(Word,self).__str__() + return super(Word, self).__str__() except Exception: pass - if self.strRepr is None: def charsAsStr(s): - if len(s)>4: - return s[:4]+"..." + if len(s) > 4: + return s[:4] + "..." else: return s - if ( self.initCharsOrig != self.bodyCharsOrig ): - self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) + if self.initCharsOrig != self.bodyCharsOrig: + self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig)) else: self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) return self.strRepr +class _WordRegex(Word): + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + -class Char(Word): +class Char(_WordRegex): """A short-cut class for defining ``Word(characters, exact=1)``, when defining a match of any single character in a string of characters. """ - def __init__(self, charset): - super(Char, self).__init__(charset, exact=1) - self.reString = "[%s]" % _escapeRegexRangeChars(self.initCharsOrig) - self.re = re.compile( self.reString ) + def __init__(self, charset, asKeyword=False, excludeChars=None): + super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars) + self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars)) + if asKeyword: + self.reString = r"\b%s\b" % self.reString + self.re = re.compile(self.reString) + self.re_match = self.re.match class Regex(Token): @@ -2995,18 +3260,18 @@ class Regex(Token): roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") """ compiledREtype = type(re.compile("[A-Z]")) - def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False): + def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False): """The parameters ``pattern`` and ``flags`` are passed to the ``re.compile()`` function as-is. See the Python `re module <https://docs.python.org/3/library/re.html>`_ module for an explanation of the acceptable patterns and flags. 
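        For example, a quick sketch using only these documented parameters::

            ssn = Regex(r"\d{3}-\d{2}-\d{4}")
            print(ssn.parseString("123-45-6789"))  # -> ['123-45-6789']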
""" - super(Regex,self).__init__() + super(Regex, self).__init__() if isinstance(pattern, basestring): if not pattern: warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) self.pattern = pattern self.flags = flags @@ -3016,46 +3281,64 @@ def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False): self.reString = self.pattern except sre_constants.error: warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) raise elif isinstance(pattern, Regex.compiledREtype): self.re = pattern - self.pattern = \ - self.reString = str(pattern) + self.pattern = self.reString = str(pattern) self.flags = flags else: raise ValueError("Regex may only be constructed with a string or a compiled RE object") + self.re_match = self.re.match + self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.mayReturnEmpty = True self.asGroupList = asGroupList self.asMatch = asMatch + if self.asGroupList: + self.parseImpl = self.parseImplAsGroupList + if self.asMatch: + self.parseImpl = self.parseImplAsMatch - def parseImpl( self, instring, loc, doActions=True ): - result = self.re.match(instring,loc) + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) if not result: raise ParseException(instring, loc, self.errmsg, self) loc = result.end() - if self.asMatch: - ret = result - elif self.asGroupList: - ret = result.groups() - else: - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc,ret - - def __str__( self ): + ret = ParseResults(result.group()) + d = result.groupdict() + if d: + for k, v in d.items(): + ret[k] = v + return loc, ret + + def parseImplAsGroupList(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.groups() + return loc, ret + + def parseImplAsMatch(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result + return loc, ret + + def __str__(self): try: - return super(Regex,self).__str__() + return super(Regex, self).__str__() except Exception: pass @@ -3065,7 +3348,7 @@ def __str__( self ): return self.strRepr def sub(self, repl): - """ + r""" Return Regex with an attached parse action to transform the parsed result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. 
@@ -3077,12 +3360,12 @@ def sub(self, repl): """ if self.asGroupList: warnings.warn("cannot use sub() with Regex(asGroupList=True)", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch and callable(repl): warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch: @@ -3102,20 +3385,20 @@ class QuotedString(Token): - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash - (default= ``None`` ) + (default= ``None``) - escQuote - special quote sequence to escape an embedded quote string (such as SQL's ``""`` to escape an embedded ``"``) - (default= ``None`` ) + (default= ``None``) - multiline - boolean indicating whether quotes can span - multiple lines (default= ``False`` ) + multiple lines (default= ``False``) - unquoteResults - boolean indicating whether the matched text - should be unquoted (default= ``True`` ) + should be unquoted (default= ``True``) - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default= ``None`` => same as quoteChar) - convertWhitespaceEscapes - convert escaped whitespace (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True`` ) + (default= ``True``) Example:: @@ -3132,13 +3415,14 @@ class QuotedString(Token): [['This is the "quote"']] [['This is the quote with "embedded" quotes']] """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): - super(QuotedString,self).__init__() + def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, + unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): + super(QuotedString, self).__init__() # remove white space from quote chars - wont work anyway quoteChar = quoteChar.strip() if not quoteChar: - warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) + warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) raise SyntaxError() if endQuoteChar is None: @@ -3146,7 +3430,7 @@ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unq else: endQuoteChar = endQuoteChar.strip() if not endQuoteChar: - warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) + warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) raise SyntaxError() self.quoteChar = quoteChar @@ -3161,35 +3445,34 @@ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unq if multiline: self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) + self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar), + _escapeRegexRangeChars(self.endQuoteChar[0]), + (escChar is not None and _escapeRegexRangeChars(escChar) or '')) else: self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) + self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar), + _escapeRegexRangeChars(self.endQuoteChar[0]), + (escChar is not None and _escapeRegexRangeChars(escChar) or '')) if 
len(self.endQuoteChar) > 1: self.pattern += ( '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar)-1,0,-1)) + ')' - ) + _escapeRegexRangeChars(self.endQuoteChar[i])) + for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')') + if escQuote: self.pattern += (r'|(?:%s)' % re.escape(escQuote)) if escChar: self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar)+"(.)" + self.escCharReplacePattern = re.escape(self.escChar) + "(.)" self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) try: self.re = re.compile(self.pattern, self.flags) self.reString = self.pattern + self.re_match = self.re.match except sre_constants.error: warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) raise self.name = _ustr(self) @@ -3197,8 +3480,8 @@ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unq self.mayIndexError = False self.mayReturnEmpty = True - def parseImpl( self, instring, loc, doActions=True ): - result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None + def parseImpl(self, instring, loc, doActions=True): + result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None if not result: raise ParseException(instring, loc, self.errmsg, self) @@ -3208,18 +3491,18 @@ def parseImpl( self, instring, loc, doActions=True ): if self.unquoteResults: # strip off quotes - ret = ret[self.quoteCharLen:-self.endQuoteCharLen] + ret = ret[self.quoteCharLen: -self.endQuoteCharLen] - if isinstance(ret,basestring): + if isinstance(ret, basestring): # replace escaped whitespace if '\\' in ret and self.convertWhitespaceEscapes: ws_map = { - r'\t' : '\t', - r'\n' : '\n', - r'\f' : '\f', - r'\r' : '\r', + r'\t': '\t', + r'\n': '\n', + r'\f': '\f', + r'\r': '\r', } - for wslit,wschar in ws_map.items(): + for wslit, wschar in ws_map.items(): ret = ret.replace(wslit, wschar) # replace escaped characters @@ -3232,9 +3515,9 @@ def parseImpl( self, instring, loc, doActions=True ): return loc, ret - def __str__( self ): + def __str__(self): try: - return super(QuotedString,self).__str__() + return super(QuotedString, self).__str__() except Exception: pass @@ -3264,15 +3547,14 @@ class CharsNotIn(Token): ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] """ - def __init__( self, notChars, min=1, max=0, exact=0 ): - super(CharsNotIn,self).__init__() + def __init__(self, notChars, min=1, max=0, exact=0): + super(CharsNotIn, self).__init__() self.skipWhitespace = False self.notChars = notChars if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use " + - "Optional(CharsNotIn()) if zero-length char group is permitted") + raise ValueError("cannot specify a minimum length < 1; use " + "Optional(CharsNotIn()) if zero-length char group is permitted") self.minLen = min @@ -3287,19 +3569,18 @@ def __init__( self, notChars, min=1, max=0, exact=0 ): self.name = _ustr(self) self.errmsg = "Expected " + self.name - self.mayReturnEmpty = ( self.minLen == 0 ) + self.mayReturnEmpty = (self.minLen == 0) self.mayIndexError = False - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if instring[loc] in self.notChars: raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 notchars = self.notChars - maxlen = min( start+self.maxLen, len(instring) ) - while loc < 
maxlen and \ - (instring[loc] not in notchars): + maxlen = min(start + self.maxLen, len(instring)) + while loc < maxlen and instring[loc] not in notchars: loc += 1 if loc - start < self.minLen: @@ -3307,7 +3588,7 @@ def parseImpl( self, instring, loc, doActions=True ): return loc, instring[start:loc] - def __str__( self ): + def __str__(self): try: return super(CharsNotIn, self).__str__() except Exception: @@ -3356,10 +3637,10 @@ class White(Token): 'u\3000': '<IDEOGRAPHIC_SPACE>', } def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White,self).__init__() + super(White, self).__init__() self.matchWhite = ws - self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) - #~ self.leaveWhitespace() + self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite)) + # ~ self.leaveWhitespace() self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) self.mayReturnEmpty = True self.errmsg = "Expected " + self.name @@ -3375,13 +3656,13 @@ def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): self.maxLen = exact self.minLen = exact - def parseImpl( self, instring, loc, doActions=True ): - if not(instring[ loc ] in self.matchWhite): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.matchWhite: raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 maxloc = start + self.maxLen - maxloc = min( maxloc, len(instring) ) + maxloc = min(maxloc, len(instring)) while loc < maxloc and instring[loc] in self.matchWhite: loc += 1 @@ -3392,9 +3673,9 @@ def parseImpl( self, instring, loc, doActions=True ): class _PositionToken(Token): - def __init__( self ): - super(_PositionToken,self).__init__() - self.name=self.__class__.__name__ + def __init__(self): + super(_PositionToken, self).__init__() + self.name = self.__class__.__name__ self.mayReturnEmpty = True self.mayIndexError = False @@ -3402,30 +3683,30 @@ class GoToColumn(_PositionToken): """Token to advance to a specific column of input text; useful for tabular report scraping. 
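    A minimal sketch (column numbers are 1-based; names are illustrative, and
    only whitespace is skipped over on the way to the target column)::

        # read a row label, skip to column 10, then read the value
        row = Word(alphas) + GoToColumn(10) + Word(nums)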
""" - def __init__( self, colno ): - super(GoToColumn,self).__init__() + def __init__(self, colno): + super(GoToColumn, self).__init__() self.col = colno - def preParse( self, instring, loc ): - if col(loc,instring) != self.col: + def preParse(self, instring, loc): + if col(loc, instring) != self.col: instrlen = len(instring) if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : + loc = self._skipIgnorables(instring, loc) + while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col: loc += 1 return loc - def parseImpl( self, instring, loc, doActions=True ): - thiscol = col( loc, instring ) + def parseImpl(self, instring, loc, doActions=True): + thiscol = col(loc, instring) if thiscol > self.col: - raise ParseException( instring, loc, "Text not in expected column", self ) + raise ParseException(instring, loc, "Text not in expected column", self) newloc = loc + self.col - thiscol - ret = instring[ loc: newloc ] + ret = instring[loc: newloc] return newloc, ret class LineStart(_PositionToken): - """Matches if current position is at the beginning of a line within + r"""Matches if current position is at the beginning of a line within the parse string Example:: @@ -3446,11 +3727,11 @@ class LineStart(_PositionToken): ['AAA', ' and this line'] """ - def __init__( self ): - super(LineStart,self).__init__() + def __init__(self): + super(LineStart, self).__init__() self.errmsg = "Expected start of line" - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if col(loc, instring) == 1: return loc, [] raise ParseException(instring, loc, self.errmsg, self) @@ -3459,19 +3740,19 @@ class LineEnd(_PositionToken): """Matches if current position is at the end of a line within the parse string """ - def __init__( self ): - super(LineEnd,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) + def __init__(self): + super(LineEnd, self).__init__() + self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", "")) self.errmsg = "Expected end of line" - def parseImpl( self, instring, loc, doActions=True ): - if loc<len(instring): + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): if instring[loc] == "\n": - return loc+1, "\n" + return loc + 1, "\n" else: raise ParseException(instring, loc, self.errmsg, self) elif loc == len(instring): - return loc+1, [] + return loc + 1, [] else: raise ParseException(instring, loc, self.errmsg, self) @@ -3479,29 +3760,29 @@ class StringStart(_PositionToken): """Matches if current position is at the beginning of the parse string """ - def __init__( self ): - super(StringStart,self).__init__() + def __init__(self): + super(StringStart, self).__init__() self.errmsg = "Expected start of text" - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if loc != 0: # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse( instring, 0 ): + if loc != self.preParse(instring, 0): raise ParseException(instring, loc, self.errmsg, self) return loc, [] class StringEnd(_PositionToken): """Matches if current position is at the end of the parse string """ - def __init__( self ): - super(StringEnd,self).__init__() + def __init__(self): + super(StringEnd, self).__init__() self.errmsg = "Expected end of text" - def parseImpl( self, instring, loc, 
doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if loc < len(instring): raise ParseException(instring, loc, self.errmsg, self) elif loc == len(instring): - return loc+1, [] + return loc + 1, [] elif loc > len(instring): return loc, [] else: @@ -3516,15 +3797,15 @@ class WordStart(_PositionToken): the beginning of the string being parsed, or at the beginning of a line. """ - def __init__(self, wordChars = printables): - super(WordStart,self).__init__() + def __init__(self, wordChars=printables): + super(WordStart, self).__init__() self.wordChars = set(wordChars) self.errmsg = "Not at the start of a word" - def parseImpl(self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if loc != 0: - if (instring[loc-1] in self.wordChars or - instring[loc] not in self.wordChars): + if (instring[loc - 1] in self.wordChars + or instring[loc] not in self.wordChars): raise ParseException(instring, loc, self.errmsg, self) return loc, [] @@ -3536,17 +3817,17 @@ class WordEnd(_PositionToken): will also match at the end of the string being parsed, or at the end of a line. """ - def __init__(self, wordChars = printables): - super(WordEnd,self).__init__() + def __init__(self, wordChars=printables): + super(WordEnd, self).__init__() self.wordChars = set(wordChars) self.skipWhitespace = False self.errmsg = "Not at the end of a word" - def parseImpl(self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): instrlen = len(instring) - if instrlen>0 and loc<instrlen: + if instrlen > 0 and loc < instrlen: if (instring[loc] in self.wordChars or - instring[loc-1] not in self.wordChars): + instring[loc - 1] not in self.wordChars): raise ParseException(instring, loc, self.errmsg, self) return loc, [] @@ -3555,90 +3836,89 @@ class ParseExpression(ParserElement): """Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
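    The concrete combinators below are normally built with operators rather
    than constructed directly - a quick sketch of the mapping::

        a, b = Literal("a"), Literal("b")
        a + b    # And
        a | b    # MatchFirst
        a ^ b    # Or
        a & b    # Each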
""" - def __init__( self, exprs, savelist = False ): - super(ParseExpression,self).__init__(savelist) - if isinstance( exprs, _generatorType ): + def __init__(self, exprs, savelist=False): + super(ParseExpression, self).__init__(savelist) + if isinstance(exprs, _generatorType): exprs = list(exprs) - if isinstance( exprs, basestring ): - self.exprs = [ ParserElement._literalStringClass( exprs ) ] - elif isinstance( exprs, Iterable ): + if isinstance(exprs, basestring): + self.exprs = [self._literalStringClass(exprs)] + elif isinstance(exprs, ParserElement): + self.exprs = [exprs] + elif isinstance(exprs, Iterable): exprs = list(exprs) # if sequence of strings provided, wrap with Literal - if all(isinstance(expr, basestring) for expr in exprs): - exprs = map(ParserElement._literalStringClass, exprs) + if any(isinstance(expr, basestring) for expr in exprs): + exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs) self.exprs = list(exprs) else: try: - self.exprs = list( exprs ) + self.exprs = list(exprs) except TypeError: - self.exprs = [ exprs ] + self.exprs = [exprs] self.callPreparse = False - def __getitem__( self, i ): - return self.exprs[i] - - def append( self, other ): - self.exprs.append( other ) + def append(self, other): + self.exprs.append(other) self.strRepr = None return self - def leaveWhitespace( self ): + def leaveWhitespace(self): """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on all contained expressions.""" self.skipWhitespace = False - self.exprs = [ e.copy() for e in self.exprs ] + self.exprs = [e.copy() for e in self.exprs] for e in self.exprs: e.leaveWhitespace() return self - def ignore( self, other ): - if isinstance( other, Suppress ): + def ignore(self, other): + if isinstance(other, Suppress): if other not in self.ignoreExprs: - super( ParseExpression, self).ignore( other ) + super(ParseExpression, self).ignore(other) for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) + e.ignore(self.ignoreExprs[-1]) else: - super( ParseExpression, self).ignore( other ) + super(ParseExpression, self).ignore(other) for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) + e.ignore(self.ignoreExprs[-1]) return self - def __str__( self ): + def __str__(self): try: - return super(ParseExpression,self).__str__() + return super(ParseExpression, self).__str__() except Exception: pass if self.strRepr is None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) + self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs)) return self.strRepr - def streamline( self ): - super(ParseExpression,self).streamline() + def streamline(self): + super(ParseExpression, self).streamline() for e in self.exprs: e.streamline() - # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) + # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d) # but only if there are no parse actions or resultsNames on the nested And's # (likewise for Or's and MatchFirst's) - if ( len(self.exprs) == 2 ): + if len(self.exprs) == 2: other = self.exprs[0] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): - self.exprs = other.exprs[:] + [ self.exprs[1] ] + if (isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug): + self.exprs = other.exprs[:] + [self.exprs[1]] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty 
self.mayIndexError |= other.mayIndexError other = self.exprs[-1] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): + if (isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug): self.exprs = self.exprs[:-1] + other.exprs[:] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty @@ -3648,21 +3928,31 @@ def streamline( self ): return self - def setResultsName( self, name, listAllMatches=False ): - ret = super(ParseExpression,self).setResultsName(name,listAllMatches) - return ret - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] + def validate(self, validateTrace=None): + tmp = (validateTrace if validateTrace is not None else [])[:] + [self] for e in self.exprs: e.validate(tmp) - self.checkRecursion( [] ) + self.checkRecursion([]) def copy(self): - ret = super(ParseExpression,self).copy() + ret = super(ParseExpression, self).copy() ret.exprs = [e.copy() for e in self.exprs] return ret + def _setResultsName(self, name, listAllMatches=False): + if __diag__.warn_ungrouped_named_tokens_in_collection: + for e in self.exprs: + if isinstance(e, ParserElement) and e.resultsName: + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName), + stacklevel=3) + + return super(ParseExpression, self)._setResultsName(name, listAllMatches) + + class And(ParseExpression): """ Requires all given :class:`ParseExpression` s to be found in the given order. @@ -3676,33 +3966,58 @@ class And(ParseExpression): integer = Word(nums) name_expr = OneOrMore(Word(alphas)) - expr = And([integer("id"),name_expr("name"),integer("age")]) + expr = And([integer("id"), name_expr("name"), integer("age")]) # more easily written as: expr = integer("id") + name_expr("name") + integer("age") """ class _ErrorStop(Empty): def __init__(self, *args, **kwargs): - super(And._ErrorStop,self).__init__(*args, **kwargs) + super(And._ErrorStop, self).__init__(*args, **kwargs) self.name = '-' self.leaveWhitespace() - def __init__( self, exprs, savelist = True ): - super(And,self).__init__(exprs, savelist) + def __init__(self, exprs, savelist=True): + if exprs and Ellipsis in exprs: + tmp = [] + for i, expr in enumerate(exprs): + if expr is Ellipsis: + if i < len(exprs) - 1: + skipto_arg = (Empty() + exprs[i + 1]).exprs[-1] + tmp.append(SkipTo(skipto_arg)("_skipped*")) + else: + raise Exception("cannot construct And with sequence ending in ...") + else: + tmp.append(expr) + exprs[:] = tmp + super(And, self).__init__(exprs, savelist) self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.setWhitespaceChars( self.exprs[0].whiteChars ) + self.setWhitespaceChars(self.exprs[0].whiteChars) self.skipWhitespace = self.exprs[0].skipWhitespace self.callPreparse = True def streamline(self): + # collapse any _PendingSkip's + if self.exprs: + if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1]): + for i, e in enumerate(self.exprs[:-1]): + if e is None: + continue + if (isinstance(e, ParseExpression) + and e.exprs and isinstance(e.exprs[-1], _PendingSkip)): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = None + self.exprs = [e for e in self.exprs if e is not None] + super(And, self).streamline() self.mayReturnEmpty = 
all(e.mayReturnEmpty for e in self.exprs) return self - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): # pass False as last arg to _parse for first element, since we already # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) + loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False) errorStop = False for e in self.exprs[1:]: if isinstance(e, And._ErrorStop): @@ -3710,7 +4025,7 @@ def parseImpl( self, instring, loc, doActions=True ): continue if errorStop: try: - loc, exprtokens = e._parse( instring, loc, doActions ) + loc, exprtokens = e._parse(instring, loc, doActions) except ParseSyntaxException: raise except ParseBaseException as pe: @@ -3719,25 +4034,25 @@ def parseImpl( self, instring, loc, doActions=True ): except IndexError: raise ParseSyntaxException(instring, len(instring), self.errmsg, self) else: - loc, exprtokens = e._parse( instring, loc, doActions ) + loc, exprtokens = e._parse(instring, loc, doActions) if exprtokens or exprtokens.haskeys(): resultlist += exprtokens return loc, resultlist - def __iadd__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #And( [ self, other ] ) + def __iadd__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + return self.append(other) # And([self, other]) - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] for e in self.exprs: - e.checkRecursion( subRecCheckList ) + e.checkRecursion(subRecCheckList) if not e.mayReturnEmpty: break - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -3763,8 +4078,8 @@ class Or(ParseExpression): [['123'], ['3.1416'], ['789']] """ - def __init__( self, exprs, savelist = False ): - super(Or,self).__init__(exprs, savelist) + def __init__(self, exprs, savelist=False): + super(Or, self).__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) else: @@ -3772,16 +4087,17 @@ def __init__( self, exprs, savelist = False ): def streamline(self): super(Or, self).streamline() - self.saveAsList = any(e.saveAsList for e in self.exprs) + if __compat__.collect_all_And_tokens: + self.saveAsList = any(e.saveAsList for e in self.exprs) return self - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): maxExcLoc = -1 maxException = None matches = [] for e in self.exprs: try: - loc2 = e.tryParse( instring, loc ) + loc2 = e.tryParse(instring, loc) except ParseException as err: err.__traceback__ = None if err.loc > maxExcLoc: @@ -3789,22 +4105,45 @@ def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) + maxException = ParseException(instring, len(instring), e.errmsg, self) maxExcLoc = len(instring) else: # save match among all matches, to retry longest to shortest matches.append((loc2, e)) if matches: - matches.sort(key=lambda x: -x[0]) - for _,e in matches: + # re-evaluate all matches in descending order of length of match, in case attached actions + # might 
change whether or how much they match of the input. + matches.sort(key=itemgetter(0), reverse=True) + + if not doActions: + # no further conditions or parse actions to change the selection of + # alternative, so the first match will be the best match + best_expr = matches[0][1] + return best_expr._parse(instring, loc, doActions) + + longest = -1, None + for loc1, expr1 in matches: + if loc1 <= longest[0]: + # already have a longer match than this one will deliver, we are done + return longest + try: - return e._parse( instring, loc, doActions ) + loc2, toks = expr1._parse(instring, loc, doActions) except ParseException as err: err.__traceback__ = None if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc + else: + if loc2 >= loc1: + return loc2, toks + # didn't match as much as before + elif loc2 > longest[0]: + longest = loc2, toks + + if longest != (-1, None): + return longest if maxException is not None: maxException.msg = self.errmsg @@ -3813,13 +4152,13 @@ def parseImpl( self, instring, loc, doActions=True ): raise ParseException(instring, loc, "no defined alternatives to match", self) - def __ixor__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #Or( [ self, other ] ) + def __ixor__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + return self.append(other) # Or([self, other]) - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -3827,10 +4166,22 @@ def __str__( self ): return self.strRepr - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] for e in self.exprs: - e.checkRecursion( subRecCheckList ) + e.checkRecursion(subRecCheckList) + + def _setResultsName(self, name, listAllMatches=False): + if (not __compat__.collect_all_And_tokens + and __diag__.warn_multiple_tokens_in_named_alternation): + if any(isinstance(e, And) for e in self.exprs): + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "may only return a single token for an And alternative, " + "in future will return the full list of tokens".format( + "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), + stacklevel=3) + + return super(Or, self)._setResultsName(name, listAllMatches) class MatchFirst(ParseExpression): @@ -3850,25 +4201,25 @@ class MatchFirst(ParseExpression): number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] """ - def __init__( self, exprs, savelist = False ): - super(MatchFirst,self).__init__(exprs, savelist) + def __init__(self, exprs, savelist=False): + super(MatchFirst, self).__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - # self.saveAsList = any(e.saveAsList for e in self.exprs) else: self.mayReturnEmpty = True def streamline(self): super(MatchFirst, self).streamline() - self.saveAsList = any(e.saveAsList for e in self.exprs) + if __compat__.collect_all_And_tokens: + self.saveAsList = any(e.saveAsList for e in self.exprs) return self - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): maxExcLoc = -1 maxException = None for e in self.exprs: try: - ret = e._parse( instring, loc, doActions ) + ret = e._parse(instring, loc, doActions) return ret except ParseException as err: if err.loc > maxExcLoc: @@ -3876,7 +4227,7 @@ def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) + maxException = ParseException(instring, len(instring), e.errmsg, self) maxExcLoc = len(instring) # only got here if no expression matched, raise exception for match that made it the furthest @@ -3887,13 +4238,13 @@ def parseImpl( self, instring, loc, doActions=True ): else: raise ParseException(instring, loc, "no defined alternatives to match", self) - def __ior__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #MatchFirst( [ self, other ] ) + def __ior__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + return self.append(other) # MatchFirst([self, other]) - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -3901,10 +4252,22 @@ def __str__( self ): return self.strRepr - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] for e in self.exprs: - e.checkRecursion( subRecCheckList ) + e.checkRecursion(subRecCheckList) + + def _setResultsName(self, name, listAllMatches=False): + if (not __compat__.collect_all_And_tokens + and __diag__.warn_multiple_tokens_in_named_alternation): + if any(isinstance(e, And) for e in self.exprs): + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "may only return a single token for an And alternative, " + "in future will return the full list of tokens".format( + "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), + stacklevel=3) + + return super(MatchFirst, self)._setResultsName(name, listAllMatches) class Each(ParseExpression): @@ -3964,8 +4327,8 @@ class Each(ParseExpression): - shape: TRIANGLE - size: 20 """ - def __init__( self, exprs, savelist = True ): - super(Each,self).__init__(exprs, savelist) + def __init__(self, exprs, savelist=True): + super(Each, self).__init__(exprs, savelist) self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) self.skipWhitespace = True self.initExprGroups = True @@ -3976,15 +4339,15 @@ def streamline(self): self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) return self - def 
parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if self.initExprGroups: - self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) - opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] - opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)] + self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional)) + opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)] + opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, Optional)] self.optionals = opt1 + opt2 - self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] - self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] - self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] + self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)] + self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)] + self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))] self.required += self.multirequired self.initExprGroups = False tmpLoc = loc @@ -3998,11 +4361,11 @@ def parseImpl( self, instring, loc, doActions=True ): failed = [] for e in tmpExprs: try: - tmpLoc = e.tryParse( instring, tmpLoc ) + tmpLoc = e.tryParse(instring, tmpLoc) except ParseException: failed.append(e) else: - matchOrder.append(self.opt1map.get(id(e),e)) + matchOrder.append(self.opt1map.get(id(e), e)) if e in tmpReqd: tmpReqd.remove(e) elif e in tmpOpt: @@ -4012,21 +4375,21 @@ def parseImpl( self, instring, loc, doActions=True ): if tmpReqd: missing = ", ".join(_ustr(e) for e in tmpReqd) - raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) + raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing) # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] + matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt] resultlist = [] for e in matchOrder: - loc,results = e._parse(instring,loc,doActions) + loc, results = e._parse(instring, loc, doActions) resultlist.append(results) finalResults = sum(resultlist, ParseResults([])) return loc, finalResults - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4034,86 +4397,88 @@ def __str__( self ): return self.strRepr - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] for e in self.exprs: - e.checkRecursion( subRecCheckList ) + e.checkRecursion(subRecCheckList) class ParseElementEnhance(ParserElement): """Abstract subclass of :class:`ParserElement`, for combining and post-processing parsed tokens. 
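    Enhancers wrap a single contained expression - a brief sketch::

        word = Word(alphas)
        print(OneOrMore(word).parseString("a b c"))         # -> ['a', 'b', 'c']
        print(Group(OneOrMore(word)).parseString("a b c"))  # -> [['a', 'b', 'c']]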
""" - def __init__( self, expr, savelist=False ): - super(ParseElementEnhance,self).__init__(savelist) - if isinstance( expr, basestring ): - if issubclass(ParserElement._literalStringClass, Token): - expr = ParserElement._literalStringClass(expr) + def __init__(self, expr, savelist=False): + super(ParseElementEnhance, self).__init__(savelist) + if isinstance(expr, basestring): + if issubclass(self._literalStringClass, Token): + expr = self._literalStringClass(expr) else: - expr = ParserElement._literalStringClass(Literal(expr)) + expr = self._literalStringClass(Literal(expr)) self.expr = expr self.strRepr = None if expr is not None: self.mayIndexError = expr.mayIndexError self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars( expr.whiteChars ) + self.setWhitespaceChars(expr.whiteChars) self.skipWhitespace = expr.skipWhitespace self.saveAsList = expr.saveAsList self.callPreparse = expr.callPreparse self.ignoreExprs.extend(expr.ignoreExprs) - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if self.expr is not None: - return self.expr._parse( instring, loc, doActions, callPreParse=False ) + return self.expr._parse(instring, loc, doActions, callPreParse=False) else: - raise ParseException("",loc,self.errmsg,self) + raise ParseException("", loc, self.errmsg, self) - def leaveWhitespace( self ): + def leaveWhitespace(self): self.skipWhitespace = False self.expr = self.expr.copy() if self.expr is not None: self.expr.leaveWhitespace() return self - def ignore( self, other ): - if isinstance( other, Suppress ): + def ignore(self, other): + if isinstance(other, Suppress): if other not in self.ignoreExprs: - super( ParseElementEnhance, self).ignore( other ) + super(ParseElementEnhance, self).ignore(other) if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) + self.expr.ignore(self.ignoreExprs[-1]) else: - super( ParseElementEnhance, self).ignore( other ) + super(ParseElementEnhance, self).ignore(other) if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) + self.expr.ignore(self.ignoreExprs[-1]) return self - def streamline( self ): - super(ParseElementEnhance,self).streamline() + def streamline(self): + super(ParseElementEnhance, self).streamline() if self.expr is not None: self.expr.streamline() return self - def checkRecursion( self, parseElementList ): + def checkRecursion(self, parseElementList): if self in parseElementList: - raise RecursiveGrammarException( parseElementList+[self] ) - subRecCheckList = parseElementList[:] + [ self ] + raise RecursiveGrammarException(parseElementList + [self]) + subRecCheckList = parseElementList[:] + [self] if self.expr is not None: - self.expr.checkRecursion( subRecCheckList ) + self.expr.checkRecursion(subRecCheckList) - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] + def validate(self, validateTrace=None): + if validateTrace is None: + validateTrace = [] + tmp = validateTrace[:] + [self] if self.expr is not None: self.expr.validate(tmp) - self.checkRecursion( [] ) + self.checkRecursion([]) - def __str__( self ): + def __str__(self): try: - return super(ParseElementEnhance,self).__str__() + return super(ParseElementEnhance, self).__str__() except Exception: pass if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) + self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr)) return self.strRepr @@ -4139,13 +4504,16 @@ class 
FollowedBy(ParseElementEnhance): [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] """ - def __init__( self, expr ): - super(FollowedBy,self).__init__(expr) + def __init__(self, expr): + super(FollowedBy, self).__init__(expr) self.mayReturnEmpty = True - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): + # by using self._expr.parse and deleting the contents of the returned ParseResults list + # we keep any named results that were defined in the FollowedBy expression _, ret = self.expr._parse(instring, loc, doActions=doActions) del ret[:] + return loc, ret @@ -4210,9 +4578,9 @@ def parseImpl(self, instring, loc=0, doActions=True): test_expr = self.expr + StringEnd() instring_slice = instring[:loc] last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat+1)): + for offset in range(1, min(loc, self.retreat + 1)): try: - _, ret = test_expr._parse(instring_slice, loc-offset) + _, ret = test_expr._parse(instring_slice, loc - offset) except ParseBaseException as pbe: last_expr = pbe else: @@ -4247,20 +4615,20 @@ class NotAny(ParseElementEnhance): # integers that are followed by "." are actually floats integer = Word(nums) + ~Char(".") """ - def __init__( self, expr ): - super(NotAny,self).__init__(expr) - #~ self.leaveWhitespace() + def __init__(self, expr): + super(NotAny, self).__init__(expr) + # ~ self.leaveWhitespace() self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, "+_ustr(self.expr) + self.errmsg = "Found unwanted token, " + _ustr(self.expr) - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if self.expr.canParseNext(instring, loc): raise ParseException(instring, loc, self.errmsg, self) return loc, [] - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4269,15 +4637,21 @@ def __str__( self ): return self.strRepr class _MultipleMatch(ParseElementEnhance): - def __init__( self, expr, stopOn=None): + def __init__(self, expr, stopOn=None): super(_MultipleMatch, self).__init__(expr) self.saveAsList = True ender = stopOn if isinstance(ender, basestring): - ender = ParserElement._literalStringClass(ender) + ender = self._literalStringClass(ender) + self.stopOn(ender) + + def stopOn(self, ender): + if isinstance(ender, basestring): + ender = self._literalStringClass(ender) self.not_ender = ~ender if ender is not None else None + return self - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): self_expr_parse = self.expr._parse self_skip_ignorables = self._skipIgnorables check_ender = self.not_ender is not None @@ -4288,24 +4662,38 @@ def parseImpl( self, instring, loc, doActions=True ): # if so, fail) if check_ender: try_not_ender(instring, loc) - loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) + loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False) try: hasIgnoreExprs = (not not self.ignoreExprs) while 1: if check_ender: try_not_ender(instring, loc) if hasIgnoreExprs: - preloc = self_skip_ignorables( instring, loc ) + preloc = self_skip_ignorables(instring, loc) else: preloc = loc - loc, tmptokens = self_expr_parse( instring, preloc, doActions ) + loc, tmptokens = self_expr_parse(instring, preloc, doActions) 
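                # accumulate this repetition only if it produced tokens or named results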
if tmptokens or tmptokens.haskeys(): tokens += tmptokens - except (ParseException,IndexError): + except (ParseException, IndexError): pass return loc, tokens + def _setResultsName(self, name, listAllMatches=False): + if __diag__.warn_ungrouped_named_tokens_in_collection: + for e in [self.expr] + getattr(self.expr, 'exprs', []): + if isinstance(e, ParserElement) and e.resultsName: + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName), + stacklevel=3) + + return super(_MultipleMatch, self)._setResultsName(name, listAllMatches) + + class OneOrMore(_MultipleMatch): """Repetition of one or more of the given expression. @@ -4332,8 +4720,8 @@ class OneOrMore(_MultipleMatch): (attr_expr * (1,)).parseString(text).pprint() """ - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4352,18 +4740,18 @@ class ZeroOrMore(_MultipleMatch): Example: similar to :class:`OneOrMore` """ - def __init__( self, expr, stopOn=None): - super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) + def __init__(self, expr, stopOn=None): + super(ZeroOrMore, self).__init__(expr, stopOn=stopOn) self.mayReturnEmpty = True - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): try: return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) - except (ParseException,IndexError): + except (ParseException, IndexError): return loc, [] - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4371,6 +4759,7 @@ def __str__( self ): return self.strRepr + class _NullToken(object): def __bool__(self): return False @@ -4378,7 +4767,6 @@ def __bool__(self): def __str__(self): return "" -_optionalNotMatched = _NullToken() class Optional(ParseElementEnhance): """Optional matching of the given expression. 
@@ -4416,28 +4804,30 @@ class Optional(ParseElementEnhance): ^ FAIL: Expected end of text (at char 5), (line:1, col:6) """ - def __init__( self, expr, default=_optionalNotMatched ): - super(Optional,self).__init__( expr, savelist=False ) + __optionalNotMatched = _NullToken() + + def __init__(self, expr, default=__optionalNotMatched): + super(Optional, self).__init__(expr, savelist=False) self.saveAsList = self.expr.saveAsList self.defaultValue = default self.mayReturnEmpty = True - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - except (ParseException,IndexError): - if self.defaultValue is not _optionalNotMatched: + loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) + except (ParseException, IndexError): + if self.defaultValue is not self.__optionalNotMatched: if self.expr.resultsName: - tokens = ParseResults([ self.defaultValue ]) + tokens = ParseResults([self.defaultValue]) tokens[self.expr.resultsName] = self.defaultValue else: - tokens = [ self.defaultValue ] + tokens = [self.defaultValue] else: tokens = [] return loc, tokens - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4503,20 +4893,20 @@ class SkipTo(ParseElementEnhance): - issue_num: 79 - sev: Minor """ - def __init__( self, other, include=False, ignore=None, failOn=None ): - super( SkipTo, self ).__init__( other ) + def __init__(self, other, include=False, ignore=None, failOn=None): + super(SkipTo, self).__init__(other) self.ignoreExpr = ignore self.mayReturnEmpty = True self.mayIndexError = False self.includeMatch = include self.saveAsList = False if isinstance(failOn, basestring): - self.failOn = ParserElement._literalStringClass(failOn) + self.failOn = self._literalStringClass(failOn) else: self.failOn = failOn - self.errmsg = "No match found for "+_ustr(self.expr) + self.errmsg = "No match found for " + _ustr(self.expr) - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): startloc = loc instrlen = len(instring) expr = self.expr @@ -4558,7 +4948,7 @@ def parseImpl( self, instring, loc, doActions=True ): skipresult = ParseResults(skiptext) if self.includeMatch: - loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) + loc, mat = expr_parse(instring, loc, doActions, callPreParse=False) skipresult += mat return loc, skipresult @@ -4590,17 +4980,17 @@ class Forward(ParseElementEnhance): See :class:`ParseResults.pprint` for an example of a recursive parser created using ``Forward``. 
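    A minimal recursive-grammar sketch (note ``<<=`` rather than ``=``)::

        expr = Forward()
        atom = Word(nums) | '(' + expr + ')'
        expr <<= atom + ZeroOrMore('+' + atom)
        print(expr.parseString("(1+2)+3"))  # -> ['(', '1', '+', '2', ')', '+', '3']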
""" - def __init__( self, other=None ): - super(Forward,self).__init__( other, savelist=False ) + def __init__(self, other=None): + super(Forward, self).__init__(other, savelist=False) - def __lshift__( self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass(other) + def __lshift__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) self.expr = other self.strRepr = None self.mayIndexError = self.expr.mayIndexError self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars( self.expr.whiteChars ) + self.setWhitespaceChars(self.expr.whiteChars) self.skipWhitespace = self.expr.skipWhitespace self.saveAsList = self.expr.saveAsList self.ignoreExprs.extend(self.expr.ignoreExprs) @@ -4609,59 +4999,72 @@ def __lshift__( self, other ): def __ilshift__(self, other): return self << other - def leaveWhitespace( self ): + def leaveWhitespace(self): self.skipWhitespace = False return self - def streamline( self ): + def streamline(self): if not self.streamlined: self.streamlined = True if self.expr is not None: self.expr.streamline() return self - def validate( self, validateTrace=[] ): + def validate(self, validateTrace=None): + if validateTrace is None: + validateTrace = [] + if self not in validateTrace: - tmp = validateTrace[:]+[self] + tmp = validateTrace[:] + [self] if self.expr is not None: self.expr.validate(tmp) self.checkRecursion([]) - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name - return self.__class__.__name__ + ": ..." + if self.strRepr is not None: + return self.strRepr + + # Avoid infinite recursion by setting a temporary strRepr + self.strRepr = ": ..." - # stubbed out for now - creates awful memory and perf issues - self._revertClass = self.__class__ - self.__class__ = _ForwardNoRecurse + # Use the string representation of main expression. + retString = '...' try: if self.expr is not None: - retString = _ustr(self.expr) + retString = _ustr(self.expr)[:1000] else: retString = "None" finally: - self.__class__ = self._revertClass - return self.__class__.__name__ + ": " + retString + self.strRepr = self.__class__.__name__ + ": " + retString + return self.strRepr def copy(self): if self.expr is not None: - return super(Forward,self).copy() + return super(Forward, self).copy() else: ret = Forward() ret <<= self return ret -class _ForwardNoRecurse(Forward): - def __str__( self ): - return "..." + def _setResultsName(self, name, listAllMatches=False): + if __diag__.warn_name_set_on_empty_Forward: + if self.expr is None: + warnings.warn("{0}: setting results name {0!r} on {1} expression " + "that has no contained expression".format("warn_name_set_on_empty_Forward", + name, + type(self).__name__), + stacklevel=3) + + return super(Forward, self)._setResultsName(name, listAllMatches) class TokenConverter(ParseElementEnhance): """ Abstract subclass of :class:`ParseExpression`, for converting parsed results. """ - def __init__( self, expr, savelist=False ): - super(TokenConverter,self).__init__( expr )#, savelist ) + def __init__(self, expr, savelist=False): + super(TokenConverter, self).__init__(expr) # , savelist) self.saveAsList = False class Combine(TokenConverter): @@ -4682,8 +5085,8 @@ class Combine(TokenConverter): # no match when there are internal spaces print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) 
""" - def __init__( self, expr, joinString="", adjacent=True ): - super(Combine,self).__init__( expr ) + def __init__(self, expr, joinString="", adjacent=True): + super(Combine, self).__init__(expr) # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself if adjacent: self.leaveWhitespace() @@ -4692,20 +5095,20 @@ def __init__( self, expr, joinString="", adjacent=True ): self.joinString = joinString self.callPreparse = True - def ignore( self, other ): + def ignore(self, other): if self.adjacent: ParserElement.ignore(self, other) else: - super( Combine, self).ignore( other ) + super(Combine, self).ignore(other) return self - def postParse( self, instring, loc, tokenlist ): + def postParse(self, instring, loc, tokenlist): retToks = tokenlist.copy() del retToks[:] - retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults) + retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults) if self.resultsName and retToks.haskeys(): - return [ retToks ] + return [retToks] else: return retToks @@ -4719,17 +5122,17 @@ class Group(TokenConverter): num = Word(nums) term = ident | num func = ident + Optional(delimitedList(term)) - print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] + print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100'] func = ident + Group(Optional(delimitedList(term))) - print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] + print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']] """ - def __init__( self, expr ): - super(Group,self).__init__( expr ) - self.saveAsList = expr.saveAsList + def __init__(self, expr): + super(Group, self).__init__(expr) + self.saveAsList = True - def postParse( self, instring, loc, tokenlist ): - return [ tokenlist ] + def postParse(self, instring, loc, tokenlist): + return [tokenlist] class Dict(TokenConverter): """Converter to return a repetitive expression as a list, but also @@ -4770,31 +5173,31 @@ class Dict(TokenConverter): See more examples at :class:`ParseResults` of accessing fields by results name. 
""" - def __init__( self, expr ): - super(Dict,self).__init__( expr ) + def __init__(self, expr): + super(Dict, self).__init__(expr) self.saveAsList = True - def postParse( self, instring, loc, tokenlist ): - for i,tok in enumerate(tokenlist): + def postParse(self, instring, loc, tokenlist): + for i, tok in enumerate(tokenlist): if len(tok) == 0: continue ikey = tok[0] - if isinstance(ikey,int): + if isinstance(ikey, int): ikey = _ustr(tok[0]).strip() - if len(tok)==1: - tokenlist[ikey] = _ParseResultsWithOffset("",i) - elif len(tok)==2 and not isinstance(tok[1],ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) + if len(tok) == 1: + tokenlist[ikey] = _ParseResultsWithOffset("", i) + elif len(tok) == 2 and not isinstance(tok[1], ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) else: - dictvalue = tok.copy() #ParseResults(i) + dictvalue = tok.copy() # ParseResults(i) del dictvalue[0] - if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) + if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) if self.resultsName: - return [ tokenlist ] + return [tokenlist] else: return tokenlist @@ -4821,10 +5224,10 @@ class Suppress(TokenConverter): (See also :class:`delimitedList`.) """ - def postParse( self, instring, loc, tokenlist ): + def postParse(self, instring, loc, tokenlist): return [] - def suppress( self ): + def suppress(self): return self @@ -4834,12 +5237,12 @@ class OnlyOnce(object): def __init__(self, methodCall): self.callable = _trim_arity(methodCall) self.called = False - def __call__(self,s,l,t): + def __call__(self, s, l, t): if not self.called: - results = self.callable(s,l,t) + results = self.callable(s, l, t) self.called = True return results - raise ParseException(s,l,"") + raise ParseException(s, l, "") def reset(self): self.called = False @@ -4871,16 +5274,16 @@ def remove_duplicate_chars(tokens): f = _trim_arity(f) def z(*paArgs): thisFunc = f.__name__ - s,l,t = paArgs[-3:] - if len(paArgs)>3: + s, l, t = paArgs[-3:] + if len(paArgs) > 3: thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) + sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t)) try: ret = f(*paArgs) except Exception as exc: - sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) + sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc)) raise - sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) + sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret)) return ret try: z.__name__ = f.__name__ @@ -4891,7 +5294,7 @@ def z(*paArgs): # # global helpers # -def delimitedList( expr, delim=",", combine=False ): +def delimitedList(expr, delim=",", combine=False): """Helper to define a delimited list of expressions - the delimiter defaults to ','. 
By default, the list elements and delimiters can have intervening whitespace, and comments, but this can be @@ -4906,13 +5309,13 @@ def delimitedList( expr, delim=",", combine=False ): delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] """ - dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." + dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..." if combine: - return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName) + return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName) else: - return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) + return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName) -def countedArray( expr, intExpr=None ): +def countedArray(expr, intExpr=None): """Helper to define a counted list of expressions. This helper defines a pattern of the form:: @@ -4936,22 +5339,22 @@ def countedArray( expr, intExpr=None ): countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] """ arrayExpr = Forward() - def countFieldParseAction(s,l,t): + def countFieldParseAction(s, l, t): n = t[0] - arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) + arrayExpr << (n and Group(And([expr] * n)) or Group(empty)) return [] if intExpr is None: - intExpr = Word(nums).setParseAction(lambda t:int(t[0])) + intExpr = Word(nums).setParseAction(lambda t: int(t[0])) else: intExpr = intExpr.copy() intExpr.setName("arrayLen") intExpr.addParseAction(countFieldParseAction, callDuringTry=True) - return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') + return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...') def _flatten(L): ret = [] for i in L: - if isinstance(i,list): + if isinstance(i, list): ret.extend(_flatten(i)) else: ret.append(i) @@ -4973,7 +5376,7 @@ def matchPreviousLiteral(expr): enabled. 
""" rep = Forward() - def copyTokenToRepeater(s,l,t): + def copyTokenToRepeater(s, l, t): if t: if len(t) == 1: rep << t[0] @@ -5005,26 +5408,26 @@ def matchPreviousExpr(expr): rep = Forward() e2 = expr.copy() rep <<= e2 - def copyTokenToRepeater(s,l,t): + def copyTokenToRepeater(s, l, t): matchTokens = _flatten(t.asList()) - def mustMatchTheseTokens(s,l,t): + def mustMatchTheseTokens(s, l, t): theseTokens = _flatten(t.asList()) - if theseTokens != matchTokens: - raise ParseException("",0,"") - rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) + if theseTokens != matchTokens: + raise ParseException('', 0, '') + rep.setParseAction(mustMatchTheseTokens, callDuringTry=True) expr.addParseAction(copyTokenToRepeater, callDuringTry=True) rep.setName('(prev) ' + _ustr(expr)) return rep def _escapeRegexRangeChars(s): - #~ escape these chars: ^-] + # ~ escape these chars: ^-] for c in r"\^-]": - s = s.replace(c,_bslash+c) - s = s.replace("\n",r"\n") - s = s.replace("\t",r"\t") + s = s.replace(c, _bslash + c) + s = s.replace("\n", r"\n") + s = s.replace("\t", r"\t") return _ustr(s) -def oneOf( strs, caseless=False, useRegex=True ): +def oneOf(strs, caseless=False, useRegex=True, asKeyword=False): """Helper to quickly define a set of alternative Literals, and makes sure to do longest-first testing when there is a conflict, regardless of the input order, but returns @@ -5038,8 +5441,10 @@ def oneOf( strs, caseless=False, useRegex=True ): caseless - useRegex - (default= ``True``) - as an optimization, will generate a Regex object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True``, or if + a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if creating a :class:`Regex` raises an exception) + - asKeyword - (default=``False``) - enforce Keyword-style matching on the + generated expressions Example:: @@ -5054,57 +5459,62 @@ def oneOf( strs, caseless=False, useRegex=True ): [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] """ + if isinstance(caseless, basestring): + warnings.warn("More than one string argument passed to oneOf, pass " + "choices as a list or space-delimited string", stacklevel=2) + if caseless: - isequal = ( lambda a,b: a.upper() == b.upper() ) - masks = ( lambda a,b: b.upper().startswith(a.upper()) ) - parseElementClass = CaselessLiteral + isequal = (lambda a, b: a.upper() == b.upper()) + masks = (lambda a, b: b.upper().startswith(a.upper())) + parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral else: - isequal = ( lambda a,b: a == b ) - masks = ( lambda a,b: b.startswith(a) ) - parseElementClass = Literal + isequal = (lambda a, b: a == b) + masks = (lambda a, b: b.startswith(a)) + parseElementClass = Keyword if asKeyword else Literal symbols = [] - if isinstance(strs,basestring): + if isinstance(strs, basestring): symbols = strs.split() elif isinstance(strs, Iterable): symbols = list(strs) else: warnings.warn("Invalid argument to oneOf, expected string or iterable", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) if not symbols: return NoMatch() - i = 0 - while i < len(symbols)-1: - cur = symbols[i] - for j,other in enumerate(symbols[i+1:]): - if ( isequal(other, cur) ): - del symbols[i+j+1] - break - elif ( masks(cur, other) ): - del symbols[i+j+1] - symbols.insert(i,other) - cur = other - break - else: - i += 1 + if not asKeyword: + # if not producing keywords, need to reorder to take care to avoid masking + # longer choices with shorter ones + i = 0 + while i < 
len(symbols) - 1: + cur = symbols[i] + for j, other in enumerate(symbols[i + 1:]): + if isequal(other, cur): + del symbols[i + j + 1] + break + elif masks(cur, other): + del symbols[i + j + 1] + symbols.insert(i, other) + break + else: + i += 1 - if not caseless and useRegex: - #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) + if not (caseless or asKeyword) and useRegex: + # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols])) try: - if len(symbols)==len("".join(symbols)): - return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) + if len(symbols) == len("".join(symbols)): + return Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols)) else: - return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) + return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols)) except Exception: warnings.warn("Exception creating Regex for oneOf, building MatchFirst", SyntaxWarning, stacklevel=2) - # last resort, just use MatchFirst return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) -def dictOf( key, value ): +def dictOf(key, value): """Helper to easily and clearly define a dictionary by specifying the respective patterns for the key and value. Takes care of defining the :class:`Dict`, :class:`ZeroOrMore`, and @@ -5162,8 +5572,8 @@ def originalTextFor(expr, asString=True): Example:: src = "this is test <b> bold <i>text</i> </b> normal text " - for tag in ("b","i"): - opener,closer = makeHTMLTags(tag) + for tag in ("b", "i"): + opener, closer = makeHTMLTags(tag) patt = originalTextFor(opener + SkipTo(closer) + closer) print(patt.searchString(src)[0]) @@ -5172,14 +5582,14 @@ def originalTextFor(expr, asString=True): ['<b> bold <i>text</i> </b>'] ['<i>text</i>'] """ - locMarker = Empty().setParseAction(lambda s,loc,t: loc) + locMarker = Empty().setParseAction(lambda s, loc, t: loc) endlocMarker = locMarker.copy() endlocMarker.callPreparse = False matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") if asString: - extractText = lambda s,l,t: s[t._original_start:t._original_end] + extractText = lambda s, l, t: s[t._original_start: t._original_end] else: - def extractText(s,l,t): + def extractText(s, l, t): t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] matchExpr.setParseAction(extractText) matchExpr.ignoreExprs = expr.ignoreExprs @@ -5189,7 +5599,7 @@ def ungroup(expr): """Helper to undo pyparsing's default grouping of And expressions, even if all but one are non-empty. 
""" - return TokenConverter(expr).setParseAction(lambda t:t[0]) + return TokenConverter(expr).addParseAction(lambda t: t[0]) def locatedExpr(expr): """Helper to decorate a returned token with its starting and ending @@ -5216,7 +5626,7 @@ def locatedExpr(expr): [[8, 'lksdjjf', 15]] [[18, 'lkkjj', 23]] """ - locator = Empty().setParseAction(lambda s,l,t: l) + locator = Empty().setParseAction(lambda s, l, t: l) return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) @@ -5227,12 +5637,12 @@ def locatedExpr(expr): stringStart = StringStart().setName("stringStart") stringEnd = StringEnd().setName("stringEnd") -_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) -_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) -_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) +_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1]) +_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16))) +_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8))) _singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) _charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" +_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]" def srange(s): r"""Helper to easily define string ranges for use in Word @@ -5260,7 +5670,7 @@ def srange(s): - any combination of the above (``'aeiouy'``, ``'a-zA-Z0-9_$'``, etc.) """ - _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) + _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) try: return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) except Exception: @@ -5270,9 +5680,9 @@ def matchOnlyAtCol(n): """Helper method for defining parse actions that require matching at a specific column in the input text. """ - def verifyCol(strg,locn,toks): - if col(locn,strg) != n: - raise ParseException(strg,locn,"matched token not at column %d" % n) + def verifyCol(strg, locn, toks): + if col(locn, strg) != n: + raise ParseException(strg, locn, "matched token not at column %d" % n) return verifyCol def replaceWith(replStr): @@ -5288,9 +5698,9 @@ def replaceWith(replStr): OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] """ - return lambda s,l,t: [replStr] + return lambda s, l, t: [replStr] -def removeQuotes(s,l,t): +def removeQuotes(s, l, t): """Helper parse action for removing quotation marks from parsed quoted strings. @@ -5341,7 +5751,7 @@ def tokenMap(func, *args): now is the winter of our discontent made glorious summer by this sun of york ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] """ - def pa(s,l,t): + def pa(s, l, t): return [func(tokn, *args) for tokn in t] try: @@ -5361,33 +5771,41 @@ def pa(s,l,t): """(Deprecated) Helper parse action to convert tokens to lower case. 
Deprecated in favor of :class:`pyparsing_common.downcaseTokens`""" -def _makeTags(tagStr, xml): +def _makeTags(tagStr, xml, + suppress_LT=Suppress("<"), + suppress_GT=Suppress(">")): """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr,basestring): + if isinstance(tagStr, basestring): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name - tagAttrName = Word(alphas,alphanums+"_-:") - if (xml): - tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") + tagAttrName = Word(alphas, alphanums + "_-:") + if xml: + tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes) + openTag = (suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') + + suppress_GT) else: - printablesLessRAbrack = "".join(c for c in printables if c not in ">") - tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ - Optional( Suppress("=") + tagAttrValue ) ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - closeTag = Combine(_L("</") + tagStr + ">") - - openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) - closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) + tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">") + openTag = (suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens) + + Optional(Suppress("=") + tagAttrValue)))) + + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') + + suppress_GT) + closeTag = Combine(_L("</") + tagStr + ">", adjacent=False) + + openTag.setName("<%s>" % resname) + # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels + openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy())) + closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname) openTag.tag = resname closeTag.tag = resname + openTag.tag_body = SkipTo(closeTag()) return openTag, closeTag def makeHTMLTags(tagStr): @@ -5400,7 +5818,7 @@ def makeHTMLTags(tagStr): text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' # makeHTMLTags returns pyparsing expressions for the opening and # closing tags as a 2-tuple - a,a_end = makeHTMLTags("A") + a, a_end = makeHTMLTags("A") link_expr = a + SkipTo(a_end)("link_text") + a_end for link in link_expr.searchString(text): @@ -5412,7 +5830,7 @@ def makeHTMLTags(tagStr): pyparsing -> https://github.com/pyparsing/pyparsing/wiki """ - return _makeTags( tagStr, False ) + return _makeTags(tagStr, False) def makeXMLTags(tagStr): """Helper to construct opening and closing tag expressions for XML, @@ -5420,9 +5838,9 @@ def makeXMLTags(tagStr): 
Example: similar to :class:`makeHTMLTags` """ - return _makeTags( tagStr, True ) + return _makeTags(tagStr, True) -def withAttribute(*args,**attrDict): +def withAttribute(*args, **attrDict): """Helper to create a validating parse action to be used with start tags created with :class:`makeXMLTags` or :class:`makeHTMLTags`. Use ``withAttribute`` to qualify @@ -5435,7 +5853,7 @@ def withAttribute(*args,**attrDict): - keyword arguments, as in ``(align="right")``, or - as an explicit dict with ``**`` operator, when an attribute name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align","right"))`` + - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. @@ -5482,13 +5900,13 @@ def withAttribute(*args,**attrDict): attrs = args[:] else: attrs = attrDict.items() - attrs = [(k,v) for k,v in attrs] - def pa(s,l,tokens): - for attrName,attrValue in attrs: + attrs = [(k, v) for k, v in attrs] + def pa(s, l, tokens): + for attrName, attrValue in attrs: if attrName not in tokens: - raise ParseException(s,l,"no matching attribute " + attrName) + raise ParseException(s, l, "no matching attribute " + attrName) if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % + raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" % (attrName, tokens[attrName], attrValue)) return pa withAttribute.ANY_VALUE = object() @@ -5529,13 +5947,13 @@ def withClass(classname, namespace=''): 1,3 2,3 1,1 """ classattr = "%s:class" % namespace if namespace else "class" - return withAttribute(**{classattr : classname}) + return withAttribute(**{classattr: classname}) opAssoc = SimpleNamespace() opAssoc.LEFT = object() opAssoc.RIGHT = object() -def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): +def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')): """Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. 
Parse actions can also be @@ -5613,9 +6031,9 @@ def parseImpl(self, instring, loc, doActions=True): return loc, [] ret = Forward() - lastExpr = baseExpr | ( lpar + ret + rpar ) - for i,operDef in enumerate(opList): - opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] + lastExpr = baseExpr | (lpar + ret + rpar) + for i, operDef in enumerate(opList): + opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4] termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: @@ -5625,15 +6043,15 @@ def parseImpl(self, instring, loc, doActions=True): thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) + matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr)) elif arity == 2: if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) + matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr)) else: - matchExpr = _FB(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) + matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr)) elif arity == 3: - matchExpr = _FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ - Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) + matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + + Group(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: @@ -5641,15 +6059,15 @@ def parseImpl(self, instring, loc, doActions=True): # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) + matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) elif arity == 2: if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) + matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr)) else: - matchExpr = _FB(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) + matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr)) elif arity == 3: - matchExpr = _FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ - Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) + matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") else: @@ -5659,7 +6077,7 @@ def parseImpl(self, instring, loc, doActions=True): matchExpr.setParseAction(*pa) else: matchExpr.setParseAction(pa) - thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) + thisExpr <<= (matchExpr.setName(termName) | lastExpr) lastExpr = thisExpr ret <<= lastExpr return ret @@ -5668,10 +6086,10 @@ def parseImpl(self, instring, loc, doActions=True): """(Deprecated) Former name of :class:`infixNotation`, will be dropped in a future release.""" -dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") -sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") -quotedString = 
Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes") +quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' + | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes") unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): @@ -5707,7 +6125,7 @@ def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.cop ident = Word(alphas+'_', alphanums+'_') number = pyparsing_common.number arg = Group(decl_data_type + ident) - LPAR,RPAR = map(Suppress, "()") + LPAR, RPAR = map(Suppress, "()") code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) @@ -5742,33 +6160,40 @@ def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.cop if opener == closer: raise ValueError("opening and closing strings cannot be the same") if content is None: - if isinstance(opener,basestring) and isinstance(closer,basestring): - if len(opener) == 1 and len(closer)==1: + if isinstance(opener, basestring) and isinstance(closer, basestring): + if len(opener) == 1 and len(closer) == 1: if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) + content = (Combine(OneOrMore(~ignoreExpr + + CharsNotIn(opener + + closer + + ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).setParseAction(lambda t: t[0].strip())) else: - content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t:t[0].strip())) + content = (empty.copy() + CharsNotIn(opener + + closer + + ParserElement.DEFAULT_WHITE_CHARS + ).setParseAction(lambda t: t[0].strip())) else: if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - ~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) + content = (Combine(OneOrMore(~ignoreExpr + + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) + ).setParseAction(lambda t: t[0].strip())) else: - content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) + content = (Combine(OneOrMore(~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) + ).setParseAction(lambda t: t[0].strip())) else: raise ValueError("opening and closing arguments must be strings if no content expression is given") ret = Forward() if ignoreExpr is not None: - ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) + ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)) else: - ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) - ret.setName('nested %s%s expression' % (opener,closer)) + 
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) + ret.setName('nested %s%s expression' % (opener, closer)) return ret def indentedBlock(blockStatementExpr, indentStack, indent=True): @@ -5783,7 +6208,7 @@ def indentedBlock(blockStatementExpr, indentStack, indent=True): (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond - the the current level; set to False for block of left-most + the current level; set to False for block of left-most statements (default= ``True``) A valid block must contain at least one ``blockStatement``. @@ -5816,15 +6241,15 @@ def eggs(z): stmt = Forward() identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":") func_body = indentedBlock(stmt, indentStack) - funcDef = Group( funcDecl + func_body ) + funcDef = Group(funcDecl + func_body) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) - stmt << ( funcDef | assignment | identifier ) + stmt << (funcDef | assignment | identifier) module_body = OneOrMore(stmt) @@ -5852,47 +6277,56 @@ def eggs(z): ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] """ - def checkPeerIndent(s,l,t): + backup_stack = indentStack[:] + + def reset_stack(): + indentStack[:] = backup_stack + + def checkPeerIndent(s, l, t): if l >= len(s): return - curCol = col(l,s) + curCol = col(l, s) if curCol != indentStack[-1]: if curCol > indentStack[-1]: - raise ParseFatalException(s,l,"illegal nesting") - raise ParseException(s,l,"not a peer entry") + raise ParseException(s, l, "illegal nesting") + raise ParseException(s, l, "not a peer entry") - def checkSubIndent(s,l,t): - curCol = col(l,s) + def checkSubIndent(s, l, t): + curCol = col(l, s) if curCol > indentStack[-1]: - indentStack.append( curCol ) + indentStack.append(curCol) else: - raise ParseException(s,l,"not a subentry") + raise ParseException(s, l, "not a subentry") - def checkUnindent(s,l,t): + def checkUnindent(s, l, t): if l >= len(s): return - curCol = col(l,s) - if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): - raise ParseException(s,l,"not an unindent") - indentStack.pop() + curCol = col(l, s) + if not(indentStack and curCol in indentStack): + raise ParseException(s, l, "not an unindent") + if curCol < indentStack[-1]: + indentStack.pop() NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') PEER = Empty().setParseAction(checkPeerIndent).setName('') UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') if indent: - smExpr = Group( Optional(NL) + - #~ FollowedBy(blockStatementExpr) + - INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) + smExpr = Group(Optional(NL) + + INDENT + + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL)) + + UNDENT) else: - smExpr = Group( Optional(NL) + - (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) + smExpr = Group(Optional(NL) + + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL)) + + UNDENT) + smExpr.setFailAction(lambda a, b, c, d: reset_stack()) blockStatementExpr.ignore(_bslash + LineEnd()) 
return smExpr.setName('indented block') alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) -_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\'')) commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") def replaceHTMLEntity(t): """Helper parser action to replace common HTML entities with their special characters""" @@ -5909,7 +6343,7 @@ def replaceHTMLEntity(t): dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") "Comment of the form ``// ... (to end of line)``" -cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment") "Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" javaStyleComment = cppStyleComment @@ -5918,10 +6352,10 @@ def replaceHTMLEntity(t): pythonStyleComment = Regex(r"#.*").setName("Python style comment") "Comment of the form ``# ... (to end of line)``" -_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + - Optional( Word(" \t") + - ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") -commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + + Optional(Word(" \t") + + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem") +commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList") """(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. 
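A small check (assumed usage, not part of the patch) of the reworked
``indentedBlock`` above: ``backup_stack`` snapshots the caller's
``indentStack`` when the helper is built, and the new fail action restores
that snapshot after an unsuccessful parse, so the shared stack stays usable
for a retry::

    from pyparsing import ParseException, Word, alphas, indentedBlock

    indent_stack = [1]
    block = indentedBlock(Word(alphas), indent_stack)

    try:
        block.parseString("toplevel")  # column 1 is not a sub-indent -> fails
    except ParseException:
        pass
    assert indent_stack == [1]         # reset_stack() restored the snapshot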
@@ -6087,7 +6521,7 @@ class pyparsing_common: integer = Word(nums).setName("integer").setParseAction(convertToInteger) """expression that parses an unsigned integer, returns an int""" - hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16)) """expression that parses a hexadecimal integer, returns an int""" signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) @@ -6101,10 +6535,10 @@ class pyparsing_common: """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" mixed_integer.addParseAction(sum) - real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) + real = Regex(r'[+-]?(:?\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat) """expression that parses a floating point number and returns a float""" - sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + sci_real = Regex(r'[+-]?(:?\d+(:?[eE][+-]?\d+)|(:?\d+\.\d*|\.\d+)(:?[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) """expression that parses a floating point number with optional scientific notation and returns a float""" @@ -6115,15 +6549,18 @@ class pyparsing_common: fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) """any int or real number, returned as float""" - identifier = Word(alphas+'_', alphanums+'_').setName("identifier") + identifier = Word(alphas + '_', alphanums + '_').setName("identifier") """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") "IPv4 address (``0.0.0.0 - 255.255.255.255``)" _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) + + "::" + + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) + ).setName("short IPv6 address") _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") @@ -6150,7 +6587,7 @@ def convertToDate(fmt="%Y-%m-%d"): [datetime.date(1999, 12, 31)] """ - def cvt_fn(s,l,t): + def cvt_fn(s, l, t): try: return datetime.strptime(t[0], fmt).date() except ValueError as ve: @@ -6175,7 +6612,7 @@ def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] """ - def cvt_fn(s,l,t): + def cvt_fn(s, l, t): try: return datetime.strptime(t[0], fmt) except ValueError as ve: @@ -6200,7 +6637,7 @@ def stripHTMLTags(s, l, tokens): # strip HTML links from normal text text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' - td,td_end = 
makeHTMLTags("TD") + td, td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end print(table_text.parseString(text).body) @@ -6210,9 +6647,13 @@ def stripHTMLTags(s, l, tokens): """ return pyparsing_common._html_stripper.transformString(tokens[0]) - _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') - + Optional( White(" \t") ) ) ).streamline().setName("commaItem") - comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") + _commasepitem = Combine(OneOrMore(~Literal(",") + + ~LineEnd() + + Word(printables, excludeChars=',') + + Optional(White(" \t")))).streamline().setName("commaItem") + comma_separated_list = delimitedList(Optional(quotedString.copy() + | _commasepitem, default='') + ).setName("comma separated list") """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) @@ -6231,7 +6672,8 @@ def __init__(self, fn): def __get__(self, obj, cls): if cls is None: cls = type(obj) - if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) for superclass in cls.__mro__[1:]): + if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) + for superclass in cls.__mro__[1:]): cls._intern = {} attrname = self.fn.__name__ if attrname not in cls._intern: @@ -6262,7 +6704,7 @@ def _get_chars_for_ranges(cls): if cc is unicode_set: break for rr in cc._ranges: - ret.extend(range(rr[0], rr[-1]+1)) + ret.extend(range(rr[0], rr[-1] + 1)) return [unichr(c) for c in sorted(set(ret))] @_lazyclassproperty @@ -6318,27 +6760,27 @@ class Cyrillic(unicode_set): class Chinese(unicode_set): "Unicode set for Chinese Unicode Character Range" - _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f), ] + _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),] class Japanese(unicode_set): "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges = [ ] + _ranges = [] class Kanji(unicode_set): "Unicode set for Kanji Unicode Character Range" - _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f), ] + _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),] class Hiragana(unicode_set): "Unicode set for Hiragana Unicode Character Range" - _ranges = [(0x3040, 0x309f), ] + _ranges = [(0x3040, 0x309f),] class Katakana(unicode_set): "Unicode set for Katakana Unicode Character Range" - _ranges = [(0x30a0, 0x30ff), ] + _ranges = [(0x30a0, 0x30ff),] class Korean(unicode_set): "Unicode set for Korean Unicode Character Range" - _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f), ] + _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),] class CJK(Chinese, Japanese, Korean): "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" @@ -6346,15 +6788,15 @@ class CJK(Chinese, Japanese, Korean): class Thai(unicode_set): "Unicode set for Thai Unicode Character Range" - _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b), ] + _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),] class Arabic(unicode_set): "Unicode set for Arabic Unicode Character Range" - _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f), ] + _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),] class Hebrew(unicode_set): "Unicode set 
for Hebrew Unicode Character Range" - _ranges = [(0x0590, 0x05ff), ] + _ranges = [(0x0590, 0x05ff),] class Devanagari(unicode_set): "Unicode set for Devanagari Unicode Character Range" @@ -6366,18 +6808,18 @@ class Devanagari(unicode_set): # define ranges in language character sets if PY_3: - setattr(pyparsing_unicode, "العربية", pyparsing_unicode.Arabic) - setattr(pyparsing_unicode, "中文", pyparsing_unicode.Chinese) - setattr(pyparsing_unicode, "кириллица", pyparsing_unicode.Cyrillic) - setattr(pyparsing_unicode, "Ελληνικά", pyparsing_unicode.Greek) - setattr(pyparsing_unicode, "עִברִית", pyparsing_unicode.Hebrew) - setattr(pyparsing_unicode, "日本語", pyparsing_unicode.Japanese) - setattr(pyparsing_unicode.Japanese, "漢字", pyparsing_unicode.Japanese.Kanji) - setattr(pyparsing_unicode.Japanese, "カタカナ", pyparsing_unicode.Japanese.Katakana) - setattr(pyparsing_unicode.Japanese, "ひらがな", pyparsing_unicode.Japanese.Hiragana) - setattr(pyparsing_unicode, "한국어", pyparsing_unicode.Korean) - setattr(pyparsing_unicode, "ไทย", pyparsing_unicode.Thai) - setattr(pyparsing_unicode, "देवनागरी", pyparsing_unicode.Devanagari) + setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic) + setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese) + setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic) + setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek) + setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew) + setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese) + setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji) + setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana) + setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana) + setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean) + setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai) + setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari) if __name__ == "__main__": diff --git a/pipenv/patched/notpip/_vendor/pytoml/parser.py b/pipenv/patched/notpip/_vendor/pytoml/parser.py index 3493aa644c..f074317ff0 100644 --- a/pipenv/patched/notpip/_vendor/pytoml/parser.py +++ b/pipenv/patched/notpip/_vendor/pytoml/parser.py @@ -1,4 +1,4 @@ -import string, re, sys, datetime +import re, sys from .core import TomlError from .utils import rfc3339_re, parse_rfc3339_re @@ -28,8 +28,6 @@ def error(msg): def process_value(v, object_pairs_hook): kind, text, value, pos = v - if kind == 'str' and value.startswith('\n'): - value = value[1:] if kind == 'array': if value and any(k != value[0][0] for k, t, v, p in value[1:]): error('array-type-mismatch') @@ -215,6 +213,7 @@ def _p_key(s): return r if s.consume('\''): if s.consume('\'\''): + s.consume('\n') r = s.expect_re(_litstr_ml_re).group(0) s.expect('\'\'\'') else: @@ -238,6 +237,7 @@ def _p_value(s, object_pairs_hook): if s.consume('"'): if s.consume('""'): + s.consume('\n') r = _p_basicstr_content(s, _basicstr_ml_re) s.expect('"""') else: @@ -247,6 +247,7 @@ def _p_value(s, object_pairs_hook): if s.consume('\''): if s.consume('\'\''): + s.consume('\n') r = s.expect_re(_litstr_ml_re).group(0) s.expect('\'\'\'') else: diff --git a/pipenv/patched/notpip/_vendor/pytoml/writer.py b/pipenv/patched/notpip/_vendor/pytoml/writer.py index 73b5089c24..d2e849f619 100644 --- a/pipenv/patched/notpip/_vendor/pytoml/writer.py +++ b/pipenv/patched/notpip/_vendor/pytoml/writer.py @@ -3,6 +3,12 @@ from .utils import format_rfc3339 +try: + from pathlib import PurePath as _path_types 
+except ImportError: + _path_types = () + + if sys.version_info[0] == 3: long = int unicode = str @@ -66,6 +72,8 @@ def _format_value(v): return '[{0}]'.format(', '.join(_format_value(obj) for obj in v)) elif isinstance(v, dict): return '{{{0}}}'.format(', '.join('{} = {}'.format(_escape_id(k), _format_value(obj)) for k, obj in v.items())) + elif isinstance(v, _path_types): + return _escape_string(str(v)) else: raise RuntimeError(v) diff --git a/pipenv/patched/notpip/_vendor/requests/__init__.py b/pipenv/patched/notpip/_vendor/requests/__init__.py index 1544bc8982..bcb74535c8 100644 --- a/pipenv/patched/notpip/_vendor/requests/__init__.py +++ b/pipenv/patched/notpip/_vendor/requests/__init__.py @@ -57,10 +57,10 @@ def check_compatibility(urllib3_version, chardet_version): # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 major, minor, patch = int(major), int(minor), int(patch) - # urllib3 >= 1.21.1, <= 1.24 + # urllib3 >= 1.21.1, <= 1.25 assert major == 1 assert minor >= 21 - assert minor <= 24 + assert minor <= 25 # Check chardet for compatibility. major, minor, patch = chardet_version.split('.')[:3] diff --git a/pipenv/patched/notpip/_vendor/requests/__version__.py b/pipenv/patched/notpip/_vendor/requests/__version__.py index f5b5d03671..9844f740ab 100644 --- a/pipenv/patched/notpip/_vendor/requests/__version__.py +++ b/pipenv/patched/notpip/_vendor/requests/__version__.py @@ -5,10 +5,10 @@ __title__ = 'requests' __description__ = 'Python HTTP for Humans.' __url__ = 'http://python-requests.org' -__version__ = '2.21.0' -__build__ = 0x022100 +__version__ = '2.22.0' +__build__ = 0x022200 __author__ = 'Kenneth Reitz' __author_email__ = 'me@kennethreitz.org' __license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2018 Kenneth Reitz' +__copyright__ = 'Copyright 2019 Kenneth Reitz' __cake__ = u'\u2728 \U0001f370 \u2728' diff --git a/pipenv/patched/notpip/_vendor/requests/api.py b/pipenv/patched/notpip/_vendor/requests/api.py index abada96d46..ef71d0759e 100644 --- a/pipenv/patched/notpip/_vendor/requests/api.py +++ b/pipenv/patched/notpip/_vendor/requests/api.py @@ -19,7 +19,7 @@ def request(method, url, **kwargs): :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send - in the body of the :class:`Request`. + in the query string for the :class:`Request`. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. @@ -65,7 +65,7 @@ def get(url, params=None, **kwargs): :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send - in the body of the :class:`Request`. + in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response diff --git a/pipenv/patched/notpip/_vendor/requests/packages.py b/pipenv/patched/notpip/_vendor/requests/packages.py index 258c89ed33..5fb6f07dfc 100644 --- a/pipenv/patched/notpip/_vendor/requests/packages.py +++ b/pipenv/patched/notpip/_vendor/requests/packages.py @@ -4,13 +4,13 @@ # I don't like it either. Just look the other way. :) for package in ('urllib3', 'idna', 'chardet'): - vendored_package = "notpip._vendor." 
+ package + vendored_package = "pipenv.patched.notpip._vendor." + package locals()[package] = __import__(vendored_package) # This traversal is apparently necessary such that the identities are # preserved (requests.packages.urllib3.* is urllib3.*) for mod in list(sys.modules): if mod == vendored_package or mod.startswith(vendored_package + '.'): - unprefixed_mod = mod[len("notpip._vendor."):] - sys.modules['notpip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod] + unprefixed_mod = mod[len("pipenv.patched.notpip._vendor."):] + sys.modules['pipenv.patched.notpip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod] # Kinda cool, though, right? diff --git a/pipenv/patched/notpip/_vendor/urllib3/__init__.py b/pipenv/patched/notpip/_vendor/urllib3/__init__.py index 148a9c31a7..8f5a21f346 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/__init__.py +++ b/pipenv/patched/notpip/_vendor/urllib3/__init__.py @@ -1,15 +1,10 @@ """ urllib3 - Thread-safe connection pooling and re-using. """ - from __future__ import absolute_import import warnings -from .connectionpool import ( - HTTPConnectionPool, - HTTPSConnectionPool, - connection_from_url -) +from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url from . import exceptions from .filepost import encode_multipart_formdata @@ -25,25 +20,25 @@ import logging from logging import NullHandler -__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' -__license__ = 'MIT' -__version__ = '1.24.1' +__author__ = "Andrey Petrov (andrey.petrov@shazow.net)" +__license__ = "MIT" +__version__ = "1.25.6" __all__ = ( - 'HTTPConnectionPool', - 'HTTPSConnectionPool', - 'PoolManager', - 'ProxyManager', - 'HTTPResponse', - 'Retry', - 'Timeout', - 'add_stderr_logger', - 'connection_from_url', - 'disable_warnings', - 'encode_multipart_formdata', - 'get_host', - 'make_headers', - 'proxy_from_url', + "HTTPConnectionPool", + "HTTPSConnectionPool", + "PoolManager", + "ProxyManager", + "HTTPResponse", + "Retry", + "Timeout", + "add_stderr_logger", + "connection_from_url", + "disable_warnings", + "encode_multipart_formdata", + "get_host", + "make_headers", + "proxy_from_url", ) logging.getLogger(__name__).addHandler(NullHandler()) @@ -60,10 +55,10 @@ def add_stderr_logger(level=logging.DEBUG): # even if urllib3 is vendored within another package. logger = logging.getLogger(__name__) handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) + handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s")) logger.addHandler(handler) logger.setLevel(level) - logger.debug('Added a stderr logging handler to logger: %s', __name__) + logger.debug("Added a stderr logging handler to logger: %s", __name__) return handler @@ -75,18 +70,17 @@ def add_stderr_logger(level=logging.DEBUG): # shouldn't be: otherwise, it's very hard for users to use most Python # mechanisms to silence them. # SecurityWarning's always go off by default. -warnings.simplefilter('always', exceptions.SecurityWarning, append=True) +warnings.simplefilter("always", exceptions.SecurityWarning, append=True) # SubjectAltNameWarning's should go off once per host -warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True) +warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True) # InsecurePlatformWarning's don't vary between requests, so we keep it default. 
-warnings.simplefilter('default', exceptions.InsecurePlatformWarning, - append=True) +warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True) # SNIMissingWarnings should go off only once. -warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True) +warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True) def disable_warnings(category=exceptions.HTTPWarning): """ Helper for quickly disabling all urllib3 warnings. """ - warnings.simplefilter('ignore', category) + warnings.simplefilter("ignore", category) diff --git a/pipenv/patched/notpip/_vendor/urllib3/_collections.py b/pipenv/patched/notpip/_vendor/urllib3/_collections.py index 34f23811c6..019d1511d5 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/_collections.py +++ b/pipenv/patched/notpip/_vendor/urllib3/_collections.py @@ -1,4 +1,5 @@ from __future__ import absolute_import + try: from collections.abc import Mapping, MutableMapping except ImportError: @@ -6,6 +7,7 @@ try: from threading import RLock except ImportError: # Platform-specific: No threads available + class RLock: def __enter__(self): pass @@ -19,7 +21,7 @@ def __exit__(self, exc_type, exc_value, traceback): from .packages.six import iterkeys, itervalues, PY3 -__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] +__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"] _Null = object() @@ -82,7 +84,9 @@ def __len__(self): return len(self._container) def __iter__(self): - raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') + raise NotImplementedError( + "Iteration over this class is unlikely to be threadsafe." + ) def clear(self): with self.lock: @@ -150,7 +154,7 @@ def __setitem__(self, key, val): def __getitem__(self, key): val = self._container[key.lower()] - return ', '.join(val[1:]) + return ", ".join(val[1:]) def __delitem__(self, key): del self._container[key.lower()] @@ -159,12 +163,13 @@ def __contains__(self, key): return key.lower() in self._container def __eq__(self, other): - if not isinstance(other, Mapping) and not hasattr(other, 'keys'): + if not isinstance(other, Mapping) and not hasattr(other, "keys"): return False if not isinstance(other, type(self)): other = type(self)(other) - return (dict((k.lower(), v) for k, v in self.itermerged()) == - dict((k.lower(), v) for k, v in other.itermerged())) + return dict((k.lower(), v) for k, v in self.itermerged()) == dict( + (k.lower(), v) for k, v in other.itermerged() + ) def __ne__(self, other): return not self.__eq__(other) @@ -184,9 +189,9 @@ def __iter__(self): yield vals[0] def pop(self, key, default=__marker): - '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + """D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. - ''' + """ # Using the MutableMapping function directly fails due to the private marker. # Using ordinary dict.pop would expose the internal structures. # So let's reinvent the wheel. 
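For reference, the duplicate-header behaviour the surrounding
``HTTPHeaderDict`` hunks reformat (a hedged sketch; the import path mirrors
the vendored location shown in the diff, and ``add`` is the class's
multi-value setter)::

    from pipenv.patched.notpip._vendor.urllib3._collections import HTTPHeaderDict

    h = HTTPHeaderDict()
    h.add("Set-Cookie", "a=1")
    h.add("Set-Cookie", "b=2")

    assert h["Set-Cookie"] == "a=1, b=2"                       # __getitem__ joins values
    assert dict(h.itermerged()) == {"Set-Cookie": "a=1, b=2"}  # merged iteration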
@@ -228,8 +233,10 @@ def extend(self, *args, **kwargs): with self.add instead of self.__setitem__ """ if len(args) > 1: - raise TypeError("extend() takes at most 1 positional " - "arguments ({0} given)".format(len(args))) + raise TypeError( + "extend() takes at most 1 positional " + "arguments ({0} given)".format(len(args)) + ) other = args[0] if len(args) >= 1 else () if isinstance(other, HTTPHeaderDict): @@ -295,7 +302,7 @@ def itermerged(self): """Iterate over all headers, merging duplicate ones together.""" for key in self: val = self._container[key.lower()] - yield val[0], ', '.join(val[1:]) + yield val[0], ", ".join(val[1:]) def items(self): return list(self.iteritems()) @@ -306,7 +313,7 @@ def from_httplib(cls, message): # Python 2 # python2.7 does not expose a proper API for exporting multiheaders # efficiently. This function re-reads raw lines from the message # object and extracts the multiheaders properly. - obs_fold_continued_leaders = (' ', '\t') + obs_fold_continued_leaders = (" ", "\t") headers = [] for line in message.headers: @@ -316,14 +323,14 @@ def from_httplib(cls, message): # Python 2 # in RFC-7230 S3.2.4. This indicates a multiline header, but # there exists no previous header to which we can attach it. raise InvalidHeader( - 'Header continuation with no previous header: %s' % line + "Header continuation with no previous header: %s" % line ) else: key, value = headers[-1] - headers[-1] = (key, value + ' ' + line.strip()) + headers[-1] = (key, value + " " + line.strip()) continue - key, value = line.split(':', 1) + key, value = line.split(":", 1) headers.append((key, value.strip())) return cls(headers) diff --git a/pipenv/patched/notpip/_vendor/urllib3/connection.py b/pipenv/patched/notpip/_vendor/urllib3/connection.py index 02b36654bd..3eeb1af58e 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/connection.py +++ b/pipenv/patched/notpip/_vendor/urllib3/connection.py @@ -11,6 +11,7 @@ try: # Compiled with SSL? import ssl + BaseSSLError = ssl.SSLError except (ImportError, AttributeError): # Platform-specific: No SSL. ssl = None @@ -19,10 +20,11 @@ class BaseSSLError(BaseException): pass -try: # Python 3: - # Not a no-op, we're adding this to the namespace so it can be imported. +try: + # Python 3: not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError -except NameError: # Python 2: +except NameError: + # Python 2 class ConnectionError(Exception): pass @@ -40,7 +42,7 @@ class ConnectionError(Exception): resolve_ssl_version, assert_fingerprint, create_urllib3_context, - ssl_wrap_socket + ssl_wrap_socket, ) @@ -50,20 +52,16 @@ class ConnectionError(Exception): log = logging.getLogger(__name__) -port_by_scheme = { - 'http': 80, - 'https': 443, -} +port_by_scheme = {"http": 80, "https": 443} -# When updating RECENT_DATE, move it to within two years of the current date, -# and not less than 6 months ago. -# Example: if Today is 2018-01-01, then RECENT_DATE should be any date on or -# after 2016-01-01 (today - 2 years) AND before 2017-07-01 (today - 6 months) -RECENT_DATE = datetime.date(2017, 6, 30) +# When it comes time to update this value as a part of regular maintenance +# (ie test_recent_date is failing) update it to ~6 months before the current date. 
+RECENT_DATE = datetime.date(2019, 1, 1) class DummyConnection(object): """Used to detect a failed ConnectionCls import.""" + pass @@ -91,7 +89,7 @@ class HTTPConnection(_HTTPConnection, object): Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ - default_port = port_by_scheme['http'] + default_port = port_by_scheme["http"] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` @@ -101,15 +99,15 @@ class HTTPConnection(_HTTPConnection, object): is_verified = False def __init__(self, *args, **kw): - if six.PY3: # Python 3 - kw.pop('strict', None) + if not six.PY2: + kw.pop("strict", None) # Pre-set source_address. - self.source_address = kw.get('source_address') + self.source_address = kw.get("source_address") #: The socket options provided by the user. If no options are #: provided, we use the default options. - self.socket_options = kw.pop('socket_options', self.default_socket_options) + self.socket_options = kw.pop("socket_options", self.default_socket_options) _HTTPConnection.__init__(self, *args, **kw) @@ -130,7 +128,7 @@ def host(self): those cases where it's appropriate (i.e., when doing DNS lookup to establish the actual TCP connection across which we're going to send HTTP requests). """ - return self._dns_host.rstrip('.') + return self._dns_host.rstrip(".") @host.setter def host(self, value): @@ -149,29 +147,34 @@ def _new_conn(self): """ extra_kw = {} if self.source_address: - extra_kw['source_address'] = self.source_address + extra_kw["source_address"] = self.source_address if self.socket_options: - extra_kw['socket_options'] = self.socket_options + extra_kw["socket_options"] = self.socket_options try: conn = connection.create_connection( - (self._dns_host, self.port), self.timeout, **extra_kw) + (self._dns_host, self.port), self.timeout, **extra_kw + ) - except SocketTimeout as e: + except SocketTimeout: raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) + self, + "Connection to %s timed out. (connect timeout=%s)" + % (self.host, self.timeout), + ) except SocketError as e: raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) + self, "Failed to establish a new connection: %s" % e + ) return conn def _prepare_conn(self, conn): self.sock = conn - if self._tunnel_host: + # Google App Engine's httplib does not define _tunnel_host + if getattr(self, "_tunnel_host", None): # TODO: Fix tunnel so it doesn't depend on self.sock state. 
self._tunnel() # Mark this connection as not reusable @@ -187,18 +190,15 @@ def request_chunked(self, method, url, body=None, headers=None): body with chunked encoding and not as one block """ headers = HTTPHeaderDict(headers if headers is not None else {}) - skip_accept_encoding = 'accept-encoding' in headers - skip_host = 'host' in headers + skip_accept_encoding = "accept-encoding" in headers + skip_host = "host" in headers self.putrequest( - method, - url, - skip_accept_encoding=skip_accept_encoding, - skip_host=skip_host + method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) for header, value in headers.items(): self.putheader(header, value) - if 'transfer-encoding' not in headers: - self.putheader('Transfer-Encoding', 'chunked') + if "transfer-encoding" not in headers: + self.putheader("Transfer-Encoding", "chunked") self.endheaders() if body is not None: @@ -209,54 +209,80 @@ def request_chunked(self, method, url, body=None, headers=None): if not chunk: continue if not isinstance(chunk, bytes): - chunk = chunk.encode('utf8') + chunk = chunk.encode("utf8") len_str = hex(len(chunk))[2:] - self.send(len_str.encode('utf-8')) - self.send(b'\r\n') + self.send(len_str.encode("utf-8")) + self.send(b"\r\n") self.send(chunk) - self.send(b'\r\n') + self.send(b"\r\n") # After the if clause, to always have a closed body - self.send(b'0\r\n\r\n') + self.send(b"0\r\n\r\n") class HTTPSConnection(HTTPConnection): - default_port = port_by_scheme['https'] + default_port = port_by_scheme["https"] ssl_version = None - def __init__(self, host, port=None, key_file=None, cert_file=None, - strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - ssl_context=None, server_hostname=None, **kw): - - HTTPConnection.__init__(self, host, port, strict=strict, - timeout=timeout, **kw) + def __init__( + self, + host, + port=None, + key_file=None, + cert_file=None, + key_password=None, + strict=None, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + ssl_context=None, + server_hostname=None, + **kw + ): + + HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw) self.key_file = key_file self.cert_file = cert_file + self.key_password = key_password self.ssl_context = ssl_context self.server_hostname = server_hostname # Required property for Google AppEngine 1.9.0 which otherwise causes # HTTPS requests to go out as HTTP. (See Issue #356) - self._protocol = 'https' + self._protocol = "https" def connect(self): conn = self._new_conn() self._prepare_conn(conn) + # Wrap socket using verification with the root certs in + # trusted_root_certs + default_ssl_context = False if self.ssl_context is None: + default_ssl_context = True self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(None), - cert_reqs=resolve_cert_reqs(None), + ssl_version=resolve_ssl_version(self.ssl_version), + cert_reqs=resolve_cert_reqs(self.cert_reqs), ) + # Try to load OS default certs if none are given. 
+ # Works well on Windows (requires Python3.4+) + context = self.ssl_context + if ( + not self.ca_certs + and not self.ca_cert_dir + and default_ssl_context + and hasattr(context, "load_default_certs") + ): + context.load_default_certs() + self.sock = ssl_wrap_socket( sock=conn, keyfile=self.key_file, certfile=self.cert_file, + key_password=self.key_password, ssl_context=self.ssl_context, - server_hostname=self.server_hostname + server_hostname=self.server_hostname, ) @@ -265,32 +291,39 @@ class VerifiedHTTPSConnection(HTTPSConnection): Based on httplib.HTTPSConnection but wraps the socket with SSL certification. """ + cert_reqs = None ca_certs = None ca_cert_dir = None ssl_version = None assert_fingerprint = None - def set_cert(self, key_file=None, cert_file=None, - cert_reqs=None, ca_certs=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None): + def set_cert( + self, + key_file=None, + cert_file=None, + cert_reqs=None, + key_password=None, + ca_certs=None, + assert_hostname=None, + assert_fingerprint=None, + ca_cert_dir=None, + ): """ This method should only be called once, before the connection is used. """ - # If cert_reqs is not provided, we can try to guess. If the user gave - # us a cert database, we assume they want to use it: otherwise, if - # they gave us an SSL Context object we should use whatever is set for - # it. + # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also + # have an SSLContext object in which case we'll use its verify_mode. if cert_reqs is None: - if ca_certs or ca_cert_dir: - cert_reqs = 'CERT_REQUIRED' - elif self.ssl_context is not None: + if self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode + else: + cert_reqs = resolve_cert_reqs(None) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs + self.key_password = key_password self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ca_certs = ca_certs and os.path.expanduser(ca_certs) @@ -301,7 +334,8 @@ def connect(self): conn = self._new_conn() hostname = self.host - if self._tunnel_host: + # Google App Engine's httplib does not define _tunnel_host + if getattr(self, "_tunnel_host", None): self.sock = conn # Calls self._set_hostport(), so self.host is # self._tunnel_host below. @@ -318,15 +352,19 @@ def connect(self): is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: - warnings.warn(( - 'System time is way off (before {0}). This will probably ' - 'lead to SSL verification errors').format(RECENT_DATE), - SystemTimeWarning + warnings.warn( + ( + "System time is way off (before {0}). This will probably " + "lead to SSL verification errors" + ).format(RECENT_DATE), + SystemTimeWarning, ) # Wrap socket using verification with the root certs in # trusted_root_certs + default_ssl_context = False if self.ssl_context is None: + default_ssl_context = True self.ssl_context = create_urllib3_context( ssl_version=resolve_ssl_version(self.ssl_version), cert_reqs=resolve_cert_reqs(self.cert_reqs), @@ -334,38 +372,56 @@ def connect(self): context = self.ssl_context context.verify_mode = resolve_cert_reqs(self.cert_reqs) + + # Try to load OS default certs if none are given. 
+ # Works well on Windows (requires Python3.4+) + if ( + not self.ca_certs + and not self.ca_cert_dir + and default_ssl_context + and hasattr(context, "load_default_certs") + ): + context.load_default_certs() + self.sock = ssl_wrap_socket( sock=conn, keyfile=self.key_file, certfile=self.cert_file, + key_password=self.key_password, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, server_hostname=server_hostname, - ssl_context=context) + ssl_context=context, + ) if self.assert_fingerprint: - assert_fingerprint(self.sock.getpeercert(binary_form=True), - self.assert_fingerprint) - elif context.verify_mode != ssl.CERT_NONE \ - and not getattr(context, 'check_hostname', False) \ - and self.assert_hostname is not False: + assert_fingerprint( + self.sock.getpeercert(binary_form=True), self.assert_fingerprint + ) + elif ( + context.verify_mode != ssl.CERT_NONE + and not getattr(context, "check_hostname", False) + and self.assert_hostname is not False + ): # While urllib3 attempts to always turn off hostname matching from # the TLS library, this cannot always be done. So we check whether # the TLS Library still thinks it's matching hostnames. cert = self.sock.getpeercert() - if not cert.get('subjectAltName', ()): - warnings.warn(( - 'Certificate for {0} has no `subjectAltName`, falling back to check for a ' - '`commonName` for now. This feature is being removed by major browsers and ' - 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 ' - 'for details.)'.format(hostname)), - SubjectAltNameWarning + if not cert.get("subjectAltName", ()): + warnings.warn( + ( + "Certificate for {0} has no `subjectAltName`, falling back to check for a " + "`commonName` for now. This feature is being removed by major browsers and " + "deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 " + "for details.)".format(hostname) + ), + SubjectAltNameWarning, ) _match_hostname(cert, self.assert_hostname or server_hostname) self.is_verified = ( - context.verify_mode == ssl.CERT_REQUIRED or - self.assert_fingerprint is not None + context.verify_mode == ssl.CERT_REQUIRED + or self.assert_fingerprint is not None ) @@ -373,9 +429,10 @@ def _match_hostname(cert, asserted_hostname): try: match_hostname(cert, asserted_hostname) except CertificateError as e: - log.error( - 'Certificate did not match expected hostname: %s. ' - 'Certificate: %s', asserted_hostname, cert + log.warning( + "Certificate did not match expected hostname: %s. 
" "Certificate: %s", + asserted_hostname, + cert, ) # Add cert to exception and reraise so client code can inspect # the cert when catching the exception, if they want to diff --git a/pipenv/patched/notpip/_vendor/urllib3/connectionpool.py b/pipenv/patched/notpip/_vendor/urllib3/connectionpool.py index f7a8f193d1..e73fa57a42 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/connectionpool.py +++ b/pipenv/patched/notpip/_vendor/urllib3/connectionpool.py @@ -29,8 +29,11 @@ from .connection import ( port_by_scheme, DummyConnection, - HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, - HTTPException, BaseSSLError, + HTTPConnection, + HTTPSConnection, + VerifiedHTTPSConnection, + HTTPException, + BaseSSLError, ) from .request import RequestMethods from .response import HTTPResponse @@ -40,7 +43,13 @@ from .util.response import assert_header_parsing from .util.retry import Retry from .util.timeout import Timeout -from .util.url import get_host, Url, NORMALIZABLE_SCHEMES +from .util.url import ( + get_host, + parse_url, + Url, + _normalize_host as normalize_host, + _encode_target, +) from .util.queue import LifoQueue @@ -65,13 +74,12 @@ def __init__(self, host, port=None): if not host: raise LocationValueError("No host specified.") - self.host = _ipv6_host(host, self.scheme) + self.host = _normalize_host(host, scheme=self.scheme) self._proxy_host = host.lower() self.port = port def __str__(self): - return '%s(host=%r, port=%r)' % (type(self).__name__, - self.host, self.port) + return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port) def __enter__(self): return self @@ -152,15 +160,24 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): :class:`urllib3.connection.HTTPSConnection` instances. """ - scheme = 'http' + scheme = "http" ConnectionCls = HTTPConnection ResponseCls = HTTPResponse - def __init__(self, host, port=None, strict=False, - timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, - headers=None, retries=None, - _proxy=None, _proxy_headers=None, - **conn_kw): + def __init__( + self, + host, + port=None, + strict=False, + timeout=Timeout.DEFAULT_TIMEOUT, + maxsize=1, + block=False, + headers=None, + retries=None, + _proxy=None, + _proxy_headers=None, + **conn_kw + ): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) @@ -194,19 +211,27 @@ def __init__(self, host, port=None, strict=False, # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. # We cannot know if the user has added default socket options, so we cannot replace the # list. - self.conn_kw.setdefault('socket_options', []) + self.conn_kw.setdefault("socket_options", []) def _new_conn(self): """ Return a fresh :class:`HTTPConnection`. 
""" self.num_connections += 1 - log.debug("Starting new HTTP connection (%d): %s:%s", - self.num_connections, self.host, self.port or "80") - - conn = self.ConnectionCls(host=self.host, port=self.port, - timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) + log.debug( + "Starting new HTTP connection (%d): %s:%s", + self.num_connections, + self.host, + self.port or "80", + ) + + conn = self.ConnectionCls( + host=self.host, + port=self.port, + timeout=self.timeout.connect_timeout, + strict=self.strict, + **self.conn_kw + ) return conn def _get_conn(self, timeout=None): @@ -230,16 +255,17 @@ def _get_conn(self, timeout=None): except queue.Empty: if self.block: - raise EmptyPoolError(self, - "Pool reached maximum size and no more " - "connections are allowed.") + raise EmptyPoolError( + self, + "Pool reached maximum size and no more " "connections are allowed.", + ) pass # Oh well, we'll create a new connection then # If this is a persistent connection, check if it got disconnected if conn and is_connection_dropped(conn): log.debug("Resetting dropped connection: %s", self.host) conn.close() - if getattr(conn, 'auto_open', 1) == 0: + if getattr(conn, "auto_open", 1) == 0: # This is a proxied connection that has been mutated by # httplib._tunnel() and cannot be reused (since it would # attempt to bypass the proxy) @@ -269,9 +295,7 @@ def _put_conn(self, conn): pass except queue.Full: # This should never happen if self.block == True - log.warning( - "Connection pool is full, discarding connection: %s", - self.host) + log.warning("Connection pool is full, discarding connection: %s", self.host) # Connection never got put back into the pool, close it. if conn: @@ -303,21 +327,30 @@ def _raise_timeout(self, err, url, timeout_value): """Is the error actually a timeout? Will raise a ReadTimeout or pass""" if isinstance(err, SocketTimeout): - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + raise ReadTimeoutError( + self, url, "Read timed out. (read timeout=%s)" % timeout_value + ) # See the above comment about EAGAIN in Python 3. In Python 2 we have # to specifically catch it and throw the timeout error - if hasattr(err, 'errno') and err.errno in _blocking_errnos: - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + if hasattr(err, "errno") and err.errno in _blocking_errnos: + raise ReadTimeoutError( + self, url, "Read timed out. (read timeout=%s)" % timeout_value + ) # Catch possible read timeouts thrown as SSL errors. If not the # case, rethrow the original. We need to do this because of: # http://bugs.python.org/issue10272 - if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python < 2.7.4 - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - def _make_request(self, conn, method, url, timeout=_Default, chunked=False, - **httplib_request_kw): + if "timed out" in str(err) or "did not complete (read)" in str( + err + ): # Python < 2.7.4 + raise ReadTimeoutError( + self, url, "Read timed out. (read timeout=%s)" % timeout_value + ) + + def _make_request( + self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw + ): """ Perform a request on a given urllib connection object taken from our pool. 
@@ -357,7 +390,7 @@ def _make_request(self, conn, method, url, timeout=_Default, chunked=False, read_timeout = timeout_obj.read_timeout # App Engine doesn't have a sock attr - if getattr(conn, 'sock', None): + if getattr(conn, "sock", None): # In Python 3 socket.py will catch EAGAIN and return None when you # try and read into the file pointer created by http.client, which # instead raises a BadStatusLine exception. Instead of catching @@ -365,7 +398,8 @@ def _make_request(self, conn, method, url, timeout=_Default, chunked=False, # timeouts, check for a zero timeout before making the request. if read_timeout == 0: raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout) + self, url, "Read timed out. (read timeout=%s)" % read_timeout + ) if read_timeout is Timeout.DEFAULT_TIMEOUT: conn.sock.settimeout(socket.getdefaulttimeout()) else: # None or a value @@ -373,31 +407,45 @@ def _make_request(self, conn, method, url, timeout=_Default, chunked=False, # Receive the response from the server try: - try: # Python 2.7, use buffering of HTTP responses + try: + # Python 2.7, use buffering of HTTP responses httplib_response = conn.getresponse(buffering=True) - except TypeError: # Python 3 + except TypeError: + # Python 3 try: httplib_response = conn.getresponse() - except Exception as e: - # Remove the TypeError from the exception chain in Python 3; - # otherwise it looks like a programming error was the cause. + except BaseException as e: + # Remove the TypeError from the exception chain in + # Python 3 (including for exceptions like SystemExit). + # Otherwise it looks like a bug in the code. six.raise_from(e, None) except (SocketTimeout, BaseSSLError, SocketError) as e: self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise # AppEngine doesn't have a version attr. - http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') - log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port, - method, url, http_version, httplib_response.status, - httplib_response.length) + http_version = getattr(conn, "_http_vsn_str", "HTTP/?") + log.debug( + '%s://%s:%s "%s %s %s" %s %s', + self.scheme, + self.host, + self.port, + method, + url, + http_version, + httplib_response.status, + httplib_response.length, + ) try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 log.warning( - 'Failed to parse headers (url=%s): %s', - self._absolute_url(url), hpe, exc_info=True) + "Failed to parse headers (url=%s): %s", + self._absolute_url(url), + hpe, + exc_info=True, + ) return httplib_response @@ -427,13 +475,13 @@ def is_same_host(self, url): Check if the given ``url`` is a member of the same host as this connection pool. """ - if url.startswith('/'): + if url.startswith("/"): return True # TODO: Add optional support for socket.gethostbyname checking. 
scheme, host, port = get_host(url) - - host = _ipv6_host(host, self.scheme) + if host is not None: + host = _normalize_host(host, scheme=scheme) # Use explicit default port for comparison when none is given if self.port and not port: @@ -443,10 +491,22 @@ def is_same_host(self, url): return (scheme, host, port) == (self.scheme, self.host, self.port) - def urlopen(self, method, url, body=None, headers=None, retries=None, - redirect=True, assert_same_host=True, timeout=_Default, - pool_timeout=None, release_conn=None, chunked=False, - body_pos=None, **response_kw): + def urlopen( + self, + method, + url, + body=None, + headers=None, + retries=None, + redirect=True, + assert_same_host=True, + timeout=_Default, + pool_timeout=None, + release_conn=None, + chunked=False, + body_pos=None, + **response_kw + ): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all @@ -544,12 +604,18 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: - release_conn = response_kw.get('preload_content', True) + release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) + # Ensure that the URL we're connecting to is properly encoded + if url.startswith("/"): + url = six.ensure_str(_encode_target(url)) + else: + url = six.ensure_str(parse_url(url).url) + conn = None # Track whether `conn` needs to be released before @@ -566,7 +632,7 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, # Merge the proxy headers. Only do this in HTTP. We have to copy the # headers dict so we can safely change it without those changes being # reflected in anyone else's copy. - if self.scheme == 'http': + if self.scheme == "http": headers = headers.copy() headers.update(self.proxy_headers) @@ -589,15 +655,22 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, conn.timeout = timeout_obj.connect_timeout - is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) + is_new_proxy_conn = self.proxy is not None and not getattr( + conn, "sock", None + ) if is_new_proxy_conn: self._prepare_proxy(conn) # Make the request on the httplib connection object. - httplib_response = self._make_request(conn, method, url, - timeout=timeout_obj, - body=body, headers=headers, - chunked=chunked) + httplib_response = self._make_request( + conn, + method, + url, + timeout=timeout_obj, + body=body, + headers=headers, + chunked=chunked, + ) # If we're going to release the connection in ``finally:``, then # the response doesn't need to know about the connection. Otherwise @@ -606,14 +679,16 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, response_conn = conn if not release_conn else None # Pass method to Response for length checking - response_kw['request_method'] = method + response_kw["request_method"] = method # Import httplib's response into our own wrapper object - response = self.ResponseCls.from_httplib(httplib_response, - pool=self, - connection=response_conn, - retries=retries, - **response_kw) + response = self.ResponseCls.from_httplib( + httplib_response, + pool=self, + connection=response_conn, + retries=retries, + **response_kw + ) # Everything went great! 
clean_exit = True @@ -622,20 +697,28 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, # Timed out by queue. raise EmptyPoolError(self, "No pool connections are available.") - except (TimeoutError, HTTPException, SocketError, ProtocolError, - BaseSSLError, SSLError, CertificateError) as e: + except ( + TimeoutError, + HTTPException, + SocketError, + ProtocolError, + BaseSSLError, + SSLError, + CertificateError, + ) as e: # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False if isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: - e = ProxyError('Cannot connect to proxy.', e) + e = ProxyError("Cannot connect to proxy.", e) elif isinstance(e, (SocketError, HTTPException)): - e = ProtocolError('Connection aborted.', e) + e = ProtocolError("Connection aborted.", e) - retries = retries.increment(method, url, error=e, _pool=self, - _stacktrace=sys.exc_info()[2]) + retries = retries.increment( + method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] + ) retries.sleep() # Keep track of the error for the retry warning. @@ -658,28 +741,47 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, if not conn: # Try again - log.warning("Retrying (%r) after connection " - "broken by '%r': %s", retries, err, url) - return self.urlopen(method, url, body, headers, retries, - redirect, assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) + log.warning( + "Retrying (%r) after connection " "broken by '%r': %s", + retries, + err, + url, + ) + return self.urlopen( + method, + url, + body, + headers, + retries, + redirect, + assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + body_pos=body_pos, + **response_kw + ) def drain_and_release_conn(response): try: # discard any remaining response body, the connection will be # released back to the pool once the entire response is read response.read() - except (TimeoutError, HTTPException, SocketError, ProtocolError, - BaseSSLError, SSLError) as e: + except ( + TimeoutError, + HTTPException, + SocketError, + ProtocolError, + BaseSSLError, + SSLError, + ): pass # Handle redirect? redirect_location = redirect and response.get_redirect_location() if redirect_location: if response.status == 303: - method = 'GET' + method = "GET" try: retries = retries.increment(method, url, response=response, _pool=self) @@ -697,15 +799,22 @@ def drain_and_release_conn(response): retries.sleep_for_retry(response) log.debug("Redirecting %s -> %s", url, redirect_location) return self.urlopen( - method, redirect_location, body, headers, - retries=retries, redirect=redirect, + method, + redirect_location, + body, + headers, + retries=retries, + redirect=redirect, assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + body_pos=body_pos, + **response_kw + ) # Check if we should retry the HTTP response. 
- has_retry_after = bool(response.getheader('Retry-After')) + has_retry_after = bool(response.getheader("Retry-After")) if retries.is_retry(method, response.status, has_retry_after): try: retries = retries.increment(method, url, response=response, _pool=self) @@ -723,12 +832,19 @@ def drain_and_release_conn(response): retries.sleep(response) log.debug("Retry: %s", url) return self.urlopen( - method, url, body, headers, - retries=retries, redirect=redirect, + method, + url, + body, + headers, + retries=retries, + redirect=redirect, assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, + timeout=timeout, + pool_timeout=pool_timeout, release_conn=release_conn, - body_pos=body_pos, **response_kw) + body_pos=body_pos, + **response_kw + ) return response @@ -746,33 +862,57 @@ class HTTPSConnectionPool(HTTPConnectionPool): If ``assert_hostname`` is False, no verification is done. The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, - ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is - available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade + ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl` + is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket. """ - scheme = 'https' + scheme = "https" ConnectionCls = HTTPSConnection - def __init__(self, host, port=None, - strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, - block=False, headers=None, retries=None, - _proxy=None, _proxy_headers=None, - key_file=None, cert_file=None, cert_reqs=None, - ca_certs=None, ssl_version=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None, **conn_kw): - - HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, - block, headers, retries, _proxy, _proxy_headers, - **conn_kw) - - if ca_certs and cert_reqs is None: - cert_reqs = 'CERT_REQUIRED' + def __init__( + self, + host, + port=None, + strict=False, + timeout=Timeout.DEFAULT_TIMEOUT, + maxsize=1, + block=False, + headers=None, + retries=None, + _proxy=None, + _proxy_headers=None, + key_file=None, + cert_file=None, + cert_reqs=None, + key_password=None, + ca_certs=None, + ssl_version=None, + assert_hostname=None, + assert_fingerprint=None, + ca_cert_dir=None, + **conn_kw + ): + + HTTPConnectionPool.__init__( + self, + host, + port, + strict, + timeout, + maxsize, + block, + headers, + retries, + _proxy, + _proxy_headers, + **conn_kw + ) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs + self.key_password = key_password self.ca_certs = ca_certs self.ca_cert_dir = ca_cert_dir self.ssl_version = ssl_version @@ -786,13 +926,16 @@ def _prepare_conn(self, conn): """ if isinstance(conn, VerifiedHTTPSConnection): - conn.set_cert(key_file=self.key_file, - cert_file=self.cert_file, - cert_reqs=self.cert_reqs, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint) + conn.set_cert( + key_file=self.key_file, + key_password=self.key_password, + cert_file=self.cert_file, + cert_reqs=self.cert_reqs, + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + assert_hostname=self.assert_hostname, + assert_fingerprint=self.assert_fingerprint, + ) conn.ssl_version = self.ssl_version return conn @@ -809,12 +952,17 @@ def _new_conn(self): Return a fresh :class:`httplib.HTTPSConnection`. 
""" self.num_connections += 1 - log.debug("Starting new HTTPS connection (%d): %s:%s", - self.num_connections, self.host, self.port or "443") + log.debug( + "Starting new HTTPS connection (%d): %s:%s", + self.num_connections, + self.host, + self.port or "443", + ) if not self.ConnectionCls or self.ConnectionCls is DummyConnection: - raise SSLError("Can't connect to HTTPS URL because the SSL " - "module is not available.") + raise SSLError( + "Can't connect to HTTPS URL because the SSL " "module is not available." + ) actual_host = self.host actual_port = self.port @@ -822,9 +970,16 @@ def _new_conn(self): actual_host = self.proxy.host actual_port = self.proxy.port - conn = self.ConnectionCls(host=actual_host, port=actual_port, - timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) + conn = self.ConnectionCls( + host=actual_host, + port=actual_port, + timeout=self.timeout.connect_timeout, + strict=self.strict, + cert_file=self.cert_file, + key_file=self.key_file, + key_password=self.key_password, + **self.conn_kw + ) return self._prepare_conn(conn) @@ -835,16 +990,19 @@ def _validate_conn(self, conn): super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. - if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` + if not getattr(conn, "sock", None): # AppEngine might not have `.sock` conn.connect() if not conn.is_verified: - warnings.warn(( - 'Unverified HTTPS request is being made. ' - 'Adding certificate verification is strongly advised. See: ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings'), - InsecureRequestWarning) + warnings.warn( + ( + "Unverified HTTPS request is being made. " + "Adding certificate verification is strongly advised. See: " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#ssl-warnings" + ), + InsecureRequestWarning, + ) def connection_from_url(url, **kw): @@ -869,28 +1027,25 @@ def connection_from_url(url, **kw): """ scheme, host, port = get_host(url) port = port or port_by_scheme.get(scheme, 80) - if scheme == 'https': + if scheme == "https": return HTTPSConnectionPool(host, port=port, **kw) else: return HTTPConnectionPool(host, port=port, **kw) -def _ipv6_host(host, scheme): +def _normalize_host(host, scheme): """ - Process IPv6 address literals + Normalize hosts for comparisons and use with sockets. """ + host = normalize_host(host, scheme) + # httplib doesn't like it when we include brackets in IPv6 addresses # Specifically, if we include brackets but also pass the port then # httplib crazily doubles up the square brackets on the Host header. # Instead, we need to make sure we never pass ``None`` as the port. # However, for backward compatibility reasons we can't actually # *assert* that. 
See http://bugs.python.org/issue28539 - # - # Also if an IPv6 address literal has a zone identifier, the - # percent sign might be URIencoded, convert it back into ASCII - if host.startswith('[') and host.endswith(']'): - host = host.replace('%25', '%').strip('[]') - if scheme in NORMALIZABLE_SCHEMES: - host = host.lower() + if host.startswith("[") and host.endswith("]"): + host = host[1:-1] return host diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/_appengine_environ.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/_appengine_environ.py index f3e00942cb..c909010bf2 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/_appengine_environ.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/_appengine_environ.py @@ -6,9 +6,7 @@ def is_appengine(): - return (is_local_appengine() or - is_prod_appengine() or - is_prod_appengine_mvms()) + return is_local_appengine() or is_prod_appengine() or is_prod_appengine_mvms() def is_appengine_sandbox(): @@ -16,15 +14,19 @@ def is_appengine_sandbox(): def is_local_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Development/' in os.environ['SERVER_SOFTWARE']) + return ( + "APPENGINE_RUNTIME" in os.environ + and "Development/" in os.environ["SERVER_SOFTWARE"] + ) def is_prod_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and - not is_prod_appengine_mvms()) + return ( + "APPENGINE_RUNTIME" in os.environ + and "Google App Engine/" in os.environ["SERVER_SOFTWARE"] + and not is_prod_appengine_mvms() + ) def is_prod_appengine_mvms(): - return os.environ.get('GAE_VM', False) == 'true' + return os.environ.get("GAE_VM", False) == "true" diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/bindings.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/bindings.py index bcf41c02b2..b46e1e3b5d 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/bindings.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/bindings.py @@ -34,29 +34,35 @@ import platform from ctypes.util import find_library from ctypes import ( - c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long, - c_bool + c_void_p, + c_int32, + c_char_p, + c_size_t, + c_byte, + c_uint32, + c_ulong, + c_long, + c_bool, ) from ctypes import CDLL, POINTER, CFUNCTYPE -security_path = find_library('Security') +security_path = find_library("Security") if not security_path: - raise ImportError('The library Security could not be found') + raise ImportError("The library Security could not be found") -core_foundation_path = find_library('CoreFoundation') +core_foundation_path = find_library("CoreFoundation") if not core_foundation_path: - raise ImportError('The library CoreFoundation could not be found') + raise ImportError("The library CoreFoundation could not be found") version = platform.mac_ver()[0] -version_info = tuple(map(int, version.split('.'))) +version_info = tuple(map(int, version.split("."))) if version_info < (10, 8): raise OSError( - 'Only OS X 10.8 and newer are supported, not %s.%s' % ( - version_info[0], version_info[1] - ) + "Only OS X 10.8 and newer are supported, not %s.%s" + % (version_info[0], version_info[1]) ) Security = CDLL(security_path, use_errno=True) @@ -129,27 +135,19 @@ Security.SecKeyGetTypeID.argtypes = [] Security.SecKeyGetTypeID.restype = CFTypeID - Security.SecCertificateCreateWithData.argtypes = [ - CFAllocatorRef, - CFDataRef - ] + Security.SecCertificateCreateWithData.argtypes = 
[CFAllocatorRef, CFDataRef] Security.SecCertificateCreateWithData.restype = SecCertificateRef - Security.SecCertificateCopyData.argtypes = [ - SecCertificateRef - ] + Security.SecCertificateCopyData.argtypes = [SecCertificateRef] Security.SecCertificateCopyData.restype = CFDataRef - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] + Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] Security.SecCopyErrorMessageString.restype = CFStringRef Security.SecIdentityCreateWithCertificate.argtypes = [ CFTypeRef, SecCertificateRef, - POINTER(SecIdentityRef) + POINTER(SecIdentityRef), ] Security.SecIdentityCreateWithCertificate.restype = OSStatus @@ -159,201 +157,126 @@ c_void_p, Boolean, c_void_p, - POINTER(SecKeychainRef) + POINTER(SecKeychainRef), ] Security.SecKeychainCreate.restype = OSStatus - Security.SecKeychainDelete.argtypes = [ - SecKeychainRef - ] + Security.SecKeychainDelete.argtypes = [SecKeychainRef] Security.SecKeychainDelete.restype = OSStatus Security.SecPKCS12Import.argtypes = [ CFDataRef, CFDictionaryRef, - POINTER(CFArrayRef) + POINTER(CFArrayRef), ] Security.SecPKCS12Import.restype = OSStatus SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) - SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)) + SSLWriteFunc = CFUNCTYPE( + OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t) + ) - Security.SSLSetIOFuncs.argtypes = [ - SSLContextRef, - SSLReadFunc, - SSLWriteFunc - ] + Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc] Security.SSLSetIOFuncs.restype = OSStatus - Security.SSLSetPeerID.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] + Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t] Security.SSLSetPeerID.restype = OSStatus - Security.SSLSetCertificate.argtypes = [ - SSLContextRef, - CFArrayRef - ] + Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef] Security.SSLSetCertificate.restype = OSStatus - Security.SSLSetCertificateAuthorities.argtypes = [ - SSLContextRef, - CFTypeRef, - Boolean - ] + Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean] Security.SSLSetCertificateAuthorities.restype = OSStatus - Security.SSLSetConnection.argtypes = [ - SSLContextRef, - SSLConnectionRef - ] + Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef] Security.SSLSetConnection.restype = OSStatus - Security.SSLSetPeerDomainName.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] + Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t] Security.SSLSetPeerDomainName.restype = OSStatus - Security.SSLHandshake.argtypes = [ - SSLContextRef - ] + Security.SSLHandshake.argtypes = [SSLContextRef] Security.SSLHandshake.restype = OSStatus - Security.SSLRead.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] + Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] Security.SSLRead.restype = OSStatus - Security.SSLWrite.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] + Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] Security.SSLWrite.restype = OSStatus - Security.SSLClose.argtypes = [ - SSLContextRef - ] + Security.SSLClose.argtypes = [SSLContextRef] Security.SSLClose.restype = OSStatus - Security.SSLGetNumberSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(c_size_t) - ] + Security.SSLGetNumberSupportedCiphers.argtypes = 
[SSLContextRef, POINTER(c_size_t)] Security.SSLGetNumberSupportedCiphers.restype = OSStatus Security.SSLGetSupportedCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), - POINTER(c_size_t) + POINTER(c_size_t), ] Security.SSLGetSupportedCiphers.restype = OSStatus Security.SSLSetEnabledCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), - c_size_t + c_size_t, ] Security.SSLSetEnabledCiphers.restype = OSStatus - Security.SSLGetNumberEnabledCiphers.argtype = [ - SSLContextRef, - POINTER(c_size_t) - ] + Security.SSLGetNumberEnabledCiphers.argtype = [SSLContextRef, POINTER(c_size_t)] Security.SSLGetNumberEnabledCiphers.restype = OSStatus Security.SSLGetEnabledCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), - POINTER(c_size_t) + POINTER(c_size_t), ] Security.SSLGetEnabledCiphers.restype = OSStatus - Security.SSLGetNegotiatedCipher.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite) - ] + Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)] Security.SSLGetNegotiatedCipher.restype = OSStatus Security.SSLGetNegotiatedProtocolVersion.argtypes = [ SSLContextRef, - POINTER(SSLProtocol) + POINTER(SSLProtocol), ] Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus - Security.SSLCopyPeerTrust.argtypes = [ - SSLContextRef, - POINTER(SecTrustRef) - ] + Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)] Security.SSLCopyPeerTrust.restype = OSStatus - Security.SecTrustSetAnchorCertificates.argtypes = [ - SecTrustRef, - CFArrayRef - ] + Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef] Security.SecTrustSetAnchorCertificates.restype = OSStatus - Security.SecTrustSetAnchorCertificatesOnly.argstypes = [ - SecTrustRef, - Boolean - ] + Security.SecTrustSetAnchorCertificatesOnly.argstypes = [SecTrustRef, Boolean] Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus - Security.SecTrustEvaluate.argtypes = [ - SecTrustRef, - POINTER(SecTrustResultType) - ] + Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)] Security.SecTrustEvaluate.restype = OSStatus - Security.SecTrustGetCertificateCount.argtypes = [ - SecTrustRef - ] + Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef] Security.SecTrustGetCertificateCount.restype = CFIndex - Security.SecTrustGetCertificateAtIndex.argtypes = [ - SecTrustRef, - CFIndex - ] + Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex] Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef Security.SSLCreateContext.argtypes = [ CFAllocatorRef, SSLProtocolSide, - SSLConnectionType + SSLConnectionType, ] Security.SSLCreateContext.restype = SSLContextRef - Security.SSLSetSessionOption.argtypes = [ - SSLContextRef, - SSLSessionOption, - Boolean - ] + Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean] Security.SSLSetSessionOption.restype = OSStatus - Security.SSLSetProtocolVersionMin.argtypes = [ - SSLContextRef, - SSLProtocol - ] + Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol] Security.SSLSetProtocolVersionMin.restype = OSStatus - Security.SSLSetProtocolVersionMax.argtypes = [ - SSLContextRef, - SSLProtocol - ] + Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol] Security.SSLSetProtocolVersionMax.restype = OSStatus - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] + Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] Security.SecCopyErrorMessageString.restype = CFStringRef 
Security.SSLReadFunc = SSLReadFunc @@ -369,64 +292,47 @@ Security.OSStatus = OSStatus Security.kSecImportExportPassphrase = CFStringRef.in_dll( - Security, 'kSecImportExportPassphrase' + Security, "kSecImportExportPassphrase" ) Security.kSecImportItemIdentity = CFStringRef.in_dll( - Security, 'kSecImportItemIdentity' + Security, "kSecImportItemIdentity" ) # CoreFoundation time! - CoreFoundation.CFRetain.argtypes = [ - CFTypeRef - ] + CoreFoundation.CFRetain.argtypes = [CFTypeRef] CoreFoundation.CFRetain.restype = CFTypeRef - CoreFoundation.CFRelease.argtypes = [ - CFTypeRef - ] + CoreFoundation.CFRelease.argtypes = [CFTypeRef] CoreFoundation.CFRelease.restype = None - CoreFoundation.CFGetTypeID.argtypes = [ - CFTypeRef - ] + CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef] CoreFoundation.CFGetTypeID.restype = CFTypeID CoreFoundation.CFStringCreateWithCString.argtypes = [ CFAllocatorRef, c_char_p, - CFStringEncoding + CFStringEncoding, ] CoreFoundation.CFStringCreateWithCString.restype = CFStringRef - CoreFoundation.CFStringGetCStringPtr.argtypes = [ - CFStringRef, - CFStringEncoding - ] + CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding] CoreFoundation.CFStringGetCStringPtr.restype = c_char_p CoreFoundation.CFStringGetCString.argtypes = [ CFStringRef, c_char_p, CFIndex, - CFStringEncoding + CFStringEncoding, ] CoreFoundation.CFStringGetCString.restype = c_bool - CoreFoundation.CFDataCreate.argtypes = [ - CFAllocatorRef, - c_char_p, - CFIndex - ] + CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex] CoreFoundation.CFDataCreate.restype = CFDataRef - CoreFoundation.CFDataGetLength.argtypes = [ - CFDataRef - ] + CoreFoundation.CFDataGetLength.argtypes = [CFDataRef] CoreFoundation.CFDataGetLength.restype = CFIndex - CoreFoundation.CFDataGetBytePtr.argtypes = [ - CFDataRef - ] + CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef] CoreFoundation.CFDataGetBytePtr.restype = c_void_p CoreFoundation.CFDictionaryCreate.argtypes = [ @@ -435,14 +341,11 @@ POINTER(CFTypeRef), CFIndex, CFDictionaryKeyCallBacks, - CFDictionaryValueCallBacks + CFDictionaryValueCallBacks, ] CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef - CoreFoundation.CFDictionaryGetValue.argtypes = [ - CFDictionaryRef, - CFTypeRef - ] + CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef] CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef CoreFoundation.CFArrayCreate.argtypes = [ @@ -456,36 +359,30 @@ CoreFoundation.CFArrayCreateMutable.argtypes = [ CFAllocatorRef, CFIndex, - CFArrayCallBacks + CFArrayCallBacks, ] CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef - CoreFoundation.CFArrayAppendValue.argtypes = [ - CFMutableArrayRef, - c_void_p - ] + CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p] CoreFoundation.CFArrayAppendValue.restype = None - CoreFoundation.CFArrayGetCount.argtypes = [ - CFArrayRef - ] + CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef] CoreFoundation.CFArrayGetCount.restype = CFIndex - CoreFoundation.CFArrayGetValueAtIndex.argtypes = [ - CFArrayRef, - CFIndex - ] + CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex] CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( - CoreFoundation, 'kCFAllocatorDefault' + CoreFoundation, "kCFAllocatorDefault" + ) + CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll( + CoreFoundation, "kCFTypeArrayCallBacks" ) - CoreFoundation.kCFTypeArrayCallBacks = 
c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks') CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryKeyCallBacks' + CoreFoundation, "kCFTypeDictionaryKeyCallBacks" ) CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryValueCallBacks' + CoreFoundation, "kCFTypeDictionaryValueCallBacks" ) CoreFoundation.CFTypeRef = CFTypeRef @@ -494,7 +391,7 @@ CoreFoundation.CFDictionaryRef = CFDictionaryRef except (AttributeError): - raise ImportError('Error initializing ctypes') + raise ImportError("Error initializing ctypes") class CFConst(object): @@ -502,6 +399,7 @@ class CFConst(object): A class object that acts as essentially a namespace for CoreFoundation constants. """ + kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) @@ -509,6 +407,7 @@ class SecurityConst(object): """ A class object that acts as essentially a namespace for Security constants. """ + kSSLSessionOptionBreakOnServerAuth = 0 kSSLProtocol2 = 1 @@ -516,6 +415,8 @@ class SecurityConst(object): kTLSProtocol1 = 4 kTLSProtocol11 = 7 kTLSProtocol12 = 8 + kTLSProtocol13 = 10 + kTLSProtocolMaxSupported = 999 kSSLClientSide = 1 kSSLStreamType = 0 @@ -558,30 +459,27 @@ class SecurityConst(object): errSecInvalidTrustSettings = -25262 # Cipher suites. We only pick the ones our default cipher string allows. + # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F - TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3 + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9 + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8 TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F - TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2 TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B - TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 - TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038 TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040 TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032 TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D @@ -590,4 +488,5 @@ class SecurityConst(object): TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F TLS_AES_128_GCM_SHA256 = 0x1301 TLS_AES_256_GCM_SHA384 = 0x1302 - TLS_CHACHA20_POLY1305_SHA256 = 0x1303 + TLS_AES_128_CCM_8_SHA256 = 0x1305 + TLS_AES_128_CCM_SHA256 = 0x1304 diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/low_level.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/low_level.py index b13cd9e72c..e60168cac1 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/low_level.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/low_level.py @@ -66,22 +66,18 @@ def _cf_string_to_unicode(value): value_as_void_p = ctypes.cast(value, 
ctypes.POINTER(ctypes.c_void_p)) string = CoreFoundation.CFStringGetCStringPtr( - value_as_void_p, - CFConst.kCFStringEncodingUTF8 + value_as_void_p, CFConst.kCFStringEncodingUTF8 ) if string is None: buffer = ctypes.create_string_buffer(1024) result = CoreFoundation.CFStringGetCString( - value_as_void_p, - buffer, - 1024, - CFConst.kCFStringEncodingUTF8 + value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8 ) if not result: - raise OSError('Error copying C string from CFStringRef') + raise OSError("Error copying C string from CFStringRef") string = buffer.value if string is not None: - string = string.decode('utf-8') + string = string.decode("utf-8") return string @@ -97,8 +93,8 @@ def _assert_no_error(error, exception_class=None): output = _cf_string_to_unicode(cf_error_string) CoreFoundation.CFRelease(cf_error_string) - if output is None or output == u'': - output = u'OSStatus %s' % error + if output is None or output == u"": + output = u"OSStatus %s" % error if exception_class is None: exception_class = ssl.SSLError @@ -115,8 +111,7 @@ def _cert_array_from_pem(pem_bundle): pem_bundle = pem_bundle.replace(b"\r\n", b"\n") der_certs = [ - base64.b64decode(match.group(1)) - for match in _PEM_CERTS_RE.finditer(pem_bundle) + base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle) ] if not der_certs: raise ssl.SSLError("No root certificates specified") @@ -124,7 +119,7 @@ def _cert_array_from_pem(pem_bundle): cert_array = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks) + ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), ) if not cert_array: raise ssl.SSLError("Unable to allocate memory!") @@ -186,21 +181,16 @@ def _temporary_keychain(): # some random bytes to password-protect the keychain we're creating, so we # ask for 40 random bytes. random_bytes = os.urandom(40) - filename = base64.b16encode(random_bytes[:8]).decode('utf-8') + filename = base64.b16encode(random_bytes[:8]).decode("utf-8") password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8 tempdirectory = tempfile.mkdtemp() - keychain_path = os.path.join(tempdirectory, filename).encode('utf-8') + keychain_path = os.path.join(tempdirectory, filename).encode("utf-8") # We now want to create the keychain itself. keychain = Security.SecKeychainRef() status = Security.SecKeychainCreate( - keychain_path, - len(password), - password, - False, - None, - ctypes.byref(keychain) + keychain_path, len(password), password, False, None, ctypes.byref(keychain) ) _assert_no_error(status) @@ -219,14 +209,12 @@ def _load_items_from_file(keychain, path): identities = [] result_array = None - with open(path, 'rb') as f: + with open(path, "rb") as f: raw_filedata = f.read() try: filedata = CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, - raw_filedata, - len(raw_filedata) + CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata) ) result_array = CoreFoundation.CFArrayRef() result = Security.SecItemImport( @@ -237,7 +225,7 @@ def _load_items_from_file(keychain, path): 0, # import flags None, # key params, can include passphrase in the future keychain, # The keychain to insert into - ctypes.byref(result_array) # Results + ctypes.byref(result_array), # Results ) _assert_no_error(result) @@ -247,9 +235,7 @@ def _load_items_from_file(keychain, path): # keychain already has them! 
result_count = CoreFoundation.CFArrayGetCount(result_array) for index in range(result_count): - item = CoreFoundation.CFArrayGetValueAtIndex( - result_array, index - ) + item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index) item = ctypes.cast(item, CoreFoundation.CFTypeRef) if _is_cert(item): @@ -307,9 +293,7 @@ def _load_client_cert_chain(keychain, *paths): try: for file_path in paths: - new_identities, new_certs = _load_items_from_file( - keychain, file_path - ) + new_identities, new_certs = _load_items_from_file(keychain, file_path) identities.extend(new_identities) certificates.extend(new_certs) @@ -318,9 +302,7 @@ def _load_client_cert_chain(keychain, *paths): if not identities: new_identity = Security.SecIdentityRef() status = Security.SecIdentityCreateWithCertificate( - keychain, - certificates[0], - ctypes.byref(new_identity) + keychain, certificates[0], ctypes.byref(new_identity) ) _assert_no_error(status) identities.append(new_identity) diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/appengine.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/appengine.py index 1c2332cb71..d8716b9f9e 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/appengine.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/appengine.py @@ -50,7 +50,7 @@ MaxRetryError, ProtocolError, TimeoutError, - SSLError + SSLError, ) from ..request import RequestMethods @@ -96,23 +96,31 @@ class AppEngineManager(RequestMethods): Beyond those cases, it will raise normal urllib3 errors. """ - def __init__(self, headers=None, retries=None, validate_certificate=True, - urlfetch_retries=True): + def __init__( + self, + headers=None, + retries=None, + validate_certificate=True, + urlfetch_retries=True, + ): if not urlfetch: raise AppEnginePlatformError( - "URLFetch is not available in this environment.") + "URLFetch is not available in this environment." + ) if is_prod_appengine_mvms(): raise AppEnginePlatformError( "Use normal urllib3.PoolManager instead of AppEngineManager" "on Managed VMs, as using URLFetch is not necessary in " - "this environment.") + "this environment." + ) warnings.warn( "urllib3 is using URLFetch on Google App Engine sandbox instead " "of sockets. 
To use sockets directly instead of URLFetch see " "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.", - AppEnginePlatformWarning) + AppEnginePlatformWarning, + ) RequestMethods.__init__(self, headers) self.validate_certificate = validate_certificate @@ -127,17 +135,22 @@ def __exit__(self, exc_type, exc_val, exc_tb): # Return False to re-raise any potential exceptions return False - def urlopen(self, method, url, body=None, headers=None, - retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, - **response_kw): + def urlopen( + self, + method, + url, + body=None, + headers=None, + retries=None, + redirect=True, + timeout=Timeout.DEFAULT_TIMEOUT, + **response_kw + ): retries = self._get_retries(retries, redirect) try: - follow_redirects = ( - redirect and - retries.redirect != 0 and - retries.total) + follow_redirects = redirect and retries.redirect != 0 and retries.total response = urlfetch.fetch( url, payload=body, @@ -152,44 +165,52 @@ def urlopen(self, method, url, body=None, headers=None, raise TimeoutError(self, e) except urlfetch.InvalidURLError as e: - if 'too large' in str(e): + if "too large" in str(e): raise AppEnginePlatformError( "URLFetch request too large, URLFetch only " - "supports requests up to 10mb in size.", e) + "supports requests up to 10mb in size.", + e, + ) raise ProtocolError(e) except urlfetch.DownloadError as e: - if 'Too many redirects' in str(e): + if "Too many redirects" in str(e): raise MaxRetryError(self, url, reason=e) raise ProtocolError(e) except urlfetch.ResponseTooLargeError as e: raise AppEnginePlatformError( "URLFetch response too large, URLFetch only supports" - "responses up to 32mb in size.", e) + "responses up to 32mb in size.", + e, + ) except urlfetch.SSLCertificateError as e: raise SSLError(e) except urlfetch.InvalidMethodError as e: raise AppEnginePlatformError( - "URLFetch does not support method: %s" % method, e) + "URLFetch does not support method: %s" % method, e + ) http_response = self._urlfetch_response_to_http_response( - response, retries=retries, **response_kw) + response, retries=retries, **response_kw + ) # Handle redirect? redirect_location = redirect and http_response.get_redirect_location() if redirect_location: # Check for redirect response - if (self.urlfetch_retries and retries.raise_on_redirect): + if self.urlfetch_retries and retries.raise_on_redirect: raise MaxRetryError(self, url, "too many redirects") else: if http_response.status == 303: - method = 'GET' + method = "GET" try: - retries = retries.increment(method, url, response=http_response, _pool=self) + retries = retries.increment( + method, url, response=http_response, _pool=self + ) except MaxRetryError: if retries.raise_on_redirect: raise MaxRetryError(self, url, "too many redirects") @@ -199,22 +220,32 @@ def urlopen(self, method, url, body=None, headers=None, log.debug("Redirecting %s -> %s", url, redirect_location) redirect_url = urljoin(url, redirect_location) return self.urlopen( - method, redirect_url, body, headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) + method, + redirect_url, + body, + headers, + retries=retries, + redirect=redirect, + timeout=timeout, + **response_kw + ) # Check if we should retry the HTTP response. 
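(The retry bookkeeping in this method leans on urllib3's ``Retry`` objects: ``Retry.from_int`` normalizes an integer budget, ``is_retry`` consults the status code plus any Retry-After header, and ``increment`` returns a fresh object with one attempt spent. A small sketch of those calls — the import path assumes a plain urllib3 install rather than the vendored copy under pipenv.patched.notpip._vendor:

from urllib3.util.retry import Retry

retries = Retry.from_int(3, redirect=True)  # total budget of 3, redirects allowed
print(retries.total)  # 3

# A 503 with a Retry-After header is one of the cases is_retry() accepts.
print(retries.is_retry("GET", status_code=503, has_retry_after=True))  # True

# increment() is non-destructive: it returns a new Retry with the budget reduced.
retries = retries.increment(method="GET", url="/")
print(retries.total)  # 2

The redirect handling above works the same way: each followed redirect calls increment(), and MaxRetryError fires once the budget is exhausted.)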
- has_retry_after = bool(http_response.getheader('Retry-After')) + has_retry_after = bool(http_response.getheader("Retry-After")) if retries.is_retry(method, http_response.status, has_retry_after): - retries = retries.increment( - method, url, response=http_response, _pool=self) + retries = retries.increment(method, url, response=http_response, _pool=self) log.debug("Retry: %s", url) retries.sleep(http_response) return self.urlopen( - method, url, - body=body, headers=headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) + method, + url, + body=body, + headers=headers, + retries=retries, + redirect=redirect, + timeout=timeout, + **response_kw + ) return http_response @@ -223,18 +254,18 @@ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): if is_prod_appengine(): # Production GAE handles deflate encoding automatically, but does # not remove the encoding header. - content_encoding = urlfetch_resp.headers.get('content-encoding') + content_encoding = urlfetch_resp.headers.get("content-encoding") - if content_encoding == 'deflate': - del urlfetch_resp.headers['content-encoding'] + if content_encoding == "deflate": + del urlfetch_resp.headers["content-encoding"] - transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') + transfer_encoding = urlfetch_resp.headers.get("transfer-encoding") # We have a full response's content, # so let's make sure we don't report ourselves as chunked data. - if transfer_encoding == 'chunked': + if transfer_encoding == "chunked": encodings = transfer_encoding.split(",") - encodings.remove('chunked') - urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) + encodings.remove("chunked") + urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings) original_response = HTTPResponse( # In order for decoding to work, we must present the content as @@ -262,20 +293,21 @@ def _get_absolute_timeout(self, timeout): warnings.warn( "URLFetch does not support granular timeout settings, " "reverting to total or default URLFetch timeout.", - AppEnginePlatformWarning) + AppEnginePlatformWarning, + ) return timeout.total return timeout def _get_retries(self, retries, redirect): if not isinstance(retries, Retry): - retries = Retry.from_int( - retries, redirect=redirect, default=self.retries) + retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if retries.connect or retries.read or retries.redirect: warnings.warn( "URLFetch only supports total retries and does not " "recognize connect, read, or redirect retry parameters.", - AppEnginePlatformWarning) + AppEnginePlatformWarning, + ) return retries diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/ntlmpool.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/ntlmpool.py index 8ea127c583..9c96be29d8 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/ntlmpool.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/ntlmpool.py @@ -20,7 +20,7 @@ class NTLMConnectionPool(HTTPSConnectionPool): Implements an NTLM authentication version of an urllib3 connection pool """ - scheme = 'https' + scheme = "https" def __init__(self, user, pw, authurl, *args, **kwargs): """ @@ -31,7 +31,7 @@ def __init__(self, user, pw, authurl, *args, **kwargs): super(NTLMConnectionPool, self).__init__(*args, **kwargs) self.authurl = authurl self.rawuser = user - user_parts = user.split('\\', 1) + user_parts = user.split("\\", 1) self.domain = user_parts[0].upper() self.user = user_parts[1] self.pw = pw @@ -40,72 +40,84 @@ def _new_conn(self): # Performs 
the NTLM handshake that secures the connection. The socket # must be kept open while requests are performed. self.num_connections += 1 - log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s', - self.num_connections, self.host, self.authurl) + log.debug( + "Starting NTLM HTTPS connection no. %d: https://%s%s", + self.num_connections, + self.host, + self.authurl, + ) - headers = {'Connection': 'Keep-Alive'} - req_header = 'Authorization' - resp_header = 'www-authenticate' + headers = {"Connection": "Keep-Alive"} + req_header = "Authorization" + resp_header = "www-authenticate" conn = HTTPSConnection(host=self.host, port=self.port) # Send negotiation message - headers[req_header] = ( - 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) + headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE( + self.rawuser + ) + log.debug("Request headers: %s", headers) + conn.request("GET", self.authurl, None, headers) res = conn.getresponse() reshdr = dict(res.getheaders()) - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', reshdr) - log.debug('Response data: %s [...]', res.read(100)) + log.debug("Response status: %s %s", res.status, res.reason) + log.debug("Response headers: %s", reshdr) + log.debug("Response data: %s [...]", res.read(100)) # Remove the reference to the socket, so that it can not be closed by # the response object (we want to keep the socket open) res.fp = None # Server should respond with a challenge message - auth_header_values = reshdr[resp_header].split(', ') + auth_header_values = reshdr[resp_header].split(", ") auth_header_value = None for s in auth_header_values: - if s[:5] == 'NTLM ': + if s[:5] == "NTLM ": auth_header_value = s[5:] if auth_header_value is None: - raise Exception('Unexpected %s response header: %s' % - (resp_header, reshdr[resp_header])) + raise Exception( + "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header]) + ) # Send authentication message - ServerChallenge, NegotiateFlags = \ - ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) - auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, - self.user, - self.domain, - self.pw, - NegotiateFlags) - headers[req_header] = 'NTLM %s' % auth_msg - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) + ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE( + auth_header_value + ) + auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE( + ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags + ) + headers[req_header] = "NTLM %s" % auth_msg + log.debug("Request headers: %s", headers) + conn.request("GET", self.authurl, None, headers) res = conn.getresponse() - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', dict(res.getheaders())) - log.debug('Response data: %s [...]', res.read()[:100]) + log.debug("Response status: %s %s", res.status, res.reason) + log.debug("Response headers: %s", dict(res.getheaders())) + log.debug("Response data: %s [...]", res.read()[:100]) if res.status != 200: if res.status == 401: - raise Exception('Server rejected request: wrong ' - 'username or password') - raise Exception('Wrong server response: %s %s' % - (res.status, res.reason)) + raise Exception( + "Server rejected request: wrong " "username or password" + ) + raise Exception("Wrong server response: %s %s" % (res.status, 
res.reason)) res.fp = None - log.debug('Connection established') + log.debug("Connection established") return conn - def urlopen(self, method, url, body=None, headers=None, retries=3, - redirect=True, assert_same_host=True): + def urlopen( + self, + method, + url, + body=None, + headers=None, + retries=3, + redirect=True, + assert_same_host=True, + ): if headers is None: headers = {} - headers['Connection'] = 'Keep-Alive' - return super(NTLMConnectionPool, self).urlopen(method, url, body, - headers, retries, - redirect, - assert_same_host) + headers["Connection"] = "Keep-Alive" + return super(NTLMConnectionPool, self).urlopen( + method, url, body, headers, retries, redirect, assert_same_host + ) diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py index f5bc7d83ea..e533512d51 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py @@ -47,6 +47,7 @@ from cryptography import x509 from cryptography.hazmat.backends.openssl import backend as openssl_backend from cryptography.hazmat.backends.openssl.x509 import _Certificate + try: from cryptography.x509 import UnsupportedExtension except ImportError: @@ -54,6 +55,7 @@ class UnsupportedExtension(Exception): pass + from socket import timeout, error as SocketError from io import BytesIO @@ -70,37 +72,35 @@ class UnsupportedExtension(Exception): from .. import util -__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] + +__all__ = ["inject_into_urllib3", "extract_from_urllib3"] # SNI always works. HAS_SNI = True # Map from urllib3 to PyOpenSSL compatible parameter-values. _openssl_versions = { - ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, + util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD, ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, } -if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): +if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"): + _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD + +if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"): _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD -if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): +if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"): _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD -try: - _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) -except AttributeError: - pass _stdlib_to_openssl_verify = { ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, - ssl.CERT_REQUIRED: - OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, + ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER + + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } -_openssl_to_stdlib_verify = dict( - (v, k) for k, v in _stdlib_to_openssl_verify.items() -) +_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items()) # OpenSSL will only write 16K at a time SSL_WRITE_BLOCKSIZE = 16384 @@ -113,10 +113,11 @@ class UnsupportedExtension(Exception): def inject_into_urllib3(): - 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' + "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support." 
_validate_dependencies_met() + util.SSLContext = PyOpenSSLContext util.ssl_.SSLContext = PyOpenSSLContext util.HAS_SNI = HAS_SNI util.ssl_.HAS_SNI = HAS_SNI @@ -125,8 +126,9 @@ def inject_into_urllib3(): def extract_from_urllib3(): - 'Undo monkey-patching by :func:`inject_into_urllib3`.' + "Undo monkey-patching by :func:`inject_into_urllib3`." + util.SSLContext = orig_util_SSLContext util.ssl_.SSLContext = orig_util_SSLContext util.HAS_SNI = orig_util_HAS_SNI util.ssl_.HAS_SNI = orig_util_HAS_SNI @@ -141,17 +143,23 @@ def _validate_dependencies_met(): """ # Method added in `cryptography==1.1`; not available in older versions from cryptography.x509.extensions import Extensions + if getattr(Extensions, "get_extension_for_class", None) is None: - raise ImportError("'cryptography' module missing required functionality. " - "Try upgrading to v1.3.4 or newer.") + raise ImportError( + "'cryptography' module missing required functionality. " + "Try upgrading to v1.3.4 or newer." + ) # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 # attribute is only present on those versions. from OpenSSL.crypto import X509 + x509 = X509() if getattr(x509, "_x509", None) is None: - raise ImportError("'pyOpenSSL' module missing required functionality. " - "Try upgrading to v0.14 or newer.") + raise ImportError( + "'pyOpenSSL' module missing required functionality. " + "Try upgrading to v0.14 or newer." + ) def _dnsname_to_stdlib(name): @@ -167,6 +175,7 @@ def _dnsname_to_stdlib(name): If the name cannot be idna-encoded then we return None signalling that the name given should be skipped. """ + def idna_encode(name): """ Borrowed wholesale from the Python Cryptography Project. It turns out @@ -176,19 +185,23 @@ def idna_encode(name): from pipenv.patched.notpip._vendor import idna try: - for prefix in [u'*.', u'.']: + for prefix in [u"*.", u"."]: if name.startswith(prefix): - name = name[len(prefix):] - return prefix.encode('ascii') + idna.encode(name) + name = name[len(prefix) :] + return prefix.encode("ascii") + idna.encode(name) return idna.encode(name) except idna.core.IDNAError: return None + # Don't send IPv6 addresses through the IDNA encoder. + if ":" in name: + return name + name = idna_encode(name) if name is None: return None elif sys.version_info >= (3, 0): - name = name.decode('utf-8') + name = name.decode("utf-8") return name @@ -207,14 +220,16 @@ def get_subj_alt_name(peer_cert): # We want to find the SAN extension. Ask Cryptography to locate it (it's # faster than looping in Python) try: - ext = cert.extensions.get_extension_for_class( - x509.SubjectAlternativeName - ).value + ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value except x509.ExtensionNotFound: # No such extension, return the empty list. return [] - except (x509.DuplicateExtension, UnsupportedExtension, - x509.UnsupportedGeneralNameType, UnicodeError) as e: + except ( + x509.DuplicateExtension, + UnsupportedExtension, + x509.UnsupportedGeneralNameType, + UnicodeError, + ) as e: # A problem has been found with the quality of the certificate. Assume # no SAN field is present. log.warning( @@ -233,23 +248,23 @@ def get_subj_alt_name(peer_cert): # does with certificates, and so we need to attempt to do the same. # We also want to skip over names which cannot be idna encoded. 
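(The ``idna_encode`` helper above peels off a wildcard or leading-dot prefix before encoding, because the idna library rejects those as labels. A standalone sketch of the same transformation — requires the ``idna`` package; the hostname is just an example:

import idna

def dnsname_to_ascii(name):
    # Strip "*." or "." first; idna.encode() would reject them as labels.
    for prefix in (u"*.", u"."):
        if name.startswith(prefix):
            return prefix.encode("ascii") + idna.encode(name[len(prefix):])
    return idna.encode(name)

print(dnsname_to_ascii(u"*.b\u00fccher.example"))  # b'*.xn--bcher-kva.example'

As the new guard in the hunk notes, IPv6 literals contain ":" and are passed through untouched rather than fed to the IDNA encoder.)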
names = [ - ('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName)) + ("DNS", name) + for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName)) if name is not None ] names.extend( - ('IP Address', str(name)) - for name in ext.get_values_for_type(x509.IPAddress) + ("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress) ) return names class WrappedSocket(object): - '''API-compatibility wrapper for Python OpenSSL's Connection-class. + """API-compatibility wrapper for Python OpenSSL's Connection-class. Note: _makefile_refs, _drop() and _reuse() are needed for the garbage collector of pypy. - ''' + """ def __init__(self, connection, socket, suppress_ragged_eofs=True): self.connection = connection @@ -272,20 +287,24 @@ def recv(self, *args, **kwargs): try: data = self.connection.recv(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): - return b'' + if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): + return b"" else: raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError as e: + except OpenSSL.SSL.ZeroReturnError: if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return b'' + return b"" else: raise except OpenSSL.SSL.WantReadError: if not util.wait_for_read(self.socket, self.socket.gettimeout()): - raise timeout('The read operation timed out') + raise timeout("The read operation timed out") else: return self.recv(*args, **kwargs) + + # TLS 1.3 post-handshake authentication + except OpenSSL.SSL.Error as e: + raise ssl.SSLError("read error: %r" % e) else: return data @@ -293,21 +312,25 @@ def recv_into(self, *args, **kwargs): try: return self.connection.recv_into(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): + if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): return 0 else: raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError as e: + except OpenSSL.SSL.ZeroReturnError: if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: return 0 else: raise except OpenSSL.SSL.WantReadError: if not util.wait_for_read(self.socket, self.socket.gettimeout()): - raise timeout('The read operation timed out') + raise timeout("The read operation timed out") else: return self.recv_into(*args, **kwargs) + # TLS 1.3 post-handshake authentication + except OpenSSL.SSL.Error as e: + raise ssl.SSLError("read error: %r" % e) + def settimeout(self, timeout): return self.socket.settimeout(timeout) @@ -325,7 +348,9 @@ def _send_until_done(self, data): def sendall(self, data): total_sent = 0 while total_sent < len(data): - sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) + sent = self._send_until_done( + data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE] + ) total_sent += sent def shutdown(self): @@ -349,17 +374,16 @@ def getpeercert(self, binary_form=False): return x509 if binary_form: - return OpenSSL.crypto.dump_certificate( - OpenSSL.crypto.FILETYPE_ASN1, - x509) + return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509) return { - 'subject': ( - (('commonName', x509.get_subject().CN),), - ), - 'subjectAltName': get_subj_alt_name(x509) + "subject": ((("commonName", x509.get_subject().CN),),), + "subjectAltName": get_subj_alt_name(x509), } + def version(self): + return self.connection.get_protocol_version_name() + def _reuse(self): self._makefile_refs += 1 @@ -371,9 +395,12 @@ 
def _drop(self): if _fileobject: # Platform-specific: Python 2 + def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) + + else: # Platform-specific: Python 3 makefile = backport_makefile @@ -386,6 +413,7 @@ class PyOpenSSLContext(object): for translating the interface of the standard library ``SSLContext`` object to calls into PyOpenSSL. """ + def __init__(self, protocol): self.protocol = _openssl_versions[protocol] self._ctx = OpenSSL.SSL.Context(self.protocol) @@ -407,24 +435,21 @@ def verify_mode(self): @verify_mode.setter def verify_mode(self, value): - self._ctx.set_verify( - _stdlib_to_openssl_verify[value], - _verify_callback - ) + self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback) def set_default_verify_paths(self): self._ctx.set_default_verify_paths() def set_ciphers(self, ciphers): if isinstance(ciphers, six.text_type): - ciphers = ciphers.encode('utf-8') + ciphers = ciphers.encode("utf-8") self._ctx.set_cipher_list(ciphers) def load_verify_locations(self, cafile=None, capath=None, cadata=None): if cafile is not None: - cafile = cafile.encode('utf-8') + cafile = cafile.encode("utf-8") if capath is not None: - capath = capath.encode('utf-8') + capath = capath.encode("utf-8") self._ctx.load_verify_locations(cafile, capath) if cadata is not None: self._ctx.load_verify_locations(BytesIO(cadata)) @@ -432,16 +457,23 @@ def load_verify_locations(self, cafile=None, capath=None, cadata=None): def load_cert_chain(self, certfile, keyfile=None, password=None): self._ctx.use_certificate_chain_file(certfile) if password is not None: - self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password) + if not isinstance(password, six.binary_type): + password = password.encode("utf-8") + self._ctx.set_passwd_cb(lambda *_: password) self._ctx.use_privatekey_file(keyfile or certfile) - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, suppress_ragged_eofs=True, - server_hostname=None): + def wrap_socket( + self, + sock, + server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, + server_hostname=None, + ): cnx = OpenSSL.SSL.Connection(self._ctx, sock) if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3 - server_hostname = server_hostname.encode('utf-8') + server_hostname = server_hostname.encode("utf-8") if server_hostname is not None: cnx.set_tlsext_host_name(server_hostname) @@ -453,10 +485,10 @@ def wrap_socket(self, sock, server_side=False, cnx.do_handshake() except OpenSSL.SSL.WantReadError: if not util.wait_for_read(sock, sock.gettimeout()): - raise timeout('select timed out') + raise timeout("select timed out") continue except OpenSSL.SSL.Error as e: - raise ssl.SSLError('bad handshake: %r' % e) + raise ssl.SSLError("bad handshake: %r" % e) break return WrappedSocket(cnx, sock) diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/securetransport.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/securetransport.py index 77cb59ed71..24e6b5c4d9 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/securetransport.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/securetransport.py @@ -23,6 +23,31 @@ urllib3.contrib.securetransport.inject_into_urllib3() Happy TLSing! + +This code is a bastardised version of the code found in Will Bond's oscrypto +library. An enormous debt is owed to him for blazing this trail for us. 
For +that reason, this code should be considered to be covered both by urllib3's +license and by oscrypto's: + + Copyright (c) 2015-2016 Will Bond <will@wbond.net> + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. """ from __future__ import absolute_import @@ -37,12 +62,12 @@ import weakref from .. import util -from ._securetransport.bindings import ( - Security, SecurityConst, CoreFoundation -) +from ._securetransport.bindings import Security, SecurityConst, CoreFoundation from ._securetransport.low_level import ( - _assert_no_error, _cert_array_from_pem, _temporary_keychain, - _load_client_cert_chain + _assert_no_error, + _cert_array_from_pem, + _temporary_keychain, + _load_client_cert_chain, ) try: # Platform-specific: Python 2 @@ -51,7 +76,7 @@ _fileobject = None from ..packages.backports.makefile import backport_makefile -__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] +__all__ = ["inject_into_urllib3", "extract_from_urllib3"] # SNI always works HAS_SNI = True @@ -86,35 +111,32 @@ # individual cipher suites. We need to do this because this is how # SecureTransport wants them. 
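(The reordered list that follows spells out, suite by suite, what a single OpenSSL cipher string would otherwise express. If you want to see which numeric suite values a cipher string expands to, the standard library can do the mapping — a sketch assuming CPython 3.6+ built against OpenSSL; the low 16 bits of each reported id are the IANA suite value used below:

import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ECDHE+AESGCM")
for cipher in ctx.get_ciphers():
    # e.g. ECDHE-ECDSA-AES256-GCM-SHA384 0xc02c
    print(cipher["name"], hex(cipher["id"] & 0xFFFF)))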
CIPHER_SUITES = [ - SecurityConst.TLS_AES_256_GCM_SHA384, - SecurityConst.TLS_CHACHA20_POLY1305_SHA256, - SecurityConst.TLS_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_AES_256_GCM_SHA384, + SecurityConst.TLS_AES_128_GCM_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_AES_128_CCM_8_SHA256, + SecurityConst.TLS_AES_128_CCM_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, @@ -122,39 +144,47 @@ ] # Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of -# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. +# TLSv1 and a high of TLSv1.3. For everything else, we pin to that version. 
+# TLSv1 to 1.2 are supported on macOS 10.8+ and TLSv1.3 is macOS 10.13+ _protocol_to_min_max = { - ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), + util.PROTOCOL_TLS: ( + SecurityConst.kTLSProtocol1, + SecurityConst.kTLSProtocolMaxSupported, + ) } if hasattr(ssl, "PROTOCOL_SSLv2"): _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( - SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2 + SecurityConst.kSSLProtocol2, + SecurityConst.kSSLProtocol2, ) if hasattr(ssl, "PROTOCOL_SSLv3"): _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( - SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3 + SecurityConst.kSSLProtocol3, + SecurityConst.kSSLProtocol3, ) if hasattr(ssl, "PROTOCOL_TLSv1"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( - SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1 + SecurityConst.kTLSProtocol1, + SecurityConst.kTLSProtocol1, ) if hasattr(ssl, "PROTOCOL_TLSv1_1"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( - SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11 + SecurityConst.kTLSProtocol11, + SecurityConst.kTLSProtocol11, ) if hasattr(ssl, "PROTOCOL_TLSv1_2"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( - SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12 + SecurityConst.kTLSProtocol12, + SecurityConst.kTLSProtocol12, ) -if hasattr(ssl, "PROTOCOL_TLS"): - _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23] def inject_into_urllib3(): """ Monkey-patch urllib3 with SecureTransport-backed SSL-support. """ + util.SSLContext = SecureTransportContext util.ssl_.SSLContext = SecureTransportContext util.HAS_SNI = HAS_SNI util.ssl_.HAS_SNI = HAS_SNI @@ -166,6 +196,7 @@ def extract_from_urllib3(): """ Undo monkey-patching by :func:`inject_into_urllib3`. """ + util.SSLContext = orig_util_SSLContext util.ssl_.SSLContext = orig_util_SSLContext util.HAS_SNI = orig_util_HAS_SNI util.ssl_.HAS_SNI = orig_util_HAS_SNI @@ -195,7 +226,7 @@ def _read_callback(connection_id, data_buffer, data_length_pointer): while read_count < requested_length: if timeout is None or timeout >= 0: if not util.wait_for_read(base_socket, timeout): - raise socket.error(errno.EAGAIN, 'timed out') + raise socket.error(errno.EAGAIN, "timed out") remaining = requested_length - read_count buffer = (ctypes.c_char * remaining).from_address( @@ -251,7 +282,7 @@ def _write_callback(connection_id, data_buffer, data_length_pointer): while sent < bytes_to_write: if timeout is None or timeout >= 0: if not util.wait_for_write(base_socket, timeout): - raise socket.error(errno.EAGAIN, 'timed out') + raise socket.error(errno.EAGAIN, "timed out") chunk_sent = base_socket.send(data) sent += chunk_sent @@ -293,6 +324,7 @@ class WrappedSocket(object): Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage collector of PyPy. """ + def __init__(self, socket): self.socket = socket self.context = None @@ -357,7 +389,7 @@ def _custom_validate(self, verify, trust_bundle): # We want data in memory, so load it up. if os.path.isfile(trust_bundle): - with open(trust_bundle, 'rb') as f: + with open(trust_bundle, "rb") as f: trust_bundle = f.read() cert_array = None @@ -371,9 +403,7 @@ def _custom_validate(self, verify, trust_bundle): # created for this connection, shove our CAs into it, tell ST to # ignore everything else it knows, and then ask if it can build a # chain. This is a buuuunch of code. 
- result = Security.SSLCopyPeerTrust( - self.context, ctypes.byref(trust) - ) + result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust)) _assert_no_error(result) if not trust: raise ssl.SSLError("Failed to copy trust reference") @@ -385,9 +415,7 @@ def _custom_validate(self, verify, trust_bundle): _assert_no_error(result) trust_result = Security.SecTrustResultType() - result = Security.SecTrustEvaluate( - trust, ctypes.byref(trust_result) - ) + result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result)) _assert_no_error(result) finally: if trust: @@ -399,23 +427,24 @@ def _custom_validate(self, verify, trust_bundle): # Ok, now we can look at what the result was. successes = ( SecurityConst.kSecTrustResultUnspecified, - SecurityConst.kSecTrustResultProceed + SecurityConst.kSecTrustResultProceed, ) if trust_result.value not in successes: raise ssl.SSLError( - "certificate verify failed, error code: %d" % - trust_result.value + "certificate verify failed, error code: %d" % trust_result.value ) - def handshake(self, - server_hostname, - verify, - trust_bundle, - min_version, - max_version, - client_cert, - client_key, - client_key_passphrase): + def handshake( + self, + server_hostname, + verify, + trust_bundle, + min_version, + max_version, + client_cert, + client_key, + client_key_passphrase, + ): """ Actually performs the TLS handshake. This is run automatically by wrapped socket, and shouldn't be needed in user code. @@ -445,7 +474,7 @@ def handshake(self, # If we have a server hostname, we should set that too. if server_hostname: if not isinstance(server_hostname, bytes): - server_hostname = server_hostname.encode('utf-8') + server_hostname = server_hostname.encode("utf-8") result = Security.SSLSetPeerDomainName( self.context, server_hostname, len(server_hostname) @@ -458,7 +487,16 @@ def handshake(self, # Set the minimum and maximum TLS versions. result = Security.SSLSetProtocolVersionMin(self.context, min_version) _assert_no_error(result) + + # TLS 1.3 isn't necessarily enabled by the OS + # so we have to detect when we error out and try + # setting TLS 1.3 if it's allowed. kTLSProtocolMaxSupported + # was added in macOS 10.13 along with kTLSProtocol13. result = Security.SSLSetProtocolVersionMax(self.context, max_version) + if result != 0 and max_version == SecurityConst.kTLSProtocolMaxSupported: + result = Security.SSLSetProtocolVersionMax( + self.context, SecurityConst.kTLSProtocol12 + ) _assert_no_error(result) # If there's a trust DB, we need to use it. We do that by telling @@ -467,9 +505,7 @@ def handshake(self, # authing in that case. if not verify or trust_bundle is not None: result = Security.SSLSetSessionOption( - self.context, - SecurityConst.kSSLSessionOptionBreakOnServerAuth, - True + self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True ) _assert_no_error(result) @@ -479,9 +515,7 @@ def handshake(self, self._client_cert_chain = _load_client_cert_chain( self._keychain, client_cert, client_key ) - result = Security.SSLSetCertificate( - self.context, self._client_cert_chain - ) + result = Security.SSLSetCertificate(self.context, self._client_cert_chain) _assert_no_error(result) while True: @@ -532,7 +566,7 @@ def recv_into(self, buffer, nbytes=None): # There are some result codes that we want to treat as "not always # errors". Specifically, those are errSSLWouldBlock, # errSSLClosedGraceful, and errSSLClosedNoNotify. 
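(The recv path below treats errSSLWouldBlock after partial progress as a short read rather than an error, which matches ordinary socket semantics: recv_into may legitimately return fewer bytes than requested, and callers loop. A generic illustration with plain sockets, no TLS involved:

import socket

a, b = socket.socketpair()
a.sendall(b"hello")

buf = bytearray(5)
view = memoryview(buf)
received = 0
while received < len(buf):
    n = b.recv_into(view[received:])  # may return fewer bytes than asked for
    if n == 0:
        break  # peer closed the connection
    received += n
print(bytes(buf))  # b'hello'
a.close()
b.close())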
- if (result == SecurityConst.errSSLWouldBlock): + if result == SecurityConst.errSSLWouldBlock: # If we didn't process any bytes, then this was just a time out. # However, we can get errSSLWouldBlock in situations when we *did* # read some data, and in those cases we should just read "short" @@ -540,7 +574,10 @@ def recv_into(self, buffer, nbytes=None): if processed_bytes.value == 0: # Timed out, no data read. raise socket.timeout("recv timed out") - elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify): + elif result in ( + SecurityConst.errSSLClosedGraceful, + SecurityConst.errSSLClosedNoNotify, + ): # The remote peer has closed this connection. We should do so as # well. Note that we don't actually return here because in # principle this could actually be fired along with return data. @@ -579,7 +616,7 @@ def send(self, data): def sendall(self, data): total_sent = 0 while total_sent < len(data): - sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) + sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]) total_sent += sent def shutdown(self): @@ -626,18 +663,14 @@ def getpeercert(self, binary_form=False): # instead to just flag to urllib3 that it shouldn't do its own hostname # validation when using SecureTransport. if not binary_form: - raise ValueError( - "SecureTransport only supports dumping binary certs" - ) + raise ValueError("SecureTransport only supports dumping binary certs") trust = Security.SecTrustRef() certdata = None der_bytes = None try: # Grab the trust store. - result = Security.SSLCopyPeerTrust( - self.context, ctypes.byref(trust) - ) + result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust)) _assert_no_error(result) if not trust: # Probably we haven't done the handshake yet. No biggie. @@ -667,6 +700,27 @@ def getpeercert(self, binary_form=False): return der_bytes + def version(self): + protocol = Security.SSLProtocol() + result = Security.SSLGetNegotiatedProtocolVersion( + self.context, ctypes.byref(protocol) + ) + _assert_no_error(result) + if protocol.value == SecurityConst.kTLSProtocol13: + return "TLSv1.3" + elif protocol.value == SecurityConst.kTLSProtocol12: + return "TLSv1.2" + elif protocol.value == SecurityConst.kTLSProtocol11: + return "TLSv1.1" + elif protocol.value == SecurityConst.kTLSProtocol1: + return "TLSv1" + elif protocol.value == SecurityConst.kSSLProtocol3: + return "SSLv3" + elif protocol.value == SecurityConst.kSSLProtocol2: + return "SSLv2" + else: + raise ssl.SSLError("Unknown TLS version: %r" % protocol) + def _reuse(self): self._makefile_refs += 1 @@ -678,16 +732,21 @@ def _drop(self): if _fileobject: # Platform-specific: Python 2 + def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) + + else: # Platform-specific: Python 3 + def makefile(self, mode="r", buffering=None, *args, **kwargs): # We disable buffering with SecureTransport because it conflicts with # the buffering that ST does internally (see issue #1153 for more). buffering = 0 return backport_makefile(self, mode, buffering, *args, **kwargs) + WrappedSocket.makefile = makefile @@ -697,6 +756,7 @@ class SecureTransportContext(object): interface of the standard library ``SSLContext`` object to calls into SecureTransport. 
""" + def __init__(self, protocol): self._min_version, self._max_version = _protocol_to_min_max[protocol] self._options = 0 @@ -763,16 +823,12 @@ def load_default_certs(self): def set_ciphers(self, ciphers): # For now, we just require the default cipher string. if ciphers != util.ssl_.DEFAULT_CIPHERS: - raise ValueError( - "SecureTransport doesn't support custom cipher strings" - ) + raise ValueError("SecureTransport doesn't support custom cipher strings") def load_verify_locations(self, cafile=None, capath=None, cadata=None): # OK, we only really support cadata and cafile. if capath is not None: - raise ValueError( - "SecureTransport does not support cert directories" - ) + raise ValueError("SecureTransport does not support cert directories") self._trust_bundle = cafile or cadata @@ -781,9 +837,14 @@ def load_cert_chain(self, certfile, keyfile=None, password=None): self._client_key = keyfile self._client_cert_passphrase = password - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, suppress_ragged_eofs=True, - server_hostname=None): + def wrap_socket( + self, + sock, + server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, + server_hostname=None, + ): # So, what do we do here? Firstly, we assert some properties. This is a # stripped down shim, so there is some functionality we don't support. # See PEP 543 for the real deal. @@ -797,8 +858,13 @@ def wrap_socket(self, sock, server_side=False, # Now we can handshake wrapped_socket.handshake( - server_hostname, self._verify, self._trust_bundle, - self._min_version, self._max_version, self._client_cert, - self._client_key, self._client_key_passphrase + server_hostname, + self._verify, + self._trust_bundle, + self._min_version, + self._max_version, + self._client_cert, + self._client_key, + self._client_key_passphrase, ) return wrapped_socket diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/socks.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/socks.py index 811e312ec8..9e97f7aa98 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/socks.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/socks.py @@ -1,25 +1,38 @@ # -*- coding: utf-8 -*- """ This module contains provisional support for SOCKS proxies from within -urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and +urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and SOCKS5. To enable its functionality, either install PySocks or install this module with the ``socks`` extra. The SOCKS implementation supports the full range of urllib3 features. It also supports the following SOCKS features: -- SOCKS4 -- SOCKS4a -- SOCKS5 +- SOCKS4A (``proxy_url='socks4a://...``) +- SOCKS4 (``proxy_url='socks4://...``) +- SOCKS5 with remote DNS (``proxy_url='socks5h://...``) +- SOCKS5 with local DNS (``proxy_url='socks5://...``) - Usernames and passwords for the SOCKS proxy -Known Limitations: + .. note:: + It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in + your ``proxy_url`` to ensure that DNS resolution is done from the remote + server instead of client-side when connecting to a domain name. + +SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5 +supports IPv4, IPv6, and domain names. 
+ +When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url`` +will be sent as the ``userid`` section of the SOCKS request:: + + proxy_url="socks4a://<userid>@proxy-host" + +When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion +of the ``proxy_url`` will be sent as the username/password to authenticate +with the proxy:: + + proxy_url="socks5h://<username>:<password>@proxy-host" -- Currently PySocks does not support contacting remote websites via literal - IPv6 addresses. Any such connection attempt will fail. You must use a domain - name. -- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any - such connection attempt will fail. """ from __future__ import absolute_import @@ -29,23 +42,20 @@ import warnings from ..exceptions import DependencyWarning - warnings.warn(( - 'SOCKS support in urllib3 requires the installation of optional ' - 'dependencies: specifically, PySocks. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' + warnings.warn( + ( + "SOCKS support in urllib3 requires the installation of optional " + "dependencies: specifically, PySocks. For more information, see " + "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies" ), - DependencyWarning + DependencyWarning, ) raise from socket import error as SocketError, timeout as SocketTimeout -from ..connection import ( - HTTPConnection, HTTPSConnection -) -from ..connectionpool import ( - HTTPConnectionPool, HTTPSConnectionPool -) +from ..connection import HTTPConnection, HTTPSConnection +from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool from ..exceptions import ConnectTimeoutError, NewConnectionError from ..poolmanager import PoolManager from ..util.url import parse_url @@ -60,8 +70,9 @@ class SOCKSConnection(HTTPConnection): """ A plain-text HTTP connection that connects via a SOCKS proxy. """ + def __init__(self, *args, **kwargs): - self._socks_options = kwargs.pop('_socks_options') + self._socks_options = kwargs.pop("_socks_options") super(SOCKSConnection, self).__init__(*args, **kwargs) def _new_conn(self): @@ -70,28 +81,30 @@ def _new_conn(self): """ extra_kw = {} if self.source_address: - extra_kw['source_address'] = self.source_address + extra_kw["source_address"] = self.source_address if self.socket_options: - extra_kw['socket_options'] = self.socket_options + extra_kw["socket_options"] = self.socket_options try: conn = socks.create_connection( (self.host, self.port), - proxy_type=self._socks_options['socks_version'], - proxy_addr=self._socks_options['proxy_host'], - proxy_port=self._socks_options['proxy_port'], - proxy_username=self._socks_options['username'], - proxy_password=self._socks_options['password'], - proxy_rdns=self._socks_options['rdns'], + proxy_type=self._socks_options["socks_version"], + proxy_addr=self._socks_options["proxy_host"], + proxy_port=self._socks_options["proxy_port"], + proxy_username=self._socks_options["username"], + proxy_password=self._socks_options["password"], + proxy_rdns=self._socks_options["rdns"], timeout=self.timeout, **extra_kw ) - except SocketTimeout as e: + except SocketTimeout: raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) + self, + "Connection to %s timed out. 
(connect timeout=%s)" + % (self.host, self.timeout), + ) except socks.ProxyError as e: # This is fragile as hell, but it seems to be the only way to raise @@ -101,23 +114,22 @@ def _new_conn(self): if isinstance(error, SocketTimeout): raise ConnectTimeoutError( self, - "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout) + "Connection to %s timed out. (connect timeout=%s)" + % (self.host, self.timeout), ) else: raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % error + self, "Failed to establish a new connection: %s" % error ) else: raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % e + self, "Failed to establish a new connection: %s" % e ) except SocketError as e: # Defensive: PySocks should catch all these. raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) + self, "Failed to establish a new connection: %s" % e + ) return conn @@ -143,47 +155,53 @@ class SOCKSProxyManager(PoolManager): A version of the urllib3 ProxyManager that routes connections via the defined SOCKS proxy. """ + pool_classes_by_scheme = { - 'http': SOCKSHTTPConnectionPool, - 'https': SOCKSHTTPSConnectionPool, + "http": SOCKSHTTPConnectionPool, + "https": SOCKSHTTPSConnectionPool, } - def __init__(self, proxy_url, username=None, password=None, - num_pools=10, headers=None, **connection_pool_kw): + def __init__( + self, + proxy_url, + username=None, + password=None, + num_pools=10, + headers=None, + **connection_pool_kw + ): parsed = parse_url(proxy_url) if username is None and password is None and parsed.auth is not None: - split = parsed.auth.split(':') + split = parsed.auth.split(":") if len(split) == 2: username, password = split - if parsed.scheme == 'socks5': + if parsed.scheme == "socks5": socks_version = socks.PROXY_TYPE_SOCKS5 rdns = False - elif parsed.scheme == 'socks5h': + elif parsed.scheme == "socks5h": socks_version = socks.PROXY_TYPE_SOCKS5 rdns = True - elif parsed.scheme == 'socks4': + elif parsed.scheme == "socks4": socks_version = socks.PROXY_TYPE_SOCKS4 rdns = False - elif parsed.scheme == 'socks4a': + elif parsed.scheme == "socks4a": socks_version = socks.PROXY_TYPE_SOCKS4 rdns = True else: - raise ValueError( - "Unable to determine SOCKS version from %s" % proxy_url - ) + raise ValueError("Unable to determine SOCKS version from %s" % proxy_url) self.proxy_url = proxy_url socks_options = { - 'socks_version': socks_version, - 'proxy_host': parsed.host, - 'proxy_port': parsed.port, - 'username': username, - 'password': password, - 'rdns': rdns + "socks_version": socks_version, + "proxy_host": parsed.host, + "proxy_port": parsed.port, + "username": username, + "password": password, + "rdns": rdns, } - connection_pool_kw['_socks_options'] = socks_options + connection_pool_kw["_socks_options"] = socks_options super(SOCKSProxyManager, self).__init__( num_pools, headers, **connection_pool_kw diff --git a/pipenv/patched/notpip/_vendor/urllib3/exceptions.py b/pipenv/patched/notpip/_vendor/urllib3/exceptions.py index 7bbaa9871f..93d93fba7d 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/exceptions.py +++ b/pipenv/patched/notpip/_vendor/urllib3/exceptions.py @@ -1,7 +1,6 @@ from __future__ import absolute_import -from .packages.six.moves.http_client import ( - IncompleteRead as httplib_IncompleteRead -) +from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead + # Base Exceptions @@ -17,6 +16,7 @@ class HTTPWarning(Warning): class PoolError(HTTPError): "Base 
exception for errors caused within a pool." + def __init__(self, pool, message): self.pool = pool HTTPError.__init__(self, "%s: %s" % (pool, message)) @@ -28,6 +28,7 @@ def __reduce__(self): class RequestError(PoolError): "Base exception for PoolErrors that have associated URLs." + def __init__(self, pool, url, message): self.url = url PoolError.__init__(self, pool, message) @@ -63,6 +64,7 @@ class ProtocolError(HTTPError): # Leaf Exceptions + class MaxRetryError(RequestError): """Raised when the maximum number of retries is exceeded. @@ -76,8 +78,7 @@ class MaxRetryError(RequestError): def __init__(self, pool, url, reason=None): self.reason = reason - message = "Max retries exceeded with url: %s (Caused by %r)" % ( - url, reason) + message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason) RequestError.__init__(self, pool, url, message) @@ -93,6 +94,7 @@ def __init__(self, pool, url, retries=3): class TimeoutStateError(HTTPError): """ Raised when passing an invalid state to a timeout """ + pass @@ -102,6 +104,7 @@ class TimeoutError(HTTPError): Catching this error will catch both :exc:`ReadTimeoutErrors <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`. """ + pass @@ -149,8 +152,8 @@ def __init__(self, location): class ResponseError(HTTPError): "Used as a container for an error reason supplied in a MaxRetryError." - GENERIC_ERROR = 'too many error responses' - SPECIFIC_ERROR = 'too many {status_code} error responses' + GENERIC_ERROR = "too many error responses" + SPECIFIC_ERROR = "too many {status_code} error responses" class SecurityWarning(HTTPWarning): @@ -188,6 +191,7 @@ class DependencyWarning(HTTPWarning): Warned when an attempt is made to import a module with missing optional dependencies. """ + pass @@ -201,6 +205,7 @@ class BodyNotHttplibCompatible(HTTPError): Body should be httplib.HTTPResponse like (have an fp attribute which returns raw chunks) for read_chunked(). """ + pass @@ -212,12 +217,15 @@ class IncompleteRead(HTTPError, httplib_IncompleteRead): for `partial` to avoid creating large objects on streamed reads. """ + def __init__(self, partial, expected): super(IncompleteRead, self).__init__(partial, expected) def __repr__(self): - return ('IncompleteRead(%i bytes read, ' - '%i more expected)' % (self.partial, self.expected)) + return "IncompleteRead(%i bytes read, " "%i more expected)" % ( + self.partial, + self.expected, + ) class InvalidHeader(HTTPError): @@ -236,8 +244,9 @@ def __init__(self, scheme): class HeaderParsingError(HTTPError): "Raised by assert_header_parsing, but we convert it to a log.warning statement." + def __init__(self, defects, unparsed_data): - message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) + message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data) super(HeaderParsingError, self).__init__(message) diff --git a/pipenv/patched/notpip/_vendor/urllib3/fields.py b/pipenv/patched/notpip/_vendor/urllib3/fields.py index 37fe64a3e8..8715b2202b 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/fields.py +++ b/pipenv/patched/notpip/_vendor/urllib3/fields.py @@ -1,11 +1,12 @@ from __future__ import absolute_import import email.utils import mimetypes +import re from .packages import six -def guess_content_type(filename, default='application/octet-stream'): +def guess_content_type(filename, default="application/octet-stream"): """ Guess the "Content-Type" of a file. 
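(``guess_content_type``, shown at the top of this fields.py hunk, is a thin wrapper over the standard mimetypes table with a fallback default:

from urllib3.fields import guess_content_type

print(guess_content_type("photo.jpg"))     # image/jpeg
print(guess_content_type("no-extension"))  # application/octet-stream (the default))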
@@ -19,57 +20,143 @@ def guess_content_type(filename, default='application/octet-stream'): return default -def format_header_param(name, value): +def format_header_param_rfc2231(name, value): """ - Helper function to format and quote a single header parameter. + Helper function to format and quote a single header parameter using the + strategy defined in RFC 2231. Particularly useful for header parameters which might contain - non-ASCII values, like file names. This follows RFC 2231, as - suggested by RFC 2388 Section 4.4. + non-ASCII values, like file names. This follows RFC 2388 Section 4.4. :param name: The name of the parameter, a string expected to be ASCII only. :param value: - The value of the parameter, provided as a unicode string. + The value of the parameter, provided as ``bytes`` or `str``. + :ret: + An RFC-2231-formatted unicode string. """ + if isinstance(value, six.binary_type): + value = value.decode("utf-8") + if not any(ch in value for ch in '"\\\r\n'): - result = '%s="%s"' % (name, value) + result = u'%s="%s"' % (name, value) try: - result.encode('ascii') + result.encode("ascii") except (UnicodeEncodeError, UnicodeDecodeError): pass else: return result - if not six.PY3 and isinstance(value, six.text_type): # Python 2: - value = value.encode('utf-8') - value = email.utils.encode_rfc2231(value, 'utf-8') - value = '%s*=%s' % (name, value) + + if six.PY2: # Python 2: + value = value.encode("utf-8") + + # encode_rfc2231 accepts an encoded string and returns an ascii-encoded + # string in Python 2 but accepts and returns unicode strings in Python 3 + value = email.utils.encode_rfc2231(value, "utf-8") + value = "%s*=%s" % (name, value) + + if six.PY2: # Python 2: + value = value.decode("utf-8") + return value +_HTML5_REPLACEMENTS = { + u"\u0022": u"%22", + # Replace "\" with "\\". + u"\u005C": u"\u005C\u005C", + u"\u005C": u"\u005C\u005C", +} + +# All control characters from 0x00 to 0x1F *except* 0x1B. +_HTML5_REPLACEMENTS.update( + { + six.unichr(cc): u"%{:02X}".format(cc) + for cc in range(0x00, 0x1F + 1) + if cc not in (0x1B,) + } +) + + +def _replace_multiple(value, needles_and_replacements): + def replacer(match): + return needles_and_replacements[match.group(0)] + + pattern = re.compile( + r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()]) + ) + + result = pattern.sub(replacer, value) + + return result + + +def format_header_param_html5(name, value): + """ + Helper function to format and quote a single header parameter using the + HTML5 strategy. + + Particularly useful for header parameters which might contain + non-ASCII values, like file names. This follows the `HTML5 Working Draft + Section 4.10.22.7`_ and matches the behavior of curl and modern browsers. + + .. _HTML5 Working Draft Section 4.10.22.7: + https://w3c.github.io/html/sec-forms.html#multipart-form-data + + :param name: + The name of the parameter, a string expected to be ASCII only. + :param value: + The value of the parameter, provided as ``bytes`` or `str``. + :ret: + A unicode string, stripped of troublesome characters. + """ + if isinstance(value, six.binary_type): + value = value.decode("utf-8") + + value = _replace_multiple(value, _HTML5_REPLACEMENTS) + + return u'%s="%s"' % (name, value) + + +# For backwards-compatibility. +format_header_param = format_header_param_html5 + + class RequestField(object): """ A data container for request body parameters. :param name: - The name of this request field. + The name of this request field. Must be unicode. 
:param data: The data/value body. :param filename: - An optional filename of the request field. + An optional filename of the request field. Must be unicode. :param headers: An optional dict-like object of headers to initially use for the field. + :param header_formatter: + An optional callable that is used to encode and format the headers. By + default, this is :func:`format_header_param_html5`. """ - def __init__(self, name, data, filename=None, headers=None): + + def __init__( + self, + name, + data, + filename=None, + headers=None, + header_formatter=format_header_param_html5, + ): self._name = name self._filename = filename self.data = data self.headers = {} if headers: self.headers = dict(headers) + self.header_formatter = header_formatter @classmethod - def from_tuples(cls, fieldname, value): + def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5): """ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. @@ -97,21 +184,25 @@ def from_tuples(cls, fieldname, value): content_type = None data = value - request_param = cls(fieldname, data, filename=filename) + request_param = cls( + fieldname, data, filename=filename, header_formatter=header_formatter + ) request_param.make_multipart(content_type=content_type) return request_param def _render_part(self, name, value): """ - Overridable helper function to format a single header parameter. + Overridable helper function to format a single header parameter. By + default, this calls ``self.header_formatter``. :param name: The name of the parameter, a string expected to be ASCII only. :param value: The value of the parameter, provided as a unicode string. """ - return format_header_param(name, value) + + return self.header_formatter(name, value) def _render_parts(self, header_parts): """ @@ -133,7 +224,7 @@ def _render_parts(self, header_parts): if value is not None: parts.append(self._render_part(name, value)) - return '; '.join(parts) + return u"; ".join(parts) def render_headers(self): """ @@ -141,21 +232,22 @@ def render_headers(self): """ lines = [] - sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location'] + sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"] for sort_key in sort_keys: if self.headers.get(sort_key, False): - lines.append('%s: %s' % (sort_key, self.headers[sort_key])) + lines.append(u"%s: %s" % (sort_key, self.headers[sort_key])) for header_name, header_value in self.headers.items(): if header_name not in sort_keys: if header_value: - lines.append('%s: %s' % (header_name, header_value)) + lines.append(u"%s: %s" % (header_name, header_value)) - lines.append('\r\n') - return '\r\n'.join(lines) + lines.append(u"\r\n") + return u"\r\n".join(lines) - def make_multipart(self, content_disposition=None, content_type=None, - content_location=None): + def make_multipart( + self, content_disposition=None, content_type=None, content_location=None + ): """ Makes this request field into a multipart request field. @@ -168,11 +260,14 @@ def make_multipart(self, content_disposition=None, content_type=None, The 'Content-Location' of the request body. 
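The new ``header_formatter`` hook makes the two quoting strategies easy to compare side by side. A small sketch, assuming a stock urllib3 >= 1.25 rather than the vendored copy::

    from urllib3.fields import (
        RequestField,
        format_header_param_html5,
        format_header_param_rfc2231,
    )

    filename = u"caf\u00e9 notes.txt"

    # RFC 2231: non-ASCII values become an encoded filename* parameter.
    print(format_header_param_rfc2231("filename", filename))
    # filename*=utf-8''caf%C3%A9%20notes.txt

    # HTML5 (the new default): troublesome characters are escaped in
    # place and the value stays human-readable.
    print(format_header_param_html5("filename", filename))
    # filename="café notes.txt"

    # Per-field override via the new constructor argument:
    field = RequestField("upload", b"data", filename=filename,
                         header_formatter=format_header_param_rfc2231)
    field.make_multipart(content_type="text/plain")
    print(field.render_headers())

The HTML5 strategy matches what curl and modern browsers emit, which is why upstream made it the default while keeping the RFC 2231 formatter available for servers that expect the older encoding.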
""" - self.headers['Content-Disposition'] = content_disposition or 'form-data' - self.headers['Content-Disposition'] += '; '.join([ - '', self._render_parts( - (('name', self._name), ('filename', self._filename)) - ) - ]) - self.headers['Content-Type'] = content_type - self.headers['Content-Location'] = content_location + self.headers["Content-Disposition"] = content_disposition or u"form-data" + self.headers["Content-Disposition"] += u"; ".join( + [ + u"", + self._render_parts( + ((u"name", self._name), (u"filename", self._filename)) + ), + ] + ) + self.headers["Content-Type"] = content_type + self.headers["Content-Location"] = content_location diff --git a/pipenv/patched/notpip/_vendor/urllib3/filepost.py b/pipenv/patched/notpip/_vendor/urllib3/filepost.py index 78f1e19b0e..b7b00992c6 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/filepost.py +++ b/pipenv/patched/notpip/_vendor/urllib3/filepost.py @@ -9,7 +9,7 @@ from .packages.six import b from .fields import RequestField -writer = codecs.lookup('utf-8')[3] +writer = codecs.lookup("utf-8")[3] def choose_boundary(): @@ -17,8 +17,8 @@ def choose_boundary(): Our embarrassingly-simple replacement for mimetools.choose_boundary. """ boundary = binascii.hexlify(os.urandom(16)) - if six.PY3: - boundary = boundary.decode('ascii') + if not six.PY2: + boundary = boundary.decode("ascii") return boundary @@ -76,7 +76,7 @@ def encode_multipart_formdata(fields, boundary=None): boundary = choose_boundary() for field in iter_field_objects(fields): - body.write(b('--%s\r\n' % (boundary))) + body.write(b("--%s\r\n" % (boundary))) writer(body).write(field.render_headers()) data = field.data @@ -89,10 +89,10 @@ def encode_multipart_formdata(fields, boundary=None): else: body.write(data) - body.write(b'\r\n') + body.write(b"\r\n") - body.write(b('--%s--\r\n' % (boundary))) + body.write(b("--%s--\r\n" % (boundary))) - content_type = str('multipart/form-data; boundary=%s' % boundary) + content_type = str("multipart/form-data; boundary=%s" % boundary) return body.getvalue(), content_type diff --git a/pipenv/patched/notpip/_vendor/urllib3/packages/__init__.py b/pipenv/patched/notpip/_vendor/urllib3/packages/__init__.py index 170e974c15..fce4caa65d 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/packages/__init__.py +++ b/pipenv/patched/notpip/_vendor/urllib3/packages/__init__.py @@ -2,4 +2,4 @@ from . import ssl_match_hostname -__all__ = ('ssl_match_hostname', ) +__all__ = ("ssl_match_hostname",) diff --git a/pipenv/patched/notpip/_vendor/urllib3/packages/backports/makefile.py b/pipenv/patched/notpip/_vendor/urllib3/packages/backports/makefile.py index 740db377d9..a3156a69c0 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/packages/backports/makefile.py +++ b/pipenv/patched/notpip/_vendor/urllib3/packages/backports/makefile.py @@ -11,15 +11,14 @@ from socket import SocketIO -def backport_makefile(self, mode="r", buffering=None, encoding=None, - errors=None, newline=None): +def backport_makefile( + self, mode="r", buffering=None, encoding=None, errors=None, newline=None +): """ Backport of ``socket.makefile`` from Python 3.5. 
""" if not set(mode) <= {"r", "w", "b"}: - raise ValueError( - "invalid mode %r (only r, w, b allowed)" % (mode,) - ) + raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,)) writing = "w" in mode reading = "r" in mode or not writing assert reading or writing diff --git a/pipenv/patched/notpip/_vendor/urllib3/packages/six.py b/pipenv/patched/notpip/_vendor/urllib3/packages/six.py index 190c0239cd..314424099f 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/packages/six.py +++ b/pipenv/patched/notpip/_vendor/urllib3/packages/six.py @@ -1,6 +1,4 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2015 Benjamin Peterson +# Copyright (c) 2010-2019 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -20,6 +18,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +"""Utilities for writing code that runs on Python 2 and 3""" + from __future__ import absolute_import import functools @@ -29,7 +29,7 @@ import types __author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.10.0" +__version__ = "1.12.0" # Useful for very coarse version differentiation. @@ -38,15 +38,15 @@ PY34 = sys.version_info[0:2] >= (3, 4) if PY3: - string_types = str, - integer_types = int, - class_types = type, + string_types = (str,) + integer_types = (int,) + class_types = (type,) text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: - string_types = basestring, + string_types = (basestring,) integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode @@ -58,9 +58,9 @@ else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): - def __len__(self): return 1 << 31 + try: len(X()) except OverflowError: @@ -84,7 +84,6 @@ def _import_module(name): class _LazyDescr(object): - def __init__(self, name): self.name = name @@ -101,7 +100,6 @@ def __get__(self, obj, tp): class MovedModule(_LazyDescr): - def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: @@ -122,7 +120,6 @@ def __getattr__(self, attr): class _LazyModule(types.ModuleType): - def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ @@ -137,7 +134,6 @@ def __dir__(self): class MovedAttribute(_LazyDescr): - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: @@ -221,28 +217,36 @@ def get_code(self, fullname): Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None + get_source = get_code # same as get_code + _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" + __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute( + "filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse" + ), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute( + "reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload" + ), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), @@ -251,7 +255,9 @@ class _MovedItems(_LazyModule): MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedAttribute( + "zip_longest", "itertools", "itertools", "izip_longest", "zip_longest" + ), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), @@ -262,10 +268,13 @@ class _MovedItems(_LazyModule): MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule( + "email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart" + ), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", 
"email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), @@ -283,15 +292,12 @@ class _MovedItems(_LazyModule): MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), + MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), @@ -301,9 +307,7 @@ class _MovedItems(_LazyModule): ] # Add windows specific modules. if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] + _moved_attributes += [MovedModule("winreg", "_winreg")] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) @@ -337,10 +341,14 @@ class Module_six_moves_urllib_parse(_LazyModule): MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute( + "unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes" + ), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), @@ -353,8 +361,11 @@ class Module_six_moves_urllib_parse(_LazyModule): Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") +_importer._add_module( + Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", + "moves.urllib.parse", +) class Module_six_moves_urllib_error(_LazyModule): @@ -373,8 +384,11 @@ class Module_six_moves_urllib_error(_LazyModule): Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") +_importer._add_module( + Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", + "moves.urllib.error", +) class Module_six_moves_urllib_request(_LazyModule): @@ -416,6 +430,8 @@ class Module_six_moves_urllib_request(_LazyModule): MovedAttribute("URLopener", 
"urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) @@ -423,8 +439,11 @@ class Module_six_moves_urllib_request(_LazyModule): Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") +_importer._add_module( + Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", + "moves.urllib.request", +) class Module_six_moves_urllib_response(_LazyModule): @@ -444,8 +463,11 @@ class Module_six_moves_urllib_response(_LazyModule): Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") +_importer._add_module( + Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", + "moves.urllib.response", +) class Module_six_moves_urllib_robotparser(_LazyModule): @@ -454,21 +476,27 @@ class Module_six_moves_urllib_robotparser(_LazyModule): _urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser") ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes +Module_six_moves_urllib_robotparser._moved_attributes = ( + _urllib_robotparser_moved_attributes +) -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") +_importer._add_module( + Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", + "moves.urllib.robotparser", +) class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") @@ -477,10 +505,12 @@ class Module_six_moves_urllib(types.ModuleType): robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] + return ["parse", "error", "request", "response", "robotparser"] + -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") +_importer._add_module( + Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib" +) def add_move(move): @@ -520,19 +550,24 @@ def remove_move(name): try: advance_iterator = next except NameError: + def advance_iterator(it): return it.next() + + next = advance_iterator try: callable = callable except NameError: + def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: + def get_unbound_function(unbound): return unbound @@ -543,6 +578,7 @@ def create_unbound_method(func, cls): Iterator = object else: + def 
get_unbound_function(unbound): return unbound.im_func @@ -553,13 +589,13 @@ def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): - def next(self): return type(self).__next__(self) callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") +_add_doc( + get_unbound_function, """Get the function out of a possibly unbound function""" +) get_method_function = operator.attrgetter(_meth_func) @@ -571,6 +607,7 @@ def next(self): if PY3: + def iterkeys(d, **kw): return iter(d.keys(**kw)) @@ -589,6 +626,7 @@ def iterlists(d, **kw): viewitems = operator.methodcaller("items") else: + def iterkeys(d, **kw): return d.iterkeys(**kw) @@ -609,28 +647,33 @@ def iterlists(d, **kw): _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") +_add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc( + iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary." +) if PY3: + def b(s): return s.encode("latin-1") def u(s): return s + unichr = chr import struct + int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io + StringIO = io.StringIO BytesIO = io.BytesIO + del io _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" @@ -639,12 +682,15 @@ def u(s): _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: + def b(s): return s + # Workaround for standalone backslash def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + return unicode(s.replace(r"\\", r"\\\\"), "unicode_escape") + unichr = unichr int2byte = chr @@ -653,8 +699,10 @@ def byte2int(bs): def indexbytes(buf, i): return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) import StringIO + StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" @@ -679,13 +727,19 @@ def assertRegex(self, *args, **kwargs): exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + else: + def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: @@ -698,28 +752,45 @@ def exec_(_code_, _globs_=None, _locs_=None): _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") + exec_( + """def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""" + ) if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value -""") + exec_( + """def raise_from(value, from_value): + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None +""" + ) elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - raise value from 
from_value -""") + exec_( + """def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""" + ) else: + def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: + def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) @@ -730,14 +801,17 @@ def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. - if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): + if ( + isinstance(fp, file) + and isinstance(data, unicode) + and fp.encoding is not None + ): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) + want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: @@ -773,6 +847,8 @@ def write(data): write(sep) write(arg) write(end) + + if sys.version_info[:2] < (3, 3): _print = print_ @@ -783,16 +859,24 @@ def print_(*args, **kwargs): if flush and fp is not None: fp.flush() + _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): + + def wraps( + wrapped, + assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES, + ): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f + return wrapper + + else: wraps = functools.wraps @@ -802,29 +886,95 @@ def with_metaclass(meta, *bases): # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. - class metaclass(meta): - + class metaclass(type): def __new__(cls, name, this_bases, d): return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + + return type.__new__(metaclass, "temporary_class", (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') + slots = orig_vars.get("__slots__") if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) + orig_vars.pop("__dict__", None) + orig_vars.pop("__weakref__", None) + if hasattr(cls, "__qualname__"): + orig_vars["__qualname__"] = cls.__qualname__ return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper +def ensure_binary(s, encoding="utf-8", errors="strict"): + """Coerce **s** to six.binary_type. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> encoded to `bytes` + - `bytes` -> `bytes` + """ + if isinstance(s, text_type): + return s.encode(encoding, errors) + elif isinstance(s, binary_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + +def ensure_str(s, encoding="utf-8", errors="strict"): + """Coerce *s* to `str`. 
+ + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if not isinstance(s, (text_type, binary_type)): + raise TypeError("not expecting type '%s'" % type(s)) + if PY2 and isinstance(s, text_type): + s = s.encode(encoding, errors) + elif PY3 and isinstance(s, binary_type): + s = s.decode(encoding, errors) + return s + + +def ensure_text(s, encoding="utf-8", errors="strict"): + """Coerce *s* to six.text_type. + + For Python 2: + - `unicode` -> `unicode` + - `str` -> `unicode` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if isinstance(s, binary_type): + return s.decode(encoding, errors) + elif isinstance(s, text_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. @@ -834,12 +984,13 @@ def python_2_unicode_compatible(klass): returning text and apply this decorator to the class. """ if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) + if "__str__" not in klass.__dict__: + raise ValueError( + "@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % klass.__name__ + ) klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + klass.__str__ = lambda self: self.__unicode__().encode("utf-8") return klass @@ -859,8 +1010,10 @@ def python_2_unicode_compatible(klass): # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): + if ( + type(importer).__name__ == "_SixMetaPathImporter" + and importer.name == __name__ + ): del sys.meta_path[i] break del i, importer diff --git a/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py b/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py index d6594eb264..75b6bb1cf0 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py +++ b/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py @@ -16,4 +16,4 @@ from ._implementation import CertificateError, match_hostname # Not needed, but documenting what we provide. 
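The three ``ensure_*`` helpers added above give one spelling for the most common coercions; a quick demonstration, assuming six >= 1.12 as vendored here (Python 3 output shown in the comments)::

    import six

    print(six.ensure_binary(u"h\u00e9llo"))   # b'h\xc3\xa9llo'
    print(six.ensure_text(b"h\xc3\xa9llo"))   # 'héllo'
    print(six.ensure_str(b"abc"))             # 'abc' -- native str either way

    try:
        six.ensure_binary(123)                # non-string input is rejected
    except TypeError as exc:
        print(exc)                            # not expecting type '<class 'int'>'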
-__all__ = ('CertificateError', 'match_hostname') +__all__ = ("CertificateError", "match_hostname") diff --git a/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py b/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py index b48752f821..507c655d7f 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py +++ b/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py @@ -15,7 +15,7 @@ except ImportError: ipaddress = None -__version__ = '3.5.0.1' +__version__ = "3.5.0.1" class CertificateError(ValueError): @@ -33,18 +33,19 @@ def _dnsname_match(dn, hostname, max_wildcards=1): # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') - parts = dn.split(r'.') + parts = dn.split(r".") leftmost = parts[0] remainder = parts[1:] - wildcards = leftmost.count('*') + wildcards = leftmost.count("*") if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) + "too many wildcards in certificate DNS name: " + repr(dn) + ) # speed up common case w/o wildcards if not wildcards: @@ -53,11 +54,11 @@ def _dnsname_match(dn, hostname, max_wildcards=1): # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. - if leftmost == '*': + if leftmost == "*": # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + pats.append("[^.]+") + elif leftmost.startswith("xn--") or hostname.startswith("xn--"): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or @@ -65,21 +66,22 @@ def _dnsname_match(dn, hostname, max_wildcards=1): pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + pats.append(re.escape(leftmost).replace(r"\*", "[^.]*")) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE) return pat.match(hostname) def _to_unicode(obj): if isinstance(obj, str) and sys.version_info < (3,): - obj = unicode(obj, encoding='ascii', errors='strict') + obj = unicode(obj, encoding="ascii", errors="strict") return obj + def _ipaddress_match(ipname, host_ip): """Exact matching of IP addresses. @@ -101,9 +103,11 @@ def match_hostname(cert, hostname): returns nothing. 
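The wildcard handling in ``_dnsname_match`` reduces to a small regex construction. A simplified sketch of just that rule (it omits the multi-wildcard, partial-wildcard ``www*``, and ``xn--`` guards the real function enforces)::

    import re

    def wildcard_pattern(dn):
        # '*' as the whole left-most label matches exactly one non-empty,
        # dotless fragment; every other label must match literally.
        leftmost, _, remainder = dn.partition(".")
        pats = ["[^.]+" if leftmost == "*" else re.escape(leftmost)]
        if remainder:
            pats += [re.escape(frag) for frag in remainder.split(".")]
        return re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)

    pat = wildcard_pattern("*.example.com")
    print(bool(pat.match("www.example.com")))    # True
    print(bool(pat.match("a.b.example.com")))    # False: '*' spans one label only
    print(bool(pat.match(".example.com")))       # False: fragment must be non-empty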
""" if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") + raise ValueError( + "empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED" + ) try: # Divergence from upstream: ipaddress can't handle byte str host_ip = ipaddress.ip_address(_to_unicode(hostname)) @@ -122,35 +126,37 @@ def match_hostname(cert, hostname): else: raise dnsnames = [] - san = cert.get('subjectAltName', ()) + san = cert.get("subjectAltName", ()) for key, value in san: - if key == 'DNS': + if key == "DNS": if host_ip is None and _dnsname_match(value, hostname): return dnsnames.append(value) - elif key == 'IP Address': + elif key == "IP Address": if host_ip is not None and _ipaddress_match(value, host_ip): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName - for sub in cert.get('subject', ()): + for sub in cert.get("subject", ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. - if key == 'commonName': + if key == "commonName": if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) + raise CertificateError( + "hostname %r " + "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames))) + ) elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) + raise CertificateError( + "hostname %r " "doesn't match %r" % (hostname, dnsnames[0]) + ) else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") + raise CertificateError( + "no appropriate commonName or " "subjectAltName fields were found" + ) diff --git a/pipenv/patched/notpip/_vendor/urllib3/poolmanager.py b/pipenv/patched/notpip/_vendor/urllib3/poolmanager.py index fe5491cfda..242a2f8203 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/poolmanager.py +++ b/pipenv/patched/notpip/_vendor/urllib3/poolmanager.py @@ -7,52 +7,62 @@ from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool from .connectionpool import port_by_scheme from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown +from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.url import parse_url from .util.retry import Retry -__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] +__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) -SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', - 'ssl_version', 'ca_cert_dir', 'ssl_context') +SSL_KEYWORDS = ( + "key_file", + "cert_file", + "cert_reqs", + "ca_certs", + "ssl_version", + "ca_cert_dir", + "ssl_context", + "key_password", +) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. 
_key_fields = ( - 'key_scheme', # str - 'key_host', # str - 'key_port', # int - 'key_timeout', # int or float or Timeout - 'key_retries', # int or Retry - 'key_strict', # bool - 'key_block', # bool - 'key_source_address', # str - 'key_key_file', # str - 'key_cert_file', # str - 'key_cert_reqs', # str - 'key_ca_certs', # str - 'key_ssl_version', # str - 'key_ca_cert_dir', # str - 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext - 'key_maxsize', # int - 'key_headers', # dict - 'key__proxy', # parsed proxy url - 'key__proxy_headers', # dict - 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples - 'key__socks_options', # dict - 'key_assert_hostname', # bool or string - 'key_assert_fingerprint', # str - 'key_server_hostname', #str + "key_scheme", # str + "key_host", # str + "key_port", # int + "key_timeout", # int or float or Timeout + "key_retries", # int or Retry + "key_strict", # bool + "key_block", # bool + "key_source_address", # str + "key_key_file", # str + "key_key_password", # str + "key_cert_file", # str + "key_cert_reqs", # str + "key_ca_certs", # str + "key_ssl_version", # str + "key_ca_cert_dir", # str + "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext + "key_maxsize", # int + "key_headers", # dict + "key__proxy", # parsed proxy url + "key__proxy_headers", # dict + "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples + "key__socks_options", # dict + "key_assert_hostname", # bool or string + "key_assert_fingerprint", # str + "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. -PoolKey = collections.namedtuple('PoolKey', _key_fields) +PoolKey = collections.namedtuple("PoolKey", _key_fields) def _default_key_normalizer(key_class, request_context): @@ -77,24 +87,24 @@ def _default_key_normalizer(key_class, request_context): """ # Since we mutate the dictionary, make a copy first context = request_context.copy() - context['scheme'] = context['scheme'].lower() - context['host'] = context['host'].lower() + context["scheme"] = context["scheme"].lower() + context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets - for key in ('headers', '_proxy_headers', '_socks_options'): + for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. - socket_opts = context.get('socket_options') + socket_opts = context.get("socket_options") if socket_opts is not None: - context['socket_options'] = tuple(socket_opts) + context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): - context['key_' + key] = context.pop(key) + context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: @@ -109,14 +119,11 @@ def _default_key_normalizer(key_class, request_context): #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. 
key_fn_by_scheme = { - 'http': functools.partial(_default_key_normalizer, PoolKey), - 'https': functools.partial(_default_key_normalizer, PoolKey), + "http": functools.partial(_default_key_normalizer, PoolKey), + "https": functools.partial(_default_key_normalizer, PoolKey), } -pool_classes_by_scheme = { - 'http': HTTPConnectionPool, - 'https': HTTPSConnectionPool, -} +pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): @@ -152,8 +159,7 @@ class PoolManager(RequestMethods): def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw - self.pools = RecentlyUsedContainer(num_pools, - dispose_func=lambda p: p.close()) + self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) # Locally set the pool classes and keys so other PoolManagers can # override them. @@ -186,10 +192,10 @@ def _new_pool(self, scheme, host, port, request_context=None): # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. - for key in ('scheme', 'host', 'port'): + for key in ("scheme", "host", "port"): request_context.pop(key, None) - if scheme == 'http': + if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) @@ -204,7 +210,7 @@ def clear(self): """ self.pools.clear() - def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): + def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`ConnectionPool` based on the host, port, and scheme. @@ -219,11 +225,11 @@ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None) raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) - request_context['scheme'] = scheme or 'http' + request_context["scheme"] = scheme or "http" if not port: - port = port_by_scheme.get(request_context['scheme'].lower(), 80) - request_context['port'] = port - request_context['host'] = host + port = port_by_scheme.get(request_context["scheme"].lower(), 80) + request_context["port"] = port + request_context["host"] = host return self.connection_from_context(request_context) @@ -234,7 +240,7 @@ def connection_from_context(self, request_context): ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ - scheme = request_context['scheme'].lower() + scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme[scheme] pool_key = pool_key_constructor(request_context) @@ -256,9 +262,9 @@ def connection_from_pool_key(self, pool_key, request_context=None): return pool # Make a fresh ConnectionPool of the desired type - scheme = request_context['scheme'] - host = request_context['host'] - port = request_context['port'] + scheme = request_context["scheme"] + host = request_context["host"] + port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool @@ -276,8 +282,9 @@ def connection_from_url(self, url, pool_kwargs=None): not used. 
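Together, ``_default_key_normalizer`` and ``PoolKey`` decide when two requests share a connection pool. A sketch of the observable behaviour, assuming a stock urllib3 (no network traffic is needed; pools are created lazily)::

    import urllib3

    http = urllib3.PoolManager(num_pools=10)

    a = http.connection_from_url("https://example.com")
    b = http.connection_from_url("https://EXAMPLE.com:443/some/path")
    c = http.connection_from_url("https://example.com",
                                 pool_kwargs={"maxsize": 4})

    print(a is b)   # True  -- scheme/host are lower-cased, 443 is implied
    print(a is c)   # False -- maxsize lands in key_maxsize, a distinct PoolKey

The new ``key_key_password`` field follows the same pattern: any SSL keyword that can change connection behaviour must participate in the key, or unrelated requests could be handed a pool configured with someone else's credentials.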
""" u = parse_url(url) - return self.connection_from_host(u.host, port=u.port, scheme=u.scheme, - pool_kwargs=pool_kwargs) + return self.connection_from_host( + u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs + ) def _merge_pool_kwargs(self, override): """ @@ -311,11 +318,11 @@ def urlopen(self, method, url, redirect=True, **kw): u = parse_url(url) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) - kw['assert_same_host'] = False - kw['redirect'] = False + kw["assert_same_host"] = False + kw["redirect"] = False - if 'headers' not in kw: - kw['headers'] = self.headers.copy() + if "headers" not in kw: + kw["headers"] = self.headers.copy() if self.proxy is not None and u.scheme == "http": response = conn.urlopen(method, url, **kw) @@ -331,19 +338,22 @@ def urlopen(self, method, url, redirect=True, **kw): # RFC 7231, Section 6.4.4 if response.status == 303: - method = 'GET' + method = "GET" - retries = kw.get('retries') + retries = kw.get("retries") if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) # Strip headers marked as unsafe to forward to the redirected location. # Check remove_headers_on_redirect to avoid a potential network call within # conn.is_same_host() which may use socket.gethostbyname() in the future. - if (retries.remove_headers_on_redirect - and not conn.is_same_host(redirect_location)): - for header in retries.remove_headers_on_redirect: - kw['headers'].pop(header, None) + if retries.remove_headers_on_redirect and not conn.is_same_host( + redirect_location + ): + headers = list(six.iterkeys(kw["headers"])) + for header in headers: + if header.lower() in retries.remove_headers_on_redirect: + kw["headers"].pop(header, None) try: retries = retries.increment(method, url, response=response, _pool=conn) @@ -352,8 +362,8 @@ def urlopen(self, method, url, redirect=True, **kw): raise return response - kw['retries'] = retries - kw['redirect'] = redirect + kw["retries"] = retries + kw["redirect"] = redirect log.info("Redirecting %s -> %s", url, redirect_location) return self.urlopen(method, redirect_location, **kw) @@ -386,12 +396,21 @@ class ProxyManager(PoolManager): """ - def __init__(self, proxy_url, num_pools=10, headers=None, - proxy_headers=None, **connection_pool_kw): + def __init__( + self, + proxy_url, + num_pools=10, + headers=None, + proxy_headers=None, + **connection_pool_kw + ): if isinstance(proxy_url, HTTPConnectionPool): - proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, - proxy_url.port) + proxy_url = "%s://%s:%i" % ( + proxy_url.scheme, + proxy_url.host, + proxy_url.port, + ) proxy = parse_url(proxy_url) if not proxy.port: port = port_by_scheme.get(proxy.scheme, 80) @@ -403,30 +422,31 @@ def __init__(self, proxy_url, num_pools=10, headers=None, self.proxy = proxy self.proxy_headers = proxy_headers or {} - connection_pool_kw['_proxy'] = self.proxy - connection_pool_kw['_proxy_headers'] = self.proxy_headers + connection_pool_kw["_proxy"] = self.proxy + connection_pool_kw["_proxy_headers"] = self.proxy_headers - super(ProxyManager, self).__init__( - num_pools, headers, **connection_pool_kw) + super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw) - def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): + def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): if scheme == "https": return super(ProxyManager, self).connection_from_host( - host, port, scheme, pool_kwargs=pool_kwargs) + host, port, scheme, 
pool_kwargs=pool_kwargs + ) return super(ProxyManager, self).connection_from_host( - self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs) + self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs + ) def _set_proxy_headers(self, url, headers=None): """ Sets headers needed by proxies: specifically, the Accept and Host headers. Only sets headers not provided by the user. """ - headers_ = {'Accept': '*/*'} + headers_ = {"Accept": "*/*"} netloc = parse_url(url).netloc if netloc: - headers_['Host'] = netloc + headers_["Host"] = netloc if headers: headers_.update(headers) @@ -440,8 +460,8 @@ def urlopen(self, method, url, redirect=True, **kw): # For proxied HTTPS requests, httplib sets the necessary headers # on the CONNECT to the proxy. For HTTP, we'll definitely # need to set 'Host' at the very least. - headers = kw.get('headers', self.headers) - kw['headers'] = self._set_proxy_headers(url, headers) + headers = kw.get("headers", self.headers) + kw["headers"] = self._set_proxy_headers(url, headers) return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) diff --git a/pipenv/patched/notpip/_vendor/urllib3/request.py b/pipenv/patched/notpip/_vendor/urllib3/request.py index 8f2f44bb21..55f160bbf1 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/request.py +++ b/pipenv/patched/notpip/_vendor/urllib3/request.py @@ -4,7 +4,7 @@ from .packages.six.moves.urllib.parse import urlencode -__all__ = ['RequestMethods'] +__all__ = ["RequestMethods"] class RequestMethods(object): @@ -36,16 +36,25 @@ class RequestMethods(object): explicitly. """ - _encode_url_methods = {'DELETE', 'GET', 'HEAD', 'OPTIONS'} + _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"} def __init__(self, headers=None): self.headers = headers or {} - def urlopen(self, method, url, body=None, headers=None, - encode_multipart=True, multipart_boundary=None, - **kw): # Abstract - raise NotImplementedError("Classes extending RequestMethods must implement " - "their own ``urlopen`` method.") + def urlopen( + self, + method, + url, + body=None, + headers=None, + encode_multipart=True, + multipart_boundary=None, + **kw + ): # Abstract + raise NotImplementedError( + "Classes extending RequestMethods must implement " + "their own ``urlopen`` method." + ) def request(self, method, url, fields=None, headers=None, **urlopen_kw): """ @@ -60,19 +69,18 @@ def request(self, method, url, fields=None, headers=None, **urlopen_kw): """ method = method.upper() - urlopen_kw['request_url'] = url + urlopen_kw["request_url"] = url if method in self._encode_url_methods: - return self.request_encode_url(method, url, fields=fields, - headers=headers, - **urlopen_kw) + return self.request_encode_url( + method, url, fields=fields, headers=headers, **urlopen_kw + ) else: - return self.request_encode_body(method, url, fields=fields, - headers=headers, - **urlopen_kw) + return self.request_encode_body( + method, url, fields=fields, headers=headers, **urlopen_kw + ) - def request_encode_url(self, method, url, fields=None, headers=None, - **urlopen_kw): + def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the url. This is useful for request methods like GET, HEAD, DELETE, etc. 
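``request()`` dispatches on the verb: members of ``_encode_url_methods`` get their fields urlencoded into the query string, everything else goes through the body-encoding path. A sketch assuming network access and that httpbin.org is reachable::

    import urllib3

    http = urllib3.PoolManager()

    # GET is in _encode_url_methods: fields become ?q=pipenv
    r = http.request("GET", "https://httpbin.org/get", fields={"q": "pipenv"})

    # POST takes request_encode_body: fields become a multipart/form-data
    # body (encode_multipart=True is the default).
    r = http.request("POST", "https://httpbin.org/post",
                     fields={"upload": ("a.txt", b"data", "text/plain")})
    print(r.status)   # 200 if the service is up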
@@ -80,17 +88,24 @@ def request_encode_url(self, method, url, fields=None, headers=None, if headers is None: headers = self.headers - extra_kw = {'headers': headers} + extra_kw = {"headers": headers} extra_kw.update(urlopen_kw) if fields: - url += '?' + urlencode(fields) + url += "?" + urlencode(fields) return self.urlopen(method, url, **extra_kw) - def request_encode_body(self, method, url, fields=None, headers=None, - encode_multipart=True, multipart_boundary=None, - **urlopen_kw): + def request_encode_body( + self, + method, + url, + fields=None, + headers=None, + encode_multipart=True, + multipart_boundary=None, + **urlopen_kw + ): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the body. This is useful for request methods like POST, PUT, PATCH, etc. @@ -129,22 +144,28 @@ def request_encode_body(self, method, url, fields=None, headers=None, if headers is None: headers = self.headers - extra_kw = {'headers': {}} + extra_kw = {"headers": {}} if fields: - if 'body' in urlopen_kw: + if "body" in urlopen_kw: raise TypeError( - "request got values for both 'fields' and 'body', can only specify one.") + "request got values for both 'fields' and 'body', can only specify one." + ) if encode_multipart: - body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) + body, content_type = encode_multipart_formdata( + fields, boundary=multipart_boundary + ) else: - body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' + body, content_type = ( + urlencode(fields), + "application/x-www-form-urlencoded", + ) - extra_kw['body'] = body - extra_kw['headers'] = {'Content-Type': content_type} + extra_kw["body"] = body + extra_kw["headers"] = {"Content-Type": content_type} - extra_kw['headers'].update(headers) + extra_kw["headers"].update(headers) extra_kw.update(urlopen_kw) return self.urlopen(method, url, **extra_kw) diff --git a/pipenv/patched/notpip/_vendor/urllib3/response.py b/pipenv/patched/notpip/_vendor/urllib3/response.py index c112690b0a..adc321e713 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/response.py +++ b/pipenv/patched/notpip/_vendor/urllib3/response.py @@ -6,10 +6,20 @@ from socket import timeout as SocketTimeout from socket import error as SocketError +try: + import brotli +except ImportError: + brotli = None + from ._collections import HTTPHeaderDict from .exceptions import ( - BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError, - ResponseNotChunked, IncompleteRead, InvalidHeader + BodyNotHttplibCompatible, + ProtocolError, + DecodeError, + ReadTimeoutError, + ResponseNotChunked, + IncompleteRead, + InvalidHeader, ) from .packages.six import string_types as basestring, PY3 from .packages.six.moves import http_client as httplib @@ -20,10 +30,9 @@ class DeflateDecoder(object): - def __init__(self): self._first_try = True - self._data = b'' + self._data = b"" self._obj = zlib.decompressobj() def __getattr__(self, name): @@ -60,7 +69,6 @@ class GzipDecoderState(object): class GzipDecoder(object): - def __init__(self): self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) self._state = GzipDecoderState.FIRST_MEMBER @@ -90,6 +98,26 @@ def decompress(self, data): self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) +if brotli is not None: + + class BrotliDecoder(object): + # Supports both 'brotlipy' and 'Brotli' packages + # since they share an import name. 
The top branches + # are for 'brotlipy' and bottom branches for 'Brotli' + def __init__(self): + self._obj = brotli.Decompressor() + + def decompress(self, data): + if hasattr(self._obj, "decompress"): + return self._obj.decompress(data) + return self._obj.process(data) + + def flush(self): + if hasattr(self._obj, "flush"): + return self._obj.flush() + return b"" + + class MultiDecoder(object): """ From RFC7231: @@ -100,7 +128,7 @@ class MultiDecoder(object): """ def __init__(self, modes): - self._decoders = [_get_decoder(m.strip()) for m in modes.split(',')] + self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")] def flush(self): return self._decoders[0].flush() @@ -112,12 +140,15 @@ def decompress(self, data): def _get_decoder(mode): - if ',' in mode: + if "," in mode: return MultiDecoder(mode) - if mode == 'gzip': + if mode == "gzip": return GzipDecoder() + if brotli is not None and mode == "br": + return BrotliDecoder() + return DeflateDecoder() @@ -154,14 +185,31 @@ class is also compatible with the Python standard library's :mod:`io` value of Content-Length header, if present. Otherwise, raise error. """ - CONTENT_DECODERS = ['gzip', 'deflate'] + CONTENT_DECODERS = ["gzip", "deflate"] + if brotli is not None: + CONTENT_DECODERS += ["br"] REDIRECT_STATUSES = [301, 302, 303, 307, 308] - def __init__(self, body='', headers=None, status=0, version=0, reason=None, - strict=0, preload_content=True, decode_content=True, - original_response=None, pool=None, connection=None, msg=None, - retries=None, enforce_content_length=False, - request_method=None, request_url=None): + def __init__( + self, + body="", + headers=None, + status=0, + version=0, + reason=None, + strict=0, + preload_content=True, + decode_content=True, + original_response=None, + pool=None, + connection=None, + msg=None, + retries=None, + enforce_content_length=False, + request_method=None, + request_url=None, + auto_close=True, + ): if isinstance(headers, HTTPHeaderDict): self.headers = headers @@ -174,6 +222,7 @@ def __init__(self, body='', headers=None, status=0, version=0, reason=None, self.decode_content = decode_content self.retries = retries self.enforce_content_length = enforce_content_length + self.auto_close = auto_close self._decoder = None self._body = None @@ -189,13 +238,13 @@ def __init__(self, body='', headers=None, status=0, version=0, reason=None, self._pool = pool self._connection = connection - if hasattr(body, 'read'): + if hasattr(body, "read"): self._fp = body # Are we using the chunked-style of transfer encoding? self.chunked = False self.chunk_left = None - tr_enc = self.headers.get('transfer-encoding', '').lower() + tr_enc = self.headers.get("transfer-encoding", "").lower() # Don't incur the penalty of creating a list and then discarding it encodings = (enc.strip() for enc in tr_enc.split(",")) if "chunked" in encodings: @@ -217,7 +266,7 @@ def get_redirect_location(self): location. ``False`` if not a redirect status code. """ if self.status in self.REDIRECT_STATUSES: - return self.headers.get('location') + return self.headers.get("location") return False @@ -256,18 +305,20 @@ def _init_length(self, request_method): """ Set initial length value for Response content if available. """ - length = self.headers.get('content-length') + length = self.headers.get("content-length") if length is not None: if self.chunked: # This Response will fail with an IncompleteRead if it can't be # received as chunked. This method falls back to attempt reading # the response before raising an exception. 
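With the import guard above, ``br`` only joins ``CONTENT_DECODERS`` when a brotli module is importable. A sketch of opting in, assuming one of the ``brotlipy``/``Brotli`` packages is installed and that the contacted server actually negotiates brotli::

    import urllib3

    try:
        import brotli  # noqa: F401 -- either 'brotlipy' or 'Brotli'
    except ImportError:
        raise SystemExit("install brotlipy or Brotli to exercise this path")

    http = urllib3.PoolManager()
    # urllib3 does not send Accept-Encoding on its own, so ask explicitly.
    r = http.request("GET", "https://www.example.com/",
                     headers={"Accept-Encoding": "br"})
    print(r.headers.get("Content-Encoding"))   # 'br' when the server obliged
    print(r.data[:60])                         # already-decompressed bytes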
- log.warning("Received response with both Content-Length and " - "Transfer-Encoding set. This is expressly forbidden " - "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " - "attempting to process response as Transfer-Encoding: " - "chunked.") + log.warning( + "Received response with both Content-Length and " + "Transfer-Encoding set. This is expressly forbidden " + "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " + "attempting to process response as Transfer-Encoding: " + "chunked." + ) return None try: @@ -276,10 +327,12 @@ def _init_length(self, request_method): # (e.g. Content-Length: 42, 42). This line ensures the values # are all valid ints and that as long as the `set` length is 1, # all values are the same. Otherwise, the header is invalid. - lengths = set([int(val) for val in length.split(',')]) + lengths = set([int(val) for val in length.split(",")]) if len(lengths) > 1: - raise InvalidHeader("Content-Length contained multiple " - "unmatching values (%s)" % length) + raise InvalidHeader( + "Content-Length contained multiple " + "unmatching values (%s)" % length + ) length = lengths.pop() except ValueError: length = None @@ -295,7 +348,7 @@ def _init_length(self, request_method): status = 0 # Check for responses that shouldn't include a body - if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD': + if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD": length = 0 return length @@ -306,29 +359,41 @@ def _init_decoder(self): """ # Note: content-encoding value should be case-insensitive, per RFC 7230 # Section 3.2 - content_encoding = self.headers.get('content-encoding', '').lower() + content_encoding = self.headers.get("content-encoding", "").lower() if self._decoder is None: if content_encoding in self.CONTENT_DECODERS: self._decoder = _get_decoder(content_encoding) - elif ',' in content_encoding: - encodings = [e.strip() for e in content_encoding.split(',') if e.strip() in self.CONTENT_DECODERS] + elif "," in content_encoding: + encodings = [ + e.strip() + for e in content_encoding.split(",") + if e.strip() in self.CONTENT_DECODERS + ] if len(encodings): self._decoder = _get_decoder(content_encoding) + DECODER_ERROR_CLASSES = (IOError, zlib.error) + if brotli is not None: + DECODER_ERROR_CLASSES += (brotli.error,) + def _decode(self, data, decode_content, flush_decoder): """ Decode the data passed in and potentially flush the decoder. """ + if not decode_content: + return data + try: - if decode_content and self._decoder: + if self._decoder: data = self._decoder.decompress(data) - except (IOError, zlib.error) as e: - content_encoding = self.headers.get('content-encoding', '').lower() + except self.DECODER_ERROR_CLASSES as e: + content_encoding = self.headers.get("content-encoding", "").lower() raise DecodeError( "Received response with content-encoding: %s, but " - "failed to decode it." % content_encoding, e) - - if flush_decoder and decode_content: + "failed to decode it." % content_encoding, + e, + ) + if flush_decoder: data += self._flush_decoder() return data @@ -339,10 +404,10 @@ def _flush_decoder(self): being used. """ if self._decoder: - buf = self._decoder.decompress(b'') + buf = self._decoder.decompress(b"") return buf + self._decoder.flush() - return b'' + return b"" @contextmanager def _error_catcher(self): @@ -362,20 +427,20 @@ def _error_catcher(self): except SocketTimeout: # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but # there is yet no clean way to get at it from this context. 
- raise ReadTimeoutError(self._pool, None, 'Read timed out.') + raise ReadTimeoutError(self._pool, None, "Read timed out.") except BaseSSLError as e: # FIXME: Is there a better way to differentiate between SSLErrors? - if 'read operation timed out' not in str(e): # Defensive: + if "read operation timed out" not in str(e): # Defensive: # This shouldn't happen but just in case we're missing an edge # case, let's avoid swallowing SSL errors. raise - raise ReadTimeoutError(self._pool, None, 'Read timed out.') + raise ReadTimeoutError(self._pool, None, "Read timed out.") except (HTTPException, SocketError) as e: # This includes IncompleteRead. - raise ProtocolError('Connection broken: %r' % e, e) + raise ProtocolError("Connection broken: %r" % e, e) # If no exception is thrown, we should avoid cleaning up # unnecessarily. @@ -430,17 +495,19 @@ def read(self, amt=None, decode_content=None, cache_content=False): return flush_decoder = False - data = None + fp_closed = getattr(self._fp, "closed", False) with self._error_catcher(): if amt is None: # cStringIO doesn't like amt=None - data = self._fp.read() + data = self._fp.read() if not fp_closed else b"" flush_decoder = True else: cache_content = False - data = self._fp.read(amt) - if amt != 0 and not data: # Platform-specific: Buggy versions of Python. + data = self._fp.read(amt) if not fp_closed else b"" + if ( + amt != 0 and not data + ): # Platform-specific: Buggy versions of Python. # Close the connection when no data is returned # # This is redundant to what httplib/http.client _should_ @@ -450,7 +517,10 @@ def read(self, amt=None, decode_content=None, cache_content=False): # no harm in redundantly calling close. self._fp.close() flush_decoder = True - if self.enforce_content_length and self.length_remaining not in (0, None): + if self.enforce_content_length and self.length_remaining not in ( + 0, + None, + ): # This is an edge case that httplib failed to cover due # to concerns of backward compatibility. We're # addressing it here to make sure IncompleteRead is @@ -470,7 +540,7 @@ def read(self, amt=None, decode_content=None, cache_content=False): return data - def stream(self, amt=2**16, decode_content=None): + def stream(self, amt=2 ** 16, decode_content=None): """ A generator wrapper for the read() method. 
A call will block until ``amt`` bytes have been read from the connection or until the @@ -508,21 +578,24 @@ def from_httplib(ResponseCls, r, **response_kw): headers = r.msg if not isinstance(headers, HTTPHeaderDict): - if PY3: # Python 3 + if PY3: headers = HTTPHeaderDict(headers.items()) - else: # Python 2 + else: + # Python 2.7 headers = HTTPHeaderDict.from_httplib(headers) # HTTPResponse objects in Python 3 don't have a .strict attribute - strict = getattr(r, 'strict', 0) - resp = ResponseCls(body=r, - headers=headers, - status=r.status, - version=r.version, - reason=r.reason, - strict=strict, - original_response=r, - **response_kw) + strict = getattr(r, "strict", 0) + resp = ResponseCls( + body=r, + headers=headers, + status=r.status, + version=r.version, + reason=r.reason, + strict=strict, + original_response=r, + **response_kw + ) return resp # Backwards-compatibility methods for httplib.HTTPResponse @@ -544,13 +617,18 @@ def close(self): if self._connection: self._connection.close() + if not self.auto_close: + io.IOBase.close(self) + @property def closed(self): - if self._fp is None: + if not self.auto_close: + return io.IOBase.closed.__get__(self) + elif self._fp is None: return True - elif hasattr(self._fp, 'isclosed'): + elif hasattr(self._fp, "isclosed"): return self._fp.isclosed() - elif hasattr(self._fp, 'closed'): + elif hasattr(self._fp, "closed"): return self._fp.closed else: return True @@ -561,11 +639,17 @@ def fileno(self): elif hasattr(self._fp, "fileno"): return self._fp.fileno() else: - raise IOError("The file-like object this HTTPResponse is wrapped " - "around has no file descriptor") + raise IOError( + "The file-like object this HTTPResponse is wrapped " + "around has no file descriptor" + ) def flush(self): - if self._fp is not None and hasattr(self._fp, 'flush'): + if ( + self._fp is not None + and hasattr(self._fp, "flush") + and not getattr(self._fp, "closed", False) + ): return self._fp.flush() def readable(self): @@ -578,7 +662,7 @@ def readinto(self, b): if len(temp) == 0: return 0 else: - b[:len(temp)] = temp + b[: len(temp)] = temp return len(temp) def supports_chunked_reads(self): @@ -588,7 +672,7 @@ def supports_chunked_reads(self): attribute. If it is present we assume it returns raw chunks as processed by read_chunked(). """ - return hasattr(self._fp, 'fp') + return hasattr(self._fp, "fp") def _update_chunk_length(self): # First, we'll figure out length of a chunk and then @@ -596,7 +680,7 @@ def _update_chunk_length(self): if self.chunk_left is not None: return line = self._fp.fp.readline() - line = line.split(b';', 1)[0] + line = line.split(b";", 1)[0] try: self.chunk_left = int(line, 16) except ValueError: @@ -645,11 +729,13 @@ def read_chunked(self, amt=None, decode_content=None): if not self.chunked: raise ResponseNotChunked( "Response is not chunked. " - "Header 'transfer-encoding: chunked' is missing.") + "Header 'transfer-encoding: chunked' is missing." + ) if not self.supports_chunked_reads(): raise BodyNotHttplibCompatible( "Body should be httplib.HTTPResponse like. " - "It should have have an fp attribute which returns raw chunks.") + "It should have have an fp attribute which returns raw chunks." + ) with self._error_catcher(): # Don't bother reading the body of a HEAD request. 
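The ``auto_close`` flag introduced above is what makes the response usable as
a plain ``io.IOBase`` stream: with ``auto_close=False`` the file is not torn
down after the final read, so the stdlib text machinery can wrap it. A usage
sketch (illustrative URL, not part of the vendored diff):

    import io
    from pipenv.patched.notpip._vendor import urllib3

    http = urllib3.PoolManager()
    resp = http.request("GET", "https://example.com/", preload_content=False)
    resp.auto_close = False
    for line in io.TextIOWrapper(resp, encoding="utf-8"):
        print(line.rstrip())

This works because the ``readable()``/``readinto()`` methods above give the
wrapper the buffered-reader protocol it expects.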
@@ -667,8 +753,9 @@ def read_chunked(self, amt=None, decode_content=None): if self.chunk_left == 0: break chunk = self._handle_chunk(amt) - decoded = self._decode(chunk, decode_content=decode_content, - flush_decoder=False) + decoded = self._decode( + chunk, decode_content=decode_content, flush_decoder=False + ) if decoded: yield decoded @@ -686,7 +773,7 @@ def read_chunked(self, amt=None, decode_content=None): if not line: # Some sites may not end with '\r\n'. break - if line == b'\r\n': + if line == b"\r\n": break # We read everything; close the "file". @@ -703,3 +790,20 @@ def geturl(self): return self.retries.history[-1].redirect_location else: return self._request_url + + def __iter__(self): + buffer = [b""] + for chunk in self.stream(decode_content=True): + if b"\n" in chunk: + chunk = chunk.split(b"\n") + yield b"".join(buffer) + chunk[0] + b"\n" + for x in chunk[1:-1]: + yield x + b"\n" + if chunk[-1]: + buffer = [chunk[-1]] + else: + buffer = [] + else: + buffer.append(chunk) + if buffer: + yield b"".join(buffer) diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/__init__.py b/pipenv/patched/notpip/_vendor/urllib3/util/__init__.py index 2f2770b622..a96c73a9d8 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/__init__.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/__init__.py @@ -1,4 +1,5 @@ from __future__ import absolute_import + # For backwards compatibility, provide imports that used to be here. from .connection import is_connection_dropped from .request import make_headers @@ -12,43 +13,34 @@ resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, + PROTOCOL_TLS, ) -from .timeout import ( - current_time, - Timeout, -) +from .timeout import current_time, Timeout from .retry import Retry -from .url import ( - get_host, - parse_url, - split_first, - Url, -) -from .wait import ( - wait_for_read, - wait_for_write -) +from .url import get_host, parse_url, split_first, Url +from .wait import wait_for_read, wait_for_write __all__ = ( - 'HAS_SNI', - 'IS_PYOPENSSL', - 'IS_SECURETRANSPORT', - 'SSLContext', - 'Retry', - 'Timeout', - 'Url', - 'assert_fingerprint', - 'current_time', - 'is_connection_dropped', - 'is_fp_closed', - 'get_host', - 'parse_url', - 'make_headers', - 'resolve_cert_reqs', - 'resolve_ssl_version', - 'split_first', - 'ssl_wrap_socket', - 'wait_for_read', - 'wait_for_write' + "HAS_SNI", + "IS_PYOPENSSL", + "IS_SECURETRANSPORT", + "SSLContext", + "PROTOCOL_TLS", + "Retry", + "Timeout", + "Url", + "assert_fingerprint", + "current_time", + "is_connection_dropped", + "is_fp_closed", + "get_host", + "parse_url", + "make_headers", + "resolve_cert_reqs", + "resolve_ssl_version", + "split_first", + "ssl_wrap_socket", + "wait_for_read", + "wait_for_write", ) diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/connection.py b/pipenv/patched/notpip/_vendor/urllib3/util/connection.py index 5ad70b2f1c..0e1112628e 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/connection.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/connection.py @@ -14,7 +14,7 @@ def is_connection_dropped(conn): # Platform-specific Note: For platforms like AppEngine, this will always return ``False`` to let the platform handle connection recycling transparently for us. """ - sock = getattr(conn, 'sock', False) + sock = getattr(conn, "sock", False) if sock is False: # Platform-specific: AppEngine return False if sock is None: # Connection already closed (such as by httplib). @@ -30,8 +30,12 @@ def is_connection_dropped(conn): # Platform-specific # library test suite. 
Added to its signature is only `socket_options`. # One additional modification is that we avoid binding to IPv6 servers # discovered in DNS if the system doesn't have IPv6 functionality. -def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, socket_options=None): +def create_connection( + address, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, + socket_options=None, +): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, @@ -45,8 +49,8 @@ def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, """ host, port = address - if host.startswith('['): - host = host.strip('[]') + if host.startswith("["): + host = host.strip("[]") err = None # Using the value from allowed_gai_family() in the context of getaddrinfo lets @@ -131,4 +135,4 @@ def _has_ipv6(host): return has_ipv6 -HAS_IPV6 = _has_ipv6('::1') +HAS_IPV6 = _has_ipv6("::1") diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/request.py b/pipenv/patched/notpip/_vendor/urllib3/util/request.py index 3ddfcd5594..262a6d6185 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/request.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/request.py @@ -4,12 +4,25 @@ from ..packages.six import b, integer_types from ..exceptions import UnrewindableBodyError -ACCEPT_ENCODING = 'gzip,deflate' +ACCEPT_ENCODING = "gzip,deflate" +try: + import brotli as _unused_module_brotli # noqa: F401 +except ImportError: + pass +else: + ACCEPT_ENCODING += ",br" + _FAILEDTELL = object() -def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, - basic_auth=None, proxy_basic_auth=None, disable_cache=None): +def make_headers( + keep_alive=None, + accept_encoding=None, + user_agent=None, + basic_auth=None, + proxy_basic_auth=None, + disable_cache=None, +): """ Shortcuts for generating request headers. @@ -49,27 +62,27 @@ def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, if isinstance(accept_encoding, str): pass elif isinstance(accept_encoding, list): - accept_encoding = ','.join(accept_encoding) + accept_encoding = ",".join(accept_encoding) else: accept_encoding = ACCEPT_ENCODING - headers['accept-encoding'] = accept_encoding + headers["accept-encoding"] = accept_encoding if user_agent: - headers['user-agent'] = user_agent + headers["user-agent"] = user_agent if keep_alive: - headers['connection'] = 'keep-alive' + headers["connection"] = "keep-alive" if basic_auth: - headers['authorization'] = 'Basic ' + \ - b64encode(b(basic_auth)).decode('utf-8') + headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8") if proxy_basic_auth: - headers['proxy-authorization'] = 'Basic ' + \ - b64encode(b(proxy_basic_auth)).decode('utf-8') + headers["proxy-authorization"] = "Basic " + b64encode( + b(proxy_basic_auth) + ).decode("utf-8") if disable_cache: - headers['cache-control'] = 'no-cache' + headers["cache-control"] = "no-cache" return headers @@ -81,7 +94,7 @@ def set_file_position(body, pos): """ if pos is not None: rewind_body(body, pos) - elif getattr(body, 'tell', None) is not None: + elif getattr(body, "tell", None) is not None: try: pos = body.tell() except (IOError, OSError): @@ -103,16 +116,20 @@ def rewind_body(body, body_pos): :param int pos: Position to seek to in file. 
""" - body_seek = getattr(body, 'seek', None) + body_seek = getattr(body, "seek", None) if body_seek is not None and isinstance(body_pos, integer_types): try: body_seek(body_pos) except (IOError, OSError): - raise UnrewindableBodyError("An error occurred when rewinding request " - "body for redirect/retry.") + raise UnrewindableBodyError( + "An error occurred when rewinding request " "body for redirect/retry." + ) elif body_pos is _FAILEDTELL: - raise UnrewindableBodyError("Unable to record file position for rewinding " - "request body during a redirect/retry.") + raise UnrewindableBodyError( + "Unable to record file position for rewinding " + "request body during a redirect/retry." + ) else: - raise ValueError("body_pos must be of type integer, " - "instead it was %s." % type(body_pos)) + raise ValueError( + "body_pos must be of type integer, " "instead it was %s." % type(body_pos) + ) diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/response.py b/pipenv/patched/notpip/_vendor/urllib3/util/response.py index 3d5486485a..715868dd10 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/response.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/response.py @@ -52,11 +52,10 @@ def assert_header_parsing(headers): # This will fail silently if we pass in the wrong kind of parameter. # To make debugging easier add an explicit check. if not isinstance(headers, httplib.HTTPMessage): - raise TypeError('expected httplib.Message, got {0}.'.format( - type(headers))) + raise TypeError("expected httplib.Message, got {0}.".format(type(headers))) - defects = getattr(headers, 'defects', None) - get_payload = getattr(headers, 'get_payload', None) + defects = getattr(headers, "defects", None) + get_payload = getattr(headers, "get_payload", None) unparsed_data = None if get_payload: @@ -84,4 +83,4 @@ def is_response_to_head(response): method = response._method if isinstance(method, int): # Platform-specific: Appengine return method == 3 - return method.upper() == 'HEAD' + return method.upper() == "HEAD" diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/retry.py b/pipenv/patched/notpip/_vendor/urllib3/util/retry.py index e7d0abd610..5a049fe65e 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/retry.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/retry.py @@ -21,8 +21,9 @@ # Data structure for representing the metadata of requests that result in a retry. -RequestHistory = namedtuple('RequestHistory', ["method", "url", "error", - "status", "redirect_location"]) +RequestHistory = namedtuple( + "RequestHistory", ["method", "url", "error", "status", "redirect_location"] +) class Retry(object): @@ -146,21 +147,33 @@ class Retry(object): request. """ - DEFAULT_METHOD_WHITELIST = frozenset([ - 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']) + DEFAULT_METHOD_WHITELIST = frozenset( + ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"] + ) RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) - DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization']) + DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(["Authorization"]) #: Maximum backoff time. 
BACKOFF_MAX = 120 - def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, - method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, - backoff_factor=0, raise_on_redirect=True, raise_on_status=True, - history=None, respect_retry_after_header=True, - remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST): + def __init__( + self, + total=10, + connect=None, + read=None, + redirect=None, + status=None, + method_whitelist=DEFAULT_METHOD_WHITELIST, + status_forcelist=None, + backoff_factor=0, + raise_on_redirect=True, + raise_on_status=True, + history=None, + respect_retry_after_header=True, + remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST, + ): self.total = total self.connect = connect @@ -179,19 +192,25 @@ def __init__(self, total=10, connect=None, read=None, redirect=None, status=None self.raise_on_status = raise_on_status self.history = history or tuple() self.respect_retry_after_header = respect_retry_after_header - self.remove_headers_on_redirect = remove_headers_on_redirect + self.remove_headers_on_redirect = frozenset( + [h.lower() for h in remove_headers_on_redirect] + ) def new(self, **kw): params = dict( total=self.total, - connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, + connect=self.connect, + read=self.read, + redirect=self.redirect, + status=self.status, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.backoff_factor, raise_on_redirect=self.raise_on_redirect, raise_on_status=self.raise_on_status, history=self.history, - remove_headers_on_redirect=self.remove_headers_on_redirect + remove_headers_on_redirect=self.remove_headers_on_redirect, + respect_retry_after_header=self.respect_retry_after_header, ) params.update(kw) return type(self)(**params) @@ -216,8 +235,11 @@ def get_backoff_time(self): :rtype: float """ # We want to consider only the last consecutive errors sequence (Ignore redirects). - consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None, - reversed(self.history)))) + consecutive_errors_len = len( + list( + takewhile(lambda x: x.redirect_location is None, reversed(self.history)) + ) + ) if consecutive_errors_len <= 1: return 0 @@ -273,7 +295,7 @@ def sleep(self, response=None): this method will return immediately. """ - if response: + if self.respect_retry_after_header and response: slept = self.sleep_for_retry(response) if slept: return @@ -314,8 +336,12 @@ def is_retry(self, method, status_code, has_retry_after=False): if self.status_forcelist and status_code in self.status_forcelist: return True - return (self.total and self.respect_retry_after_header and - has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES)) + return ( + self.total + and self.respect_retry_after_header + and has_retry_after + and (status_code in self.RETRY_AFTER_STATUS_CODES) + ) def is_exhausted(self): """ Are we out of retries? """ @@ -326,8 +352,15 @@ def is_exhausted(self): return min(retry_counts) < 0 - def increment(self, method=None, url=None, response=None, error=None, - _pool=None, _stacktrace=None): + def increment( + self, + method=None, + url=None, + response=None, + error=None, + _pool=None, + _stacktrace=None, + ): """ Return a new Retry object with incremented retry counters. 
:param response: A response object, or None, if the server did not @@ -350,7 +383,7 @@ def increment(self, method=None, url=None, response=None, error=None, read = self.read redirect = self.redirect status_count = self.status - cause = 'unknown' + cause = "unknown" status = None redirect_location = None @@ -372,7 +405,7 @@ def increment(self, method=None, url=None, response=None, error=None, # Redirect retry? if redirect is not None: redirect -= 1 - cause = 'too many redirects' + cause = "too many redirects" redirect_location = response.get_redirect_location() status = response.status @@ -383,16 +416,21 @@ def increment(self, method=None, url=None, response=None, error=None, if response and response.status: if status_count is not None: status_count -= 1 - cause = ResponseError.SPECIFIC_ERROR.format( - status_code=response.status) + cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status) status = response.status - history = self.history + (RequestHistory(method, url, error, status, redirect_location),) + history = self.history + ( + RequestHistory(method, url, error, status, redirect_location), + ) new_retry = self.new( total=total, - connect=connect, read=read, redirect=redirect, status=status_count, - history=history) + connect=connect, + read=read, + redirect=redirect, + status=status_count, + history=history, + ) if new_retry.is_exhausted(): raise MaxRetryError(_pool, url, error or ResponseError(cause)) @@ -402,9 +440,10 @@ def increment(self, method=None, url=None, response=None, error=None, return new_retry def __repr__(self): - return ('{cls.__name__}(total={self.total}, connect={self.connect}, ' - 'read={self.read}, redirect={self.redirect}, status={self.status})').format( - cls=type(self), self=self) + return ( + "{cls.__name__}(total={self.total}, connect={self.connect}, " + "read={self.read}, redirect={self.redirect}, status={self.status})" + ).format(cls=type(self), self=self) # For backwards compatibility (equivalent to pre-v1.9): diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/ssl_.py b/pipenv/patched/notpip/_vendor/urllib3/util/ssl_.py index b16d6523dd..8f1abfe47f 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/ssl_.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/ssl_.py @@ -2,11 +2,12 @@ import errno import warnings import hmac -import socket +import sys from binascii import hexlify, unhexlify from hashlib import md5, sha1, sha256 +from .url import IPV4_RE, BRACELESS_IPV6_ADDRZ_RE from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning from ..packages import six @@ -17,11 +18,7 @@ IS_SECURETRANSPORT = False # Maps the length of a digest to a possible hash function producing this digest -HASHFUNC_MAP = { - 32: md5, - 40: sha1, - 64: sha256, -} +HASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256} def _const_compare_digest_backport(a, b): @@ -37,17 +34,27 @@ def _const_compare_digest_backport(a, b): return result == 0 -_const_compare_digest = getattr(hmac, 'compare_digest', - _const_compare_digest_backport) - +_const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport) try: # Test for SSL features import ssl - from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 + from ssl import wrap_socket, CERT_REQUIRED from ssl import HAS_SNI # Has SNI? 
except ImportError: pass +try: # Platform-specific: Python 3.6 + from ssl import PROTOCOL_TLS + + PROTOCOL_SSLv23 = PROTOCOL_TLS +except ImportError: + try: + from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS + + PROTOCOL_SSLv23 = PROTOCOL_TLS + except ImportError: + PROTOCOL_SSLv23 = PROTOCOL_TLS = 2 + try: from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION @@ -56,25 +63,6 @@ def _const_compare_digest_backport(a, b): OP_NO_COMPRESSION = 0x20000 -# Python 2.7 doesn't have inet_pton on non-Linux so we fallback on inet_aton in -# those cases. This means that we can only detect IPv4 addresses in this case. -if hasattr(socket, 'inet_pton'): - inet_pton = socket.inet_pton -else: - # Maybe we can use ipaddress if the user has urllib3[secure]? - try: - from pipenv.patched.notpip._vendor import ipaddress - - def inet_pton(_, host): - if isinstance(host, bytes): - host = host.decode('ascii') - return ipaddress.ip_address(host) - - except ImportError: # Platform-specific: Non-Linux - def inet_pton(_, host): - return socket.inet_aton(host) - - # A secure default. # Sources for more information on TLS ciphers: # @@ -83,36 +71,37 @@ def inet_pton(_, host): # - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ # # The general intent is: -# - Prefer TLS 1.3 cipher suites # - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), # - prefer ECDHE over DHE for better performance, # - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and # security, # - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, -# - disable NULL authentication, MD5 MACs and DSS for security reasons. -DEFAULT_CIPHERS = ':'.join([ - 'TLS13-AES-256-GCM-SHA384', - 'TLS13-CHACHA20-POLY1305-SHA256', - 'TLS13-AES-128-GCM-SHA256', - 'ECDH+AESGCM', - 'ECDH+CHACHA20', - 'DH+AESGCM', - 'DH+CHACHA20', - 'ECDH+AES256', - 'DH+AES256', - 'ECDH+AES128', - 'DH+AES', - 'RSA+AESGCM', - 'RSA+AES', - '!aNULL', - '!eNULL', - '!MD5', -]) +# - disable NULL authentication, MD5 MACs, DSS, and other +# insecure ciphers for security reasons. +# - NOTE: TLS 1.3 cipher suites are managed through a different interface +# not exposed by CPython (yet!) and are enabled by default if they're available. +DEFAULT_CIPHERS = ":".join( + [ + "ECDHE+AESGCM", + "ECDHE+CHACHA20", + "DHE+AESGCM", + "DHE+CHACHA20", + "ECDH+AESGCM", + "DH+AESGCM", + "ECDH+AES", + "DH+AES", + "RSA+AESGCM", + "RSA+AES", + "!aNULL", + "!eNULL", + "!MD5", + "!DSS", + ] +) try: from ssl import SSLContext # Modern SSL? except ImportError: - import sys class SSLContext(object): # Platform-specific: Python 2 def __init__(self, protocol_version): @@ -141,21 +130,21 @@ def set_ciphers(self, cipher_suite): def wrap_socket(self, socket, server_hostname=None, server_side=False): warnings.warn( - 'A true SSLContext object is not available. This prevents ' - 'urllib3 from configuring SSL appropriately and may cause ' - 'certain SSL connections to fail. You can upgrade to a newer ' - 'version of Python to solve this. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings', - InsecurePlatformWarning + "A true SSLContext object is not available. This prevents " + "urllib3 from configuring SSL appropriately and may cause " + "certain SSL connections to fail. You can upgrade to a newer " + "version of Python to solve this. 
For more information, see " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#ssl-warnings", + InsecurePlatformWarning, ) kwargs = { - 'keyfile': self.keyfile, - 'certfile': self.certfile, - 'ca_certs': self.ca_certs, - 'cert_reqs': self.verify_mode, - 'ssl_version': self.protocol, - 'server_side': server_side, + "keyfile": self.keyfile, + "certfile": self.certfile, + "ca_certs": self.ca_certs, + "cert_reqs": self.verify_mode, + "ssl_version": self.protocol, + "server_side": server_side, } return wrap_socket(socket, ciphers=self.ciphers, **kwargs) @@ -170,12 +159,11 @@ def assert_fingerprint(cert, fingerprint): Fingerprint as string of hexdigits, can be interspersed by colons. """ - fingerprint = fingerprint.replace(':', '').lower() + fingerprint = fingerprint.replace(":", "").lower() digest_length = len(fingerprint) hashfunc = HASHFUNC_MAP.get(digest_length) if not hashfunc: - raise SSLError( - 'Fingerprint of invalid length: {0}'.format(fingerprint)) + raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint)) # We need encode() here for py32; works on py2 and p33. fingerprint_bytes = unhexlify(fingerprint.encode()) @@ -183,8 +171,11 @@ def assert_fingerprint(cert, fingerprint): cert_digest = hashfunc(cert).digest() if not _const_compare_digest(cert_digest, fingerprint_bytes): - raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' - .format(fingerprint, hexlify(cert_digest))) + raise SSLError( + 'Fingerprints did not match. Expected "{0}", got "{1}".'.format( + fingerprint, hexlify(cert_digest) + ) + ) def resolve_cert_reqs(candidate): @@ -199,12 +190,12 @@ def resolve_cert_reqs(candidate): constant which can directly be passed to wrap_socket. """ if candidate is None: - return CERT_NONE + return CERT_REQUIRED if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: - res = getattr(ssl, 'CERT_' + candidate) + res = getattr(ssl, "CERT_" + candidate) return res return candidate @@ -215,19 +206,20 @@ def resolve_ssl_version(candidate): like resolve_cert_reqs """ if candidate is None: - return PROTOCOL_SSLv23 + return PROTOCOL_TLS if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: - res = getattr(ssl, 'PROTOCOL_' + candidate) + res = getattr(ssl, "PROTOCOL_" + candidate) return res return candidate -def create_urllib3_context(ssl_version=None, cert_reqs=None, - options=None, ciphers=None): +def create_urllib3_context( + ssl_version=None, cert_reqs=None, options=None, ciphers=None +): """All arguments have the same meaning as ``ssl_wrap_socket``. By default, this function does a lot of the same work that @@ -261,7 +253,7 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None, Constructed SSLContext object with specified options :rtype: SSLContext """ - context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) + context = SSLContext(ssl_version or PROTOCOL_TLS) context.set_ciphers(ciphers or DEFAULT_CIPHERS) @@ -280,18 +272,40 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None, context.options |= options + # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is + # necessary for conditional client cert authentication with TLS 1.3. + # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older + # versions of Python. 
We only enable on Python 3.7.4+ or if certificate + # verification is enabled to work around Python issue #37428 + # See: https://bugs.python.org/issue37428 + if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr( + context, "post_handshake_auth", None + ) is not None: + context.post_handshake_auth = True + context.verify_mode = cert_reqs - if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 + if ( + getattr(context, "check_hostname", None) is not None + ): # Platform-specific: Python 3.2 # We do our own verification, including fingerprints and alternative # hostnames. So disable it here context.check_hostname = False return context -def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, - ca_certs=None, server_hostname=None, - ssl_version=None, ciphers=None, ssl_context=None, - ca_cert_dir=None): +def ssl_wrap_socket( + sock, + keyfile=None, + certfile=None, + cert_reqs=None, + ca_certs=None, + server_hostname=None, + ssl_version=None, + ciphers=None, + ssl_context=None, + ca_cert_dir=None, + key_password=None, +): """ All arguments except for server_hostname, ssl_context, and ca_cert_dir have the same meaning as they do when using :func:`ssl.wrap_socket`. @@ -307,14 +321,15 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, A directory containing CA certificates in multiple separate files, as supported by OpenSSL's -CApath flag or the capath argument to SSLContext.load_verify_locations(). + :param key_password: + Optional password if the keyfile is encrypted. """ context = ssl_context if context is None: # Note: This branch of code and all the variables in it are no longer # used by urllib3 itself. We should consider deprecating and removing # this code. - context = create_urllib3_context(ssl_version, cert_reqs, - ciphers=ciphers) + context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers) if ca_certs or ca_cert_dir: try: @@ -327,55 +342,66 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, if e.errno == errno.ENOENT: raise SSLError(e) raise - elif getattr(context, 'load_default_certs', None) is not None: + + elif ssl_context is None and hasattr(context, "load_default_certs"): # try to load OS default certs; works well on Windows (require Python3.4+) context.load_default_certs() + # Attempt to detect if we get the goofy behavior of the + # keyfile being encrypted and OpenSSL asking for the + # passphrase via the terminal and instead error out. + if keyfile and key_password is None and _is_key_file_encrypted(keyfile): + raise SSLError("Client private key is encrypted, password is required") + if certfile: - context.load_cert_chain(certfile, keyfile) + if key_password is None: + context.load_cert_chain(certfile, keyfile) + else: + context.load_cert_chain(certfile, keyfile, key_password) # If we detect server_hostname is an IP address then the SNI # extension should not be used according to RFC3546 Section 3.1 # We shouldn't warn the user if SNI isn't available but we would # not be using SNI anyways due to IP address for server_hostname. 
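# Illustrative sketch (not part of the vendored diff) of the
# key_password plumbing added above: an encrypted client key can now
# be loaded without OpenSSL prompting on the terminal. Host, file
# paths, and password are hypothetical; Python 3 is assumed.
import socket

from pipenv.patched.notpip._vendor.urllib3.util.ssl_ import (
    create_urllib3_context,
    ssl_wrap_socket,
)

ctx = create_urllib3_context()
ctx.load_default_certs()  # trust roots; done by hand since we pass ssl_context
raw = socket.create_connection(("example.com", 443))
tls = ssl_wrap_socket(
    raw,
    keyfile="client.key",       # hypothetical encrypted PEM key
    certfile="client.crt",      # hypothetical client certificate
    key_password="s3cret",      # avoids the interactive passphrase prompt
    server_hostname="example.com",
    ssl_context=ctx,
)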
- if ((server_hostname is not None and not is_ipaddress(server_hostname)) - or IS_SECURETRANSPORT): + if ( + server_hostname is not None and not is_ipaddress(server_hostname) + ) or IS_SECURETRANSPORT: if HAS_SNI and server_hostname is not None: return context.wrap_socket(sock, server_hostname=server_hostname) warnings.warn( - 'An HTTPS request has been made, but the SNI (Server Name ' - 'Indication) extension to TLS is not available on this platform. ' - 'This may cause the server to present an incorrect TLS ' - 'certificate, which can cause validation failures. You can upgrade to ' - 'a newer version of Python to solve this. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings', - SNIMissingWarning + "An HTTPS request has been made, but the SNI (Server Name " + "Indication) extension to TLS is not available on this platform. " + "This may cause the server to present an incorrect TLS " + "certificate, which can cause validation failures. You can upgrade to " + "a newer version of Python to solve this. For more information, see " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#ssl-warnings", + SNIMissingWarning, ) return context.wrap_socket(sock) def is_ipaddress(hostname): - """Detects whether the hostname given is an IP address. + """Detects whether the hostname given is an IPv4 or IPv6 address. + Also detects IPv6 addresses with Zone IDs. :param str hostname: Hostname to examine. :return: True if the hostname is an IP address, False otherwise. """ - if six.PY3 and isinstance(hostname, bytes): + if not six.PY2 and isinstance(hostname, bytes): # IDN A-label bytes are ASCII compatible. - hostname = hostname.decode('ascii') + hostname = hostname.decode("ascii") + return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname)) - families = [socket.AF_INET] - if hasattr(socket, 'AF_INET6'): - families.append(socket.AF_INET6) - for af in families: - try: - inet_pton(af, hostname) - except (socket.error, ValueError, OSError): - pass - else: - return True +def _is_key_file_encrypted(key_file): + """Detects if a key file is encrypted or not.""" + with open(key_file, "r") as f: + for line in f: + # Look for Proc-Type: 4,ENCRYPTED + if "ENCRYPTED" in line: + return True + return False diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/timeout.py b/pipenv/patched/notpip/_vendor/urllib3/util/timeout.py index cec817e6ef..c1dc1e9712 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/timeout.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/timeout.py @@ -1,4 +1,5 @@ from __future__ import absolute_import + # The default socket timeout, used by httplib to indicate that no timeout was # specified by the user from socket import _GLOBAL_DEFAULT_TIMEOUT @@ -45,19 +46,20 @@ class Timeout(object): :type total: integer, float, or None :param connect: - The maximum amount of time to wait for a connection attempt to a server - to succeed. Omitting the parameter will default the connect timeout to - the system default, probably `the global default timeout in socket.py + The maximum amount of time (in seconds) to wait for a connection + attempt to a server to succeed. Omitting the parameter will default the + connect timeout to the system default, probably `the global default + timeout in socket.py <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. None will set an infinite timeout for connection attempts. 
:type connect: integer, float, or None :param read: - The maximum amount of time to wait between consecutive - read operations for a response from the server. Omitting - the parameter will default the read timeout to the system - default, probably `the global default timeout in socket.py + The maximum amount of time (in seconds) to wait between consecutive + read operations for a response from the server. Omitting the parameter + will default the read timeout to the system default, probably `the + global default timeout in socket.py <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. None will set an infinite timeout. @@ -91,14 +93,18 @@ class Timeout(object): DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT def __init__(self, total=None, connect=_Default, read=_Default): - self._connect = self._validate_timeout(connect, 'connect') - self._read = self._validate_timeout(read, 'read') - self.total = self._validate_timeout(total, 'total') + self._connect = self._validate_timeout(connect, "connect") + self._read = self._validate_timeout(read, "read") + self.total = self._validate_timeout(total, "total") self._start_connect = None def __str__(self): - return '%s(connect=%r, read=%r, total=%r)' % ( - type(self).__name__, self._connect, self._read, self.total) + return "%s(connect=%r, read=%r, total=%r)" % ( + type(self).__name__, + self._connect, + self._read, + self.total, + ) @classmethod def _validate_timeout(cls, value, name): @@ -118,22 +124,31 @@ def _validate_timeout(cls, value, name): return value if isinstance(value, bool): - raise ValueError("Timeout cannot be a boolean value. It must " - "be an int, float or None.") + raise ValueError( + "Timeout cannot be a boolean value. It must " + "be an int, float or None." + ) try: float(value) except (TypeError, ValueError): - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value)) + raise ValueError( + "Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value) + ) try: if value <= 0: - raise ValueError("Attempted to set %s timeout to %s, but the " - "timeout cannot be set to a value less " - "than or equal to 0." % (name, value)) - except TypeError: # Python 3 - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value)) + raise ValueError( + "Attempted to set %s timeout to %s, but the " + "timeout cannot be set to a value less " + "than or equal to 0." % (name, value) + ) + except TypeError: + # Python 3 + raise ValueError( + "Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value) + ) return value @@ -165,8 +180,7 @@ def clone(self): # We can't use copy.deepcopy because that will also create a new object # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to # detect the user default. - return Timeout(connect=self._connect, read=self._read, - total=self.total) + return Timeout(connect=self._connect, read=self._read, total=self.total) def start_connect(self): """ Start the timeout clock, used during a connect() attempt @@ -182,14 +196,15 @@ def start_connect(self): def get_connect_duration(self): """ Gets the time elapsed since the call to :meth:`start_connect`. - :return: Elapsed time. + :return: Elapsed time in seconds. :rtype: float :raises urllib3.exceptions.TimeoutStateError: if you attempt to get duration for a timer that hasn't been started. 
""" if self._start_connect is None: - raise TimeoutStateError("Can't get connect duration for timer " - "that has not started.") + raise TimeoutStateError( + "Can't get connect duration for timer " "that has not started." + ) return current_time() - self._start_connect @property @@ -227,15 +242,16 @@ def read_timeout(self): :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` has not yet been called on this object. """ - if (self.total is not None and - self.total is not self.DEFAULT_TIMEOUT and - self._read is not None and - self._read is not self.DEFAULT_TIMEOUT): + if ( + self.total is not None + and self.total is not self.DEFAULT_TIMEOUT + and self._read is not None + and self._read is not self.DEFAULT_TIMEOUT + ): # In case the connect timeout has not yet been established. if self._start_connect is None: return self._read - return max(0, min(self.total - self.get_connect_duration(), - self._read)) + return max(0, min(self.total - self.get_connect_duration(), self._read)) elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: return max(0, self.total - self.get_connect_duration()) else: diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/url.py b/pipenv/patched/notpip/_vendor/urllib3/util/url.py index 6b6f9968d7..007157aeb8 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/url.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/url.py @@ -1,34 +1,110 @@ from __future__ import absolute_import +import re from collections import namedtuple from ..exceptions import LocationParseError +from ..packages import six -url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] +url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"] # We only want to normalize urls with an HTTP(S) scheme. # urllib3 infers URLs without a scheme (None) to be http. -NORMALIZABLE_SCHEMES = ('http', 'https', None) - - -class Url(namedtuple('Url', url_attrs)): +NORMALIZABLE_SCHEMES = ("http", "https", None) + +# Almost all of these patterns were derived from the +# 'rfc3986' module: https://github.com/python-hyper/rfc3986 +PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}") +SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)") +URI_RE = re.compile( + r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?" + r"(?://([^/?#]*))?" + r"([^?#]*)" + r"(?:\?([^#]*))?" 
+ r"(?:#(.*))?$", + re.UNICODE | re.DOTALL, +) + +IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}" +HEX_PAT = "[0-9A-Fa-f]{1,4}" +LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT) +_subs = {"hex": HEX_PAT, "ls32": LS32_PAT} +_variations = [ + # 6( h16 ":" ) ls32 + "(?:%(hex)s:){6}%(ls32)s", + # "::" 5( h16 ":" ) ls32 + "::(?:%(hex)s:){5}%(ls32)s", + # [ h16 ] "::" 4( h16 ":" ) ls32 + "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s", + # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 + "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s", + # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 + "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s", + # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 + "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s", + # [ *4( h16 ":" ) h16 ] "::" ls32 + "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s", + # [ *5( h16 ":" ) h16 ] "::" h16 + "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s", + # [ *6( h16 ":" ) h16 ] "::" + "(?:(?:%(hex)s:){0,6}%(hex)s)?::", +] + +UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" +IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")" +ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+" +IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]" +REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*" +TARGET_RE = re.compile(r"^(/[^?]*)(?:\?([^#]+))?(?:#(.*))?$") + +IPV4_RE = re.compile("^" + IPV4_PAT + "$") +IPV6_RE = re.compile("^" + IPV6_PAT + "$") +IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$") +BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$") +ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$") + +SUBAUTHORITY_PAT = (u"^(?:(.*)@)?(%s|%s|%s)(?::([0-9]{0,5}))?$") % ( + REG_NAME_PAT, + IPV4_PAT, + IPV6_ADDRZ_PAT, +) +SUBAUTHORITY_RE = re.compile(SUBAUTHORITY_PAT, re.UNICODE | re.DOTALL) + +UNRESERVED_CHARS = set( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~" +) +SUB_DELIM_CHARS = set("!$&'()*+,;=") +USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"} +PATH_CHARS = USERINFO_CHARS | {"@", "/"} +QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"} + + +class Url(namedtuple("Url", url_attrs)): """ - Datastructure for representing an HTTP URL. Used as a return value for + Data structure for representing an HTTP URL. Used as a return value for :func:`parse_url`. Both the scheme and host are normalized as they are both case-insensitive according to RFC 3986. """ + __slots__ = () - def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, - query=None, fragment=None): - if path and not path.startswith('/'): - path = '/' + path - if scheme: + def __new__( + cls, + scheme=None, + auth=None, + host=None, + port=None, + path=None, + query=None, + fragment=None, + ): + if path and not path.startswith("/"): + path = "/" + path + if scheme is not None: scheme = scheme.lower() - if host and scheme in NORMALIZABLE_SCHEMES: - host = host.lower() - return super(Url, cls).__new__(cls, scheme, auth, host, port, path, - query, fragment) + return super(Url, cls).__new__( + cls, scheme, auth, host, port, path, query, fragment + ) @property def hostname(self): @@ -38,10 +114,10 @@ def hostname(self): @property def request_uri(self): """Absolute path including the query string.""" - uri = self.path or '/' + uri = self.path or "/" if self.query is not None: - uri += '?' + self.query + uri += "?" 
+ self.query return uri @@ -49,7 +125,7 @@ def request_uri(self): def netloc(self): """Network location including host and port""" if self.port: - return '%s:%d' % (self.host, self.port) + return "%s:%d" % (self.host, self.port) return self.host @property @@ -72,23 +148,23 @@ def url(self): 'http://username:password@host.com:80/path?query#fragment' """ scheme, auth, host, port, path, query, fragment = self - url = '' + url = u"" # We use "is not None" we want things to happen with empty strings (or 0 port) if scheme is not None: - url += scheme + '://' + url += scheme + u"://" if auth is not None: - url += auth + '@' + url += auth + u"@" if host is not None: url += host if port is not None: - url += ':' + str(port) + url += u":" + str(port) if path is not None: url += path if query is not None: - url += '?' + query + url += u"?" + query if fragment is not None: - url += '#' + fragment + url += u"#" + fragment return url @@ -98,6 +174,8 @@ def __str__(self): def split_first(s, delims): """ + .. deprecated:: 1.25 + Given a string and an iterable of delimiters, split on the first found delimiter. Return two split parts and the matched delimiter. @@ -124,15 +202,150 @@ def split_first(s, delims): min_delim = d if min_idx is None or min_idx < 0: - return s, '', None + return s, "", None + + return s[:min_idx], s[min_idx + 1 :], min_delim + + +def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"): + """Percent-encodes a URI component without reapplying + onto an already percent-encoded component. + """ + if component is None: + return component + + component = six.ensure_text(component) + + # Try to see if the component we're encoding is already percent-encoded + # so we can skip all '%' characters but still encode all others. + percent_encodings = PERCENT_RE.findall(component) + + # Normalize existing percent-encoded bytes. + for enc in percent_encodings: + if not enc.isupper(): + component = component.replace(enc, enc.upper()) + + uri_bytes = component.encode("utf-8", "surrogatepass") + is_percent_encoded = len(percent_encodings) == uri_bytes.count(b"%") + + encoded_component = bytearray() + + for i in range(0, len(uri_bytes)): + # Will return a single character bytestring on both Python 2 & 3 + byte = uri_bytes[i : i + 1] + byte_ord = ord(byte) + if (is_percent_encoded and byte == b"%") or ( + byte_ord < 128 and byte.decode() in allowed_chars + ): + encoded_component.extend(byte) + continue + encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper())) + + return encoded_component.decode(encoding) + - return s[:min_idx], s[min_idx + 1:], min_delim +def _remove_path_dot_segments(path): + # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code + segments = path.split("/") # Turn the path into a list of segments + output = [] # Initialize the variable to use to store output + + for segment in segments: + # '.' is the current directory, so ignore it, it is superfluous + if segment == ".": + continue + # Anything other than '..', should be appended to the output + elif segment != "..": + output.append(segment) + # In this case segment == '..', if we can, we should pop the last + # element + elif output: + output.pop() + + # If the path starts with '/' and the output is empty or the first string + # is non-empty + if path.startswith("/") and (not output or output[0]): + output.insert(0, "") + + # If the path starts with '/.' or '/..' 
ensure we add one more empty + # string to add a trailing '/' + if path.endswith(("/.", "/..")): + output.append("") + + return "/".join(output) + + +def _normalize_host(host, scheme): + if host: + if isinstance(host, six.binary_type): + host = six.ensure_str(host) + + if scheme in NORMALIZABLE_SCHEMES: + is_ipv6 = IPV6_ADDRZ_RE.match(host) + if is_ipv6: + match = ZONE_ID_RE.search(host) + if match: + start, end = match.span(1) + zone_id = host[start:end] + + if zone_id.startswith("%25") and zone_id != "%25": + zone_id = zone_id[3:] + else: + zone_id = zone_id[1:] + zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS) + return host[:start].lower() + zone_id + host[end:] + else: + return host.lower() + elif not IPV4_RE.match(host): + return six.ensure_str( + b".".join([_idna_encode(label) for label in host.split(".")]) + ) + return host + + +def _idna_encode(name): + if name and any([ord(x) > 128 for x in name]): + try: + from pipenv.patched.notpip._vendor import idna + except ImportError: + six.raise_from( + LocationParseError("Unable to parse URL without the 'idna' module"), + None, + ) + try: + return idna.encode(name.lower(), strict=True, std3_rules=True) + except idna.IDNAError: + six.raise_from( + LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None + ) + return name.lower().encode("ascii") + + +def _encode_target(target): + """Percent-encodes a request target so that there are no invalid characters""" + if not target.startswith("/"): + return target + + path, query, fragment = TARGET_RE.match(target).groups() + target = _encode_invalid_chars(path, PATH_CHARS) + query = _encode_invalid_chars(query, QUERY_CHARS) + fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS) + if query is not None: + target += "?" + query + if fragment is not None: + target += "#" + target + return target def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is performed to parse incomplete urls. Fields not provided will be None. + This parser is RFC 3986 compliant. + + The parser logic and helper functions are based heavily on + work done in the ``rfc3986`` module. + + :param str url: URL to parse into a :class:`.Url` namedtuple. Partly backwards-compatible with :mod:`urlparse`. @@ -145,81 +358,77 @@ def parse_url(url): >>> parse_url('/foo?bar') Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) """ - - # While this code has overlap with stdlib's urlparse, it is much - # simplified for our needs and less annoying. - # Additionally, this implementations does silly things to be optimal - # on CPython. - if not url: # Empty return Url() - scheme = None - auth = None - host = None - port = None - path = None - fragment = None - query = None - - # Scheme - if '://' in url: - scheme, url = url.split('://', 1) - - # Find the earliest Authority Terminator - # (http://tools.ietf.org/html/rfc3986#section-3.2) - url, path_, delim = split_first(url, ['/', '?', '#']) - - if delim: - # Reassemble the path - path = delim + path_ - - # Auth - if '@' in url: - # Last '@' denotes end of auth part - auth, url = url.rsplit('@', 1) - - # IPv6 - if url and url[0] == '[': - host, url = url.split(']', 1) - host += ']' - - # Port - if ':' in url: - _host, port = url.split(':', 1) - - if not host: - host = _host - - if port: - # If given, ports must be integers. No whitespace, no plus or - # minus prefixes, no non-integer digits such as ^2 (superscript). 
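# Illustrative worked examples (not part of the vendored diff) for the
# normalization helpers added above, assuming the vendored import
# path. Dot segments collapse per RFC 3986 section 5.2.4; characters
# invalid for a component are percent-encoded, while existing escapes
# are kept and upper-cased.
from pipenv.patched.notpip._vendor.urllib3.util.url import (
    PATH_CHARS,
    _encode_invalid_chars,
    _remove_path_dot_segments,
)

assert _remove_path_dot_segments("/a/b/../c/./d") == "/a/c/d"
assert _remove_path_dot_segments("/a/..") == "/"
assert _encode_invalid_chars("/path with space", PATH_CHARS) == "/path%20with%20space"
assert _encode_invalid_chars("/%7e", PATH_CHARS) == "/%7E"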
- if not port.isdigit(): - raise LocationParseError(url) - try: - port = int(port) - except ValueError: - raise LocationParseError(url) + source_url = url + if not SCHEME_RE.search(url): + url = "//" + url + + try: + scheme, authority, path, query, fragment = URI_RE.match(url).groups() + normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES + + if scheme: + scheme = scheme.lower() + + if authority: + auth, host, port = SUBAUTHORITY_RE.match(authority).groups() + if auth and normalize_uri: + auth = _encode_invalid_chars(auth, USERINFO_CHARS) + if port == "": + port = None else: - # Blank ports are cool, too. (rfc3986#section-3.2.3) - port = None + auth, host, port = None, None, None - elif not host and url: - host = url + if port is not None: + port = int(port) + if not (0 <= port <= 65535): + raise LocationParseError(url) + host = _normalize_host(host, scheme) + + if normalize_uri and path: + path = _remove_path_dot_segments(path) + path = _encode_invalid_chars(path, PATH_CHARS) + if normalize_uri and query: + query = _encode_invalid_chars(query, QUERY_CHARS) + if normalize_uri and fragment: + fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS) + + except (ValueError, AttributeError): + return six.raise_from(LocationParseError(source_url), None) + + # For the sake of backwards compatibility we put empty + # string values for path if there are any defined values + # beyond the path in the URL. + # TODO: Remove this when we break backwards compatibility. if not path: - return Url(scheme, auth, host, port, path, query, fragment) + if query is not None or fragment is not None: + path = "" + else: + path = None - # Fragment - if '#' in path: - path, fragment = path.split('#', 1) + # Ensure that each part of the URL is a `str` for + # backwards compatibility. + if isinstance(url, six.text_type): + ensure_func = six.ensure_text + else: + ensure_func = six.ensure_str - # Query - if '?' in path: - path, query = path.split('?', 1) + def ensure_type(x): + return x if x is None else ensure_func(x) - return Url(scheme, auth, host, port, path, query, fragment) + return Url( + scheme=ensure_type(scheme), + auth=ensure_type(auth), + host=ensure_type(host), + port=port, + path=ensure_type(path), + query=ensure_type(query), + fragment=ensure_type(fragment), + ) def get_host(url): @@ -227,4 +436,4 @@ def get_host(url): Deprecated. Use :func:`parse_url` instead. """ p = parse_url(url) - return p.scheme or 'http', p.hostname, p.port + return p.scheme or "http", p.hostname, p.port diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/wait.py b/pipenv/patched/notpip/_vendor/urllib3/util/wait.py index 4db71bafd8..d71d2fd722 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/wait.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/wait.py @@ -2,6 +2,7 @@ from functools import partial import select import sys + try: from time import monotonic except ImportError: @@ -40,6 +41,8 @@ class NoWayToWaitForSocketError(Exception): # Modern Python, that retries syscalls by default def _retry_on_intr(fn, timeout): return fn(timeout) + + else: # Old and broken Pythons. 
def _retry_on_intr(fn, timeout): diff --git a/pipenv/patched/notpip/_vendor/vendor.txt b/pipenv/patched/notpip/_vendor/vendor.txt index 7b5482550b..aadd35261a 100644 --- a/pipenv/patched/notpip/_vendor/vendor.txt +++ b/pipenv/patched/notpip/_vendor/vendor.txt @@ -1,23 +1,23 @@ appdirs==1.4.3 CacheControl==0.12.5 colorama==0.4.1 -distlib==0.2.8 +contextlib2==0.6.0 +distlib==0.2.9.post0 distro==1.4.0 html5lib==1.0.1 ipaddress==1.0.22 # Only needed on 2.6 and 2.7 -lockfile==0.12.2 -msgpack==0.5.6 -packaging==19.0 -pep517==0.5.0 +msgpack==0.6.2 +packaging==19.2 +pep517==0.7.0 progress==1.5 -pyparsing==2.4.0 -pytoml==0.1.20 -requests==2.21.0 - certifi==2019.3.9 +pyparsing==2.4.2 +pytoml==0.1.21 +requests==2.22.0 + certifi==2019.9.11 chardet==3.0.4 idna==2.8 - urllib3==1.25.2 + urllib3==1.25.6 retrying==1.3.3 -setuptools==41.0.1 +setuptools==41.4.0 six==1.12.0 webencodings==0.5.1 diff --git a/pipenv/patched/notpip/contextlib2.LICENSE.txt b/pipenv/patched/notpip/contextlib2.LICENSE.txt new file mode 100644 index 0000000000..5de20277df --- /dev/null +++ b/pipenv/patched/notpip/contextlib2.LICENSE.txt @@ -0,0 +1,122 @@ + + +A. HISTORY OF THE SOFTWARE +========================== + +contextlib2 is a derivative of the contextlib module distributed by the PSF +as part of the Python standard library. According, it is itself redistributed +under the PSF license (reproduced in full below). As the contextlib module +was added only in Python 2.5, the licenses for earlier Python versions are +not applicable and have not been included. + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations (now Zope +Corporation, see http://www.zope.com). In 2001, the Python Software +Foundation (PSF, see http://www.python.org/psf/) was formed, a +non-profit organization created specifically to own Python-related +Intellectual Property. Zope Corporation is a sponsoring member of +the PSF. + +All Python releases are Open Source (see http://www.opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases that included the contextlib module. + + Release Derived Year Owner GPL- + from compatible? (1) + + 2.5 2.4 2006 PSF yes + 2.5.1 2.5 2007 PSF yes + 2.5.2 2.5.1 2008 PSF yes + 2.5.3 2.5.2 2008 PSF yes + 2.6 2.5 2008 PSF yes + 2.6.1 2.6 2008 PSF yes + 2.6.2 2.6.1 2009 PSF yes + 2.6.3 2.6.2 2009 PSF yes + 2.6.4 2.6.3 2009 PSF yes + 2.6.5 2.6.4 2010 PSF yes + 3.0 2.6 2008 PSF yes + 3.0.1 3.0 2009 PSF yes + 3.1 3.0.1 2009 PSF yes + 3.1.1 3.1 2009 PSF yes + 3.1.2 3.1.1 2010 PSF yes + 3.1.3 3.1.2 2010 PSF yes + 3.1.4 3.1.3 2011 PSF yes + 3.2 3.1 2011 PSF yes + 3.2.1 3.2 2011 PSF yes + 3.2.2 3.2.1 2011 PSF yes + 3.3 3.2 2012 PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. 
All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011 Python Software Foundation; All Rights Reserved" are retained in Python +alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. diff --git a/pipenv/patched/notpip/idna.LICENSE.rst b/pipenv/patched/notpip/idna.LICENSE.rst new file mode 100644 index 0000000000..3ee64fba29 --- /dev/null +++ b/pipenv/patched/notpip/idna.LICENSE.rst @@ -0,0 +1,80 @@ +License +------- + +Copyright (c) 2013-2018, Kim Davies. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +#. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ +#. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with + the distribution. + +#. Neither the name of the copyright holder nor the names of the + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +#. THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS "AS IS" AND ANY + EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + DAMAGE. + +Portions of the codec implementation and unit tests are derived from the +Python standard library, which carries the `Python Software Foundation +License <https://docs.python.org/2/license.html>`_: + + Copyright (c) 2001-2014 Python Software Foundation; All Rights Reserved + +Portions of the unit tests are derived from the Unicode standard, which +is subject to the Unicode, Inc. License Agreement: + + Copyright (c) 1991-2014 Unicode, Inc. All rights reserved. + Distributed under the Terms of Use in + <http://www.unicode.org/copyright.html>. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of the Unicode data files and any associated documentation + (the "Data Files") or Unicode software and any associated documentation + (the "Software") to deal in the Data Files or Software + without restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, and/or sell copies of + the Data Files or Software, and to permit persons to whom the Data Files + or Software are furnished to do so, provided that + + (a) this copyright and permission notice appear with all copies + of the Data Files or Software, + + (b) this copyright and permission notice appear in associated + documentation, and + + (c) there is clear notice in each modified Data File or in the Software + as well as in the documentation associated with the Data File(s) or + Software that the data or software has been modified. + + THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS + NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL + DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + PERFORMANCE OF THE DATA FILES OR SOFTWARE. 
+ + Except as contained in this notice, the name of a copyright holder + shall not be used in advertising or otherwise to promote the sale, + use or other dealings in these Data Files or Software without prior + written authorization of the copyright holder. diff --git a/pipenv/patched/notpip/COPYING b/pipenv/patched/notpip/msgpack.COPYING similarity index 100% rename from pipenv/patched/notpip/COPYING rename to pipenv/patched/notpip/msgpack.COPYING diff --git a/pipenv/patched/notpip/LICENSE.BSD b/pipenv/patched/notpip/packaging.LICENSE.BSD similarity index 100% rename from pipenv/patched/notpip/LICENSE.BSD rename to pipenv/patched/notpip/packaging.LICENSE.BSD diff --git a/pipenv/patched/notpip/urllib3.LICENSE b/pipenv/patched/notpip/urllib3.LICENSE deleted file mode 100644 index 1c3283ee5b..0000000000 --- a/pipenv/patched/notpip/urllib3.LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -This is the MIT license: http://www.opensource.org/licenses/mit-license.php - -Copyright 2008-2016 Andrey Petrov and contributors (see CONTRIBUTORS.txt) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons -to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE -FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/pipenv/patched/patched.txt b/pipenv/patched/patched.txt index e34df9fd59..ce803faf65 100644 --- a/pipenv/patched/patched.txt +++ b/pipenv/patched/patched.txt @@ -1,5 +1,5 @@ safety crayons==0.1.2 pipfile==0.0.2 -pip-tools==3.5.0 -pip==19.0.3 +pip-tools==4.3.0 +pip==19.3.1 diff --git a/pipenv/patched/piptools/__init__.py b/pipenv/patched/piptools/__init__.py index e69de29bb2..9f0c95aa56 100644 --- a/pipenv/patched/piptools/__init__.py +++ b/pipenv/patched/piptools/__init__.py @@ -0,0 +1,11 @@ +import locale + +from piptools.click import secho + +# Needed for locale.getpreferredencoding(False) to work +# in pip._internal.utils.encoding.auto_decode +try: + locale.setlocale(locale.LC_ALL, "") +except locale.Error as e: # pragma: no cover + # setlocale can apparently crash if locale are uninitialized + secho("Ignoring error when setting locale: {}".format(e), fg="red") diff --git a/pipenv/patched/piptools/__main__.py b/pipenv/patched/piptools/__main__.py index b08b8494c3..2d8b75e85d 100644 --- a/pipenv/patched/piptools/__main__.py +++ b/pipenv/patched/piptools/__main__.py @@ -1,4 +1,5 @@ import click + from piptools.scripts import compile, sync @@ -7,10 +8,10 @@ def cli(): pass -cli.add_command(compile.cli, 'compile') -cli.add_command(sync.cli, 'sync') +cli.add_command(compile.cli, "compile") +cli.add_command(sync.cli, "sync") # Enable ``python -m piptools ...``. 
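# e.g. both subcommands registered above become available as:
#   python -m piptools compile requirements.in
#   python -m piptools sync requirements.txt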
-if __name__ == '__main__': # pragma: no branch +if __name__ == "__main__": # pragma: no branch cli() diff --git a/pipenv/patched/piptools/_compat/__init__.py b/pipenv/patched/piptools/_compat/__init__.py index 19adcbc523..fd8ecddd7f 100644 --- a/pipenv/patched/piptools/_compat/__init__.py +++ b/pipenv/patched/piptools/_compat/__init__.py @@ -1,36 +1,44 @@ # coding: utf-8 # flake8: noqa -from __future__ import (absolute_import, division, print_function, - unicode_literals) +from __future__ import absolute_import, division, print_function, unicode_literals import six -if six.PY2: - from .tempfile import TemporaryDirectory - from .contextlib import ExitStack -else: - from tempfile import TemporaryDirectory - from contextlib import ExitStack - from .pip_compat import ( - InstallRequirement, - parse_requirements, - RequirementSet, - user_cache_dir, + DEV_PKGS, FAVORITE_HASH, - is_file_url, - url_to_path, - PackageFinder, + PIP_VERSION, FormatControl, + InstallationCandidate, + InstallCommand, + InstallationError, + InstallRequirement, + Link, + PackageFinder, + PyPI, + RequirementSet, + RequirementTracker, + Resolver, + SafeFileCache, + VcsSupport, Wheel, - Command, + WheelCache, cmdoptions, get_installed_distributions, - PyPI, - install_req_from_line, install_req_from_editable, + install_req_from_line, + is_dir_url, + is_file_url, + is_vcs_url, + parse_requirements, + path_to_url, + pip_version, stdlib_pkgs, - DEV_PKGS, - SafeFileCache, - InstallationError + url_to_path, + user_cache_dir, ) + +if six.PY2: + from .tempfile import TemporaryDirectory +else: + from tempfile import TemporaryDirectory diff --git a/pipenv/patched/piptools/_compat/contextlib.py b/pipenv/patched/piptools/_compat/contextlib.py index b0e161bb8c..04039ccb01 100644 --- a/pipenv/patched/piptools/_compat/contextlib.py +++ b/pipenv/patched/piptools/_compat/contextlib.py @@ -1,123 +1,18 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import sys -from collections import deque - - -# Inspired by discussions on http://bugs.python.org/issue13585 -class ExitStack(object): - """Context manager for dynamic management of a stack of exit callbacks - - For example: - - with ExitStack() as stack: - files = [stack.enter_context(open(fname)) for fname in filenames] - # All opened files will automatically be closed at the end of - # the with statement, even if attempts to open files later - # in the list throw an exception - +# Ported from python 3.7 contextlib.py +class nullcontext(object): + """Context manager that does no additional processing. + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True """ - def __init__(self): - self._exit_callbacks = deque() - - def pop_all(self): - """Preserve the context stack by transferring it to a new instance""" - new_stack = type(self)() - new_stack._exit_callbacks = self._exit_callbacks - self._exit_callbacks = deque() - return new_stack - - def _push_cm_exit(self, cm, cm_exit): - """Helper to correctly register callbacks to __exit__ methods""" - def _exit_wrapper(*exc_details): - return cm_exit(cm, *exc_details) - _exit_wrapper.__self__ = cm - self.push(_exit_wrapper) - def push(self, exit): - """Registers a callback with the standard __exit__ method signature - - Can suppress exceptions the same way __exit__ methods can. 
- - Also accepts any object with an __exit__ method (registering the - method instead of the object itself) - """ - # We use an unbound method rather than a bound method to follow - # the standard lookup behaviour for special methods - _cb_type = type(exit) - try: - exit_method = _cb_type.__exit__ - except AttributeError: - # Not a context manager, so assume its a callable - self._exit_callbacks.append(exit) - else: - self._push_cm_exit(exit, exit_method) - return exit # Allow use as a decorator - - def callback(self, callback, *args, **kwds): - """Registers an arbitrary callback and arguments. - - Cannot suppress exceptions. - """ - def _exit_wrapper(exc_type, exc, tb): - callback(*args, **kwds) - # We changed the signature, so using @wraps is not appropriate, but - # setting __wrapped__ may still help with introspection - _exit_wrapper.__wrapped__ = callback - self.push(_exit_wrapper) - return callback # Allow use as a decorator - - def enter_context(self, cm): - """Enters the supplied context manager - - If successful, also pushes its __exit__ method as a callback and - returns the result of the __enter__ method. - """ - # We look up the special methods on the type to match the with - # statement - _cm_type = type(cm) - _exit = _cm_type.__exit__ - result = _cm_type.__enter__(cm) - self._push_cm_exit(cm, _exit) - return result - - def close(self): - """Immediately unwind the context stack""" - self.__exit__(None, None, None) + def __init__(self, enter_result=None): + self.enter_result = enter_result def __enter__(self): - return self - - def __exit__(self, *exc_details): - if not self._exit_callbacks: - return + return self.enter_result - # This looks complicated, but it is really just - # setting up a chain of try-expect statements to ensure - # that outer callbacks still get invoked even if an - # inner one throws an exception - def _invoke_next_callback(exc_details): - # Callbacks are removed from the list in FIFO order - # but the recursion means they're invoked in LIFO order - cb = self._exit_callbacks.popleft() - if not self._exit_callbacks: - # Innermost callback is invoked directly - return cb(*exc_details) - # More callbacks left, so descend another level in the stack - try: - suppress_exc = _invoke_next_callback(exc_details) - except: - suppress_exc = cb(*sys.exc_info()) - # Check if this cb suppressed the inner exception - if not suppress_exc: - raise - else: - # Check if inner cb suppressed the original exception - if suppress_exc: - exc_details = (None, None, None) - suppress_exc = cb(*exc_details) or suppress_exc - return suppress_exc - # Kick off the recursive chain - return _invoke_next_callback(exc_details) + def __exit__(self, *excinfo): + pass diff --git a/pipenv/patched/piptools/_compat/pip_compat.py b/pipenv/patched/piptools/_compat/pip_compat.py index 715144a389..765bd49ed4 100644 --- a/pipenv/patched/piptools/_compat/pip_compat.py +++ b/pipenv/patched/piptools/_compat/pip_compat.py @@ -1,51 +1,94 @@ # -*- coding=utf-8 -*- -__all__ = [ - "InstallRequirement", - "parse_requirements", - "RequirementSet", - "FAVORITE_HASH", - "is_file_url", - "path_to_url", - "url_to_path", - "PackageFinder", - "FormatControl", - "Wheel", - "Command", - "cmdoptions", - "get_installed_distributions", - "PyPI", - "stdlib_pkgs", - "DEV_PKGS", - "install_req_from_line", - "install_req_from_editable", - "user_cache_dir", - "SafeFileCache", - "InstallationError" -] - +from __future__ import absolute_import +import importlib import os +from appdirs import user_cache_dir 
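+# This environment variable must be set before pip_shims is imported, so
+# that every shim resolves against pipenv's vendored pip rather than any
+# system-wide installation. A minimal sketch of the pattern:
+#
+#     os.environ["PIP_SHIMS_BASE_MODULE"] = "pipenv.patched.notpip"
+#     import pip_shims.shims  # shims now back onto the vendored module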
os.environ["PIP_SHIMS_BASE_MODULE"] = str("pipenv.patched.notpip") +import pip_shims.shims +from pip_shims.models import ShimmedPathCollection, ImportTypes + +InstallationCandidate = ShimmedPathCollection("InstallationCandidate", ImportTypes.CLASS) +InstallationCandidate.create_path("models.candidate", "18.0", "9999") +InstallationCandidate.create_path("index", "7.0.3", "10.9.9") + +PIP_VERSION = tuple(map(int, pip_shims.shims.parsed_pip_version.parsed_version.base_version.split("."))) + +RequirementTracker = pip_shims.shims.RequirementTracker + +def do_import(module_path, subimport=None, old_path=None): + old_path = old_path or module_path + pip_path = os.environ.get("PIP_SHIMS_BASE_MODULE", "pip") + prefixes = ["{}._internal".format(pip_path), pip_path] + paths = [module_path, old_path] + search_order = [ + "{0}.{1}".format(p, pth) for p in prefixes for pth in paths if pth is not None + ] + package = subimport if subimport else None + for to_import in search_order: + if not subimport: + to_import, _, package = to_import.rpartition(".") + try: + imported = importlib.import_module(to_import) + except ImportError: + continue + else: + return getattr(imported, package) + + +InstallRequirement = pip_shims.shims.InstallRequirement +InstallationError = pip_shims.shims.InstallationError +parse_requirements = pip_shims.shims.parse_requirements +RequirementSet = pip_shims.shims.RequirementSet +SafeFileCache = pip_shims.shims.SafeFileCache +FAVORITE_HASH = pip_shims.shims.FAVORITE_HASH +path_to_url = pip_shims.shims.path_to_url +url_to_path = pip_shims.shims.url_to_path +PackageFinder = pip_shims.shims.PackageFinder +FormatControl = pip_shims.shims.FormatControl +InstallCommand = pip_shims.shims.InstallCommand +Wheel = pip_shims.shims.Wheel +cmdoptions = pip_shims.shims.cmdoptions +get_installed_distributions = pip_shims.shims.get_installed_distributions +PyPI = pip_shims.shims.PyPI +stdlib_pkgs = pip_shims.shims.stdlib_pkgs +DEV_PKGS = pip_shims.shims.DEV_PKGS +Link = pip_shims.shims.Link +Session = do_import("_vendor.requests.sessions", "Session") +Resolver = pip_shims.shims.Resolver +VcsSupport = pip_shims.shims.VcsSupport +WheelCache = pip_shims.shims.WheelCache +pip_version = pip_shims.shims.pip_version + +# pip 18.1 has refactored InstallRequirement constructors use by pip-tools. 
+if PIP_VERSION < (18, 1): + install_req_from_line = InstallRequirement.from_line + install_req_from_editable = InstallRequirement.from_editable +else: + install_req_from_line = do_import("req.constructors", "install_req_from_line") + install_req_from_editable = do_import( + "req.constructors", "install_req_from_editable" + ) + + +def is_vcs_url(link): + if PIP_VERSION < (19, 3): + _is_vcs_url = do_import("download", "is_vcs_url") + return _is_vcs_url(link) + + return link.is_vcs + + +def is_file_url(link): + if PIP_VERSION < (19, 3): + _is_file_url = do_import("download", "is_file_url") + return _is_file_url(link) + + return link.is_file + + +def is_dir_url(link): + if PIP_VERSION < (19, 3): + _is_dir_url = do_import("download", "is_dir_url") + return _is_dir_url(link) -from pip_shims.shims import ( - InstallRequirement, - parse_requirements, - RequirementSet, - FAVORITE_HASH, - is_file_url, - path_to_url, - url_to_path, - PackageFinder, - FormatControl, - Wheel, - Command, - cmdoptions, - get_installed_distributions, - PyPI, - stdlib_pkgs, - DEV_PKGS, - install_req_from_line, - install_req_from_editable, - USER_CACHE_DIR as user_cache_dir, - SafeFileCache, - InstallationError -) + return link.is_existing_dir() diff --git a/pipenv/patched/piptools/_compat/tempfile.py b/pipenv/patched/piptools/_compat/tempfile.py index a003d0805d..dc7e9ef997 100644 --- a/pipenv/patched/piptools/_compat/tempfile.py +++ b/pipenv/patched/piptools/_compat/tempfile.py @@ -40,8 +40,10 @@ def cleanup(self): # up due to missing globals if "None" not in str(ex): raise - print("ERROR: {!r} while cleaning up {!r}".format(ex, self,), - file=_sys.stderr) + print( + "ERROR: {!r} while cleaning up {!r}".format(ex, self), + file=_sys.stderr, + ) return self._closed = True diff --git a/pipenv/patched/piptools/cache.py b/pipenv/patched/piptools/cache.py index 610a4f37d1..14a276db95 100644 --- a/pipenv/patched/piptools/cache.py +++ b/pipenv/patched/piptools/cache.py @@ -1,6 +1,5 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) +from __future__ import absolute_import, division, print_function, unicode_literals import json import os @@ -19,23 +18,24 @@ def __init__(self, path): def __str__(self): lines = [ - 'The dependency cache seems to have been corrupted.', - 'Inspect, or delete, the following file:', - ' {}'.format(self.path), + "The dependency cache seems to have been corrupted.", + "Inspect, or delete, the following file:", + " {}".format(self.path), ] return os.linesep.join(lines) def read_cache_file(cache_file_path): - with open(cache_file_path, 'r') as cache_file: + with open(cache_file_path, "r") as cache_file: try: doc = json.load(cache_file) except ValueError: raise CorruptCacheError(cache_file_path) # Check version and load the contents - assert doc['__format__'] == 1, 'Unknown cache file format' - return doc['dependencies'] + if doc["__format__"] != 1: + raise AssertionError("Unknown cache file format") + return doc["dependencies"] class DependencyCache(object): @@ -48,13 +48,14 @@ class DependencyCache(object): Where X.Y indicates the Python version. 
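+    (For example, on Python 3.7 the cache file is named
+    ``depcache-py3.7.json``.)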
""" + def __init__(self, cache_dir=None): if cache_dir is None: cache_dir = CACHE_DIR if not os.path.isdir(cache_dir): os.makedirs(cache_dir) - py_version = '.'.join(str(digit) for digit in sys.version_info[:2]) - cache_filename = 'depcache-py{}.json'.format(py_version) + py_version = ".".join(str(digit) for digit in sys.version_info[:2]) + cache_filename = "depcache-py{}.json".format(py_version) self._cache_file = os.path.join(cache_dir, cache_filename) self._cache = None @@ -71,13 +72,14 @@ def cache(self): def as_cache_key(self, ireq): """ - Given a requirement, return its cache key. This behavior is a little weird in order to allow backwards - compatibility with cache files. For a requirement without extras, this will return, for example: + Given a requirement, return its cache key. This behavior is a little weird + in order to allow backwards compatibility with cache files. For a requirement + without extras, this will return, for example: ("ipython", "2.1.0") - For a requirement with extras, the extras will be comma-separated and appended to the version, inside brackets, - like so: + For a requirement with extras, the extras will be comma-separated and appended + to the version, inside brackets, like so: ("ipython", "2.1.0[nbconvert,notebook]") """ @@ -97,11 +99,8 @@ def read_cache(self): def write_cache(self): """Writes the cache to disk as JSON.""" - doc = { - '__format__': 1, - 'dependencies': self._cache, - } - with open(self._cache_file, 'w') as f: + doc = {"__format__": 1, "dependencies": self._cache} + with open(self._cache_file, "w") as f: json.dump(doc, f, sort_keys=True) def clear(self): @@ -122,10 +121,6 @@ def __setitem__(self, ireq, values): self.cache[pkgname][pkgversion_and_extras] = values self.write_cache() - def get(self, ireq, default=None): - pkgname, pkgversion_and_extras = self.as_cache_key(ireq) - return self.cache.get(pkgname, {}).get(pkgversion_and_extras, default) - def reverse_dependencies(self, ireqs): """ Returns a lookup table of reverse dependencies for all the given ireqs. @@ -157,8 +152,10 @@ def _reverse_dependencies(self, cache_keys): 'pyflakes': ['flake8']} """ - # First, collect all the dependencies into a sequence of (parent, child) tuples, like [('flake8', 'pep8'), - # ('flake8', 'mccabe'), ...] - return lookup_table((key_from_req(Requirement(dep_name)), name) - for name, version_and_extras in cache_keys - for dep_name in self.cache[name][version_and_extras]) + # First, collect all the dependencies into a sequence of (parent, child) + # tuples, like [('flake8', 'pep8'), ('flake8', 'mccabe'), ...] 
+ return lookup_table( + (key_from_req(Requirement(dep_name)), name) + for name, version_and_extras in cache_keys + for dep_name in self.cache[name][version_and_extras] + ) diff --git a/pipenv/patched/piptools/click.py b/pipenv/patched/piptools/click.py index 4bab11cb7e..86f1612c6a 100644 --- a/pipenv/patched/piptools/click.py +++ b/pipenv/patched/piptools/click.py @@ -1,6 +1,6 @@ from __future__ import absolute_import import click -click.disable_unicode_literals_warning = True - from click import * # noqa + +click.disable_unicode_literals_warning = True diff --git a/pipenv/patched/piptools/exceptions.py b/pipenv/patched/piptools/exceptions.py index 77c5bd4047..5aac84bb3b 100644 --- a/pipenv/patched/piptools/exceptions.py +++ b/pipenv/patched/piptools/exceptions.py @@ -19,40 +19,35 @@ def __str__(self): else: versions.append(version) - lines = [ - 'Could not find a version that matches {}'.format(self.ireq), - ] + lines = ["Could not find a version that matches {}".format(self.ireq)] if versions: - lines.append('Tried: {}'.format(', '.join(versions))) + lines.append("Tried: {}".format(", ".join(versions))) if pre_versions: if self.finder.allow_all_prereleases: - line = 'Tried' + line = "Tried" else: - line = 'Skipped' + line = "Skipped" - line += ' pre-versions: {}'.format(', '.join(pre_versions)) + line += " pre-versions: {}".format(", ".join(pre_versions)) lines.append(line) if versions or pre_versions: - lines.append('There are incompatible versions in the resolved dependencies.') + lines.append( + "There are incompatible versions in the resolved dependencies:" + ) + source_ireqs = getattr(self.ireq, "_source_ireqs", []) + lines.extend(" {}".format(ireq) for ireq in source_ireqs) else: - lines.append('No versions found') - lines.append('{} {} reachable?'.format( - 'Were' if len(self.finder.index_urls) > 1 else 'Was', ' or '.join(self.finder.index_urls)) + lines.append("No versions found") + lines.append( + "{} {} reachable?".format( + "Were" if len(self.finder.index_urls) > 1 else "Was", + " or ".join(self.finder.index_urls), + ) ) - return '\n'.join(lines) - - -class UnsupportedConstraint(PipToolsError): - def __init__(self, message, constraint): - super(UnsupportedConstraint, self).__init__(message) - self.constraint = constraint - - def __str__(self): - message = super(UnsupportedConstraint, self).__str__() - return '{} (constraint was: {})'.format(message, str(self.constraint)) + return "\n".join(lines) class IncompatibleRequirements(PipToolsError): diff --git a/pipenv/patched/piptools/io.py b/pipenv/patched/piptools/io.py deleted file mode 100644 index b6bca6756b..0000000000 --- a/pipenv/patched/piptools/io.py +++ /dev/null @@ -1,644 +0,0 @@ -# -*- coding: utf-8 -*- -# -# NOTE: -# The classes in this module are vendored from boltons: -# https://github.com/mahmoud/boltons/blob/master/boltons/fileutils.py -# -"""Virtually every Python programmer has used Python for wrangling -disk contents, and ``fileutils`` collects solutions to some of the -most commonly-found gaps in the standard library. 
-""" - -from __future__ import print_function - -import os -import re -import sys -import stat -import errno -import fnmatch -from shutil import copy2, copystat, Error - - -__all__ = ['mkdir_p', 'atomic_save', 'AtomicSaver', 'FilePerms', - 'iter_find_files', 'copytree'] - - -FULL_PERMS = 511 # 0777 that both Python 2 and 3 can digest -RW_PERMS = 438 -_SINGLE_FULL_PERM = 7 # or 07 in Python 2 -try: - basestring -except NameError: - unicode = str # Python 3 compat - basestring = (str, bytes) - - -def mkdir_p(path): - """Creates a directory and any parent directories that may need to - be created along the way, without raising errors for any existing - directories. This function mimics the behavior of the ``mkdir -p`` - command available in Linux/BSD environments, but also works on - Windows. - """ - try: - os.makedirs(path) - except OSError as exc: - if exc.errno == errno.EEXIST and os.path.isdir(path): - return - raise - return - - -class FilePerms(object): - """The :class:`FilePerms` type is used to represent standard POSIX - filesystem permissions: - - * Read - * Write - * Execute - - Across three classes of user: - - * Owning (u)ser - * Owner's (g)roup - * Any (o)ther user - - This class assists with computing new permissions, as well as - working with numeric octal ``777``-style and ``rwx``-style - permissions. Currently it only considers the bottom 9 permission - bits; it does not support sticky bits or more advanced permission - systems. - - Args: - user (str): A string in the 'rwx' format, omitting characters - for which owning user's permissions are not provided. - group (str): A string in the 'rwx' format, omitting characters - for which owning group permissions are not provided. - other (str): A string in the 'rwx' format, omitting characters - for which owning other/world permissions are not provided. - - There are many ways to use :class:`FilePerms`: - - >>> FilePerms(user='rwx', group='xrw', other='wxr') # note character order - FilePerms(user='rwx', group='rwx', other='rwx') - >>> int(FilePerms('r', 'r', '')) - 288 - >>> oct(288)[-3:] # XXX Py3k - '440' - - See also the :meth:`FilePerms.from_int` and - :meth:`FilePerms.from_path` classmethods for useful alternative - ways to construct :class:`FilePerms` objects. 
- """ - # TODO: consider more than the lower 9 bits - class _FilePermProperty(object): - _perm_chars = 'rwx' - _perm_set = frozenset('rwx') - _perm_val = {'r': 4, 'w': 2, 'x': 1} # for sorting - - def __init__(self, attribute, offset): - self.attribute = attribute - self.offset = offset - - def __get__(self, fp_obj, type_=None): - if fp_obj is None: - return self - return getattr(fp_obj, self.attribute) - - def __set__(self, fp_obj, value): - cur = getattr(fp_obj, self.attribute) - if cur == value: - return - try: - invalid_chars = set(str(value)) - self._perm_set - except TypeError: - raise TypeError('expected string, not %r' % value) - if invalid_chars: - raise ValueError('got invalid chars %r in permission' - ' specification %r, expected empty string' - ' or one or more of %r' - % (invalid_chars, value, self._perm_chars)) - - sort_key = (lambda c: self._perm_val[c]) - new_value = ''.join(sorted(set(value), - key=sort_key, reverse=True)) - setattr(fp_obj, self.attribute, new_value) - self._update_integer(fp_obj, new_value) - - def _update_integer(self, fp_obj, value): - mode = 0 - key = 'xwr' - for symbol in value: - bit = 2 ** key.index(symbol) - mode |= (bit << (self.offset * 3)) - fp_obj._integer |= mode - - def __init__(self, user='', group='', other=''): - self._user, self._group, self._other = '', '', '' - self._integer = 0 - self.user = user - self.group = group - self.other = other - - @classmethod - def from_int(cls, i): - """Create a :class:`FilePerms` object from an integer. - - >>> FilePerms.from_int(0o644) # note the leading zero-oh for octal - FilePerms(user='rw', group='r', other='r') - """ - i &= FULL_PERMS - key = ('', 'x', 'w', 'xw', 'r', 'rx', 'rw', 'rwx') - parts = [] - while i: - parts.append(key[i & _SINGLE_FULL_PERM]) - i >>= 3 - parts.reverse() - return cls(*parts) - - @classmethod - def from_path(cls, path): - """Make a new :class:`FilePerms` object based on the permissions - assigned to the file or directory at *path*. - - Args: - path (str): Filesystem path of the target file. - - >>> from os.path import expanduser - >>> 'r' in FilePerms.from_path(expanduser('~')).user # probably - True - """ - stat_res = os.stat(path) - return cls.from_int(stat.S_IMODE(stat_res.st_mode)) - - def __int__(self): - return self._integer - - # Sphinx tip: attribute docstrings come after the attribute - user = _FilePermProperty('_user', 2) - "Stores the ``rwx``-formatted *user* permission." - group = _FilePermProperty('_group', 1) - "Stores the ``rwx``-formatted *group* permission." - other = _FilePermProperty('_other', 0) - "Stores the ``rwx``-formatted *other* permission." - - def __repr__(self): - cn = self.__class__.__name__ - return ('%s(user=%r, group=%r, other=%r)' - % (cn, self.user, self.group, self.other)) - -#### - - -_TEXT_OPENFLAGS = os.O_RDWR | os.O_CREAT | os.O_EXCL -if hasattr(os, 'O_NOINHERIT'): - _TEXT_OPENFLAGS |= os.O_NOINHERIT -if hasattr(os, 'O_NOFOLLOW'): - _TEXT_OPENFLAGS |= os.O_NOFOLLOW -_BIN_OPENFLAGS = _TEXT_OPENFLAGS -if hasattr(os, 'O_BINARY'): - _BIN_OPENFLAGS |= os.O_BINARY - - -try: - import fcntl as fcntl -except ImportError: - def set_cloexec(fd): - "Dummy set_cloexec for platforms without fcntl support" - pass -else: - def set_cloexec(fd): - """Does a best-effort :func:`fcntl.fcntl` call to set a fd to be - automatically closed by any future child processes. - - Implementation from the :mod:`tempfile` module. 
- """ - try: - flags = fcntl.fcntl(fd, fcntl.F_GETFD, 0) - except IOError: - pass - else: - # flags read successfully, modify - flags |= fcntl.FD_CLOEXEC - fcntl.fcntl(fd, fcntl.F_SETFD, flags) - return - - -def atomic_save(dest_path, **kwargs): - """A convenient interface to the :class:`AtomicSaver` type. See the - :class:`AtomicSaver` documentation for details. - """ - return AtomicSaver(dest_path, **kwargs) - - -def path_to_unicode(path): - if isinstance(path, unicode): - return path - encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() - return path.decode(encoding) - - -if os.name == 'nt': - import ctypes - from ctypes import c_wchar_p - from ctypes.wintypes import DWORD, LPVOID - - _ReplaceFile = ctypes.windll.kernel32.ReplaceFile - _ReplaceFile.argtypes = [c_wchar_p, c_wchar_p, c_wchar_p, - DWORD, LPVOID, LPVOID] - - def replace(src, dst): - # argument names match stdlib docs, docstring below - try: - # ReplaceFile fails if the dest file does not exist, so - # first try to rename it into position - os.rename(src, dst) - return - except WindowsError as we: - if we.errno == errno.EEXIST: - pass # continue with the ReplaceFile logic below - else: - raise - - src = path_to_unicode(src) - dst = path_to_unicode(dst) - res = _ReplaceFile(c_wchar_p(dst), c_wchar_p(src), - None, 0, None, None) - if not res: - raise OSError('failed to replace %r with %r' % (dst, src)) - return - - def atomic_rename(src, dst, overwrite=False): - "Rename *src* to *dst*, replacing *dst* if *overwrite is True" - if overwrite: - replace(src, dst) - else: - os.rename(src, dst) - return -else: - # wrapper func for cross compat + docs - def replace(src, dst): - # os.replace does the same thing on unix - return os.rename(src, dst) - - def atomic_rename(src, dst, overwrite=False): - "Rename *src* to *dst*, replacing *dst* if *overwrite is True" - if overwrite: - os.rename(src, dst) - else: - os.link(src, dst) - os.unlink(dst) - return - - -_atomic_rename = atomic_rename # backwards compat - -replace.__doc__ = """Similar to :func:`os.replace` in Python 3.3+, -this function will atomically create or replace the file at path -*dst* with the file at path *src*. - -On Windows, this function uses the ReplaceFile API for maximum -possible atomicity on a range of filesystems. -""" - - -class AtomicSaver(object): - """``AtomicSaver`` is a configurable `context manager`_ that provides - a writable :class:`file` which will be moved into place as long as - no exceptions are raised within the context manager's block. These - "part files" are created in the same directory as the destination - path to ensure atomic move operations (i.e., no cross-filesystem - moves occur). - - Args: - dest_path (str): The path where the completed file will be - written. - overwrite (bool): Whether to overwrite the destination file if - it exists at completion time. Defaults to ``True``. - file_perms (int): Integer representation of file permissions - for the newly-created file. Defaults are, when the - destination path already exists, to copy the permissions - from the previous file, or if the file did not exist, to - respect the user's configured `umask`_, usually resulting - in octal 0644 or 0664. - part_file (str): Name of the temporary *part_file*. Defaults - to *dest_path* + ``.part``. Note that this argument is - just the filename, and not the full path of the part - file. To guarantee atomic saves, part files are always - created in the same directory as the destination path. 
- overwrite_part (bool): Whether to overwrite the *part_file*, - should it exist at setup time. Defaults to ``False``, - which results in an :exc:`OSError` being raised on - pre-existing part files. Be careful of setting this to - ``True`` in situations when multiple threads or processes - could be writing to the same part file. - rm_part_on_exc (bool): Remove *part_file* on exception cases. - Defaults to ``True``, but ``False`` can be useful for - recovery in some cases. Note that resumption is not - automatic and by default an :exc:`OSError` is raised if - the *part_file* exists. - - Practically, the AtomicSaver serves a few purposes: - - * Avoiding overwriting an existing, valid file with a partially - written one. - * Providing a reasonable guarantee that a part file only has one - writer at a time. - * Optional recovery of partial data in failure cases. - - .. _context manager: https://docs.python.org/3/reference/compound_stmts.html#with - .. _umask: https://en.wikipedia.org/wiki/Umask - - """ - _default_file_perms = RW_PERMS - - # TODO: option to abort if target file modify date has changed since start? - def __init__(self, dest_path, **kwargs): - self.dest_path = dest_path - self.overwrite = kwargs.pop('overwrite', True) - self.file_perms = kwargs.pop('file_perms', None) - self.overwrite_part = kwargs.pop('overwrite_part', False) - self.part_filename = kwargs.pop('part_file', None) - self.rm_part_on_exc = kwargs.pop('rm_part_on_exc', True) - self.text_mode = kwargs.pop('text_mode', False) # for windows - self.buffering = kwargs.pop('buffering', -1) - if kwargs: - raise TypeError('unexpected kwargs: %r' % (kwargs.keys(),)) - - self.dest_path = os.path.abspath(self.dest_path) - self.dest_dir = os.path.dirname(self.dest_path) - if not self.part_filename: - self.part_path = dest_path + '.part' - else: - self.part_path = os.path.join(self.dest_dir, self.part_filename) - self.mode = 'w+' if self.text_mode else 'w+b' - self.open_flags = _TEXT_OPENFLAGS if self.text_mode else _BIN_OPENFLAGS - - self.part_file = None - - def _open_part_file(self): - do_chmod = True - file_perms = self.file_perms - if file_perms is None: - try: - # try to copy from file being replaced - stat_res = os.stat(self.dest_path) - file_perms = stat.S_IMODE(stat_res.st_mode) - except (OSError, IOError): - # default if no destination file exists - file_perms = self._default_file_perms - do_chmod = False # respect the umask - - fd = os.open(self.part_path, self.open_flags, file_perms) - set_cloexec(fd) - self.part_file = os.fdopen(fd, self.mode, self.buffering) - - # if default perms are overridden by the user or previous dest_path - # chmod away the effects of the umask - if do_chmod: - try: - os.chmod(self.part_path, file_perms) - except (OSError, IOError): - self.part_file.close() - raise - return - - def setup(self): - """Called on context manager entry (the :keyword:`with` statement), - the ``setup()`` method creates the temporary file in the same - directory as the destination file. - - ``setup()`` tests for a writable directory with rename permissions - early, as the part file may not be written to immediately (not - using :func:`os.access` because of the potential issues of - effective vs. real privileges). - - If the caller is not using the :class:`AtomicSaver` as a - context manager, this method should be called explicitly - before writing. 
- """ - if os.path.lexists(self.dest_path): - if not self.overwrite: - raise OSError(errno.EEXIST, - 'Overwrite disabled and file already exists', - self.dest_path) - if self.overwrite_part and os.path.lexists(self.part_path): - os.unlink(self.part_path) - self._open_part_file() - return - - def __enter__(self): - self.setup() - return self.part_file - - def __exit__(self, exc_type, exc_val, exc_tb): - self.part_file.close() - if exc_type: - if self.rm_part_on_exc: - try: - os.unlink(self.part_path) - except Exception: - pass # avoid masking original error - return - try: - atomic_rename(self.part_path, self.dest_path, - overwrite=self.overwrite) - except OSError: - if self.rm_part_on_exc: - try: - os.unlink(self.part_path) - except Exception: - pass # avoid masking original error - raise # could not save destination file - return - - -def iter_find_files(directory, patterns, ignored=None): - """Returns a generator that yields file paths under a *directory*, - matching *patterns* using `glob`_ syntax (e.g., ``*.txt``). Also - supports *ignored* patterns. - - Args: - directory (str): Path that serves as the root of the - search. Yielded paths will include this as a prefix. - patterns (str or list): A single pattern or list of - glob-formatted patterns to find under *directory*. - ignored (str or list): A single pattern or list of - glob-formatted patterns to ignore. - - For example, finding Python files in the directory of this module: - - >>> files = set(iter_find_files(os.path.dirname(__file__), '*.py')) - - Or, Python files while ignoring emacs lockfiles: - - >>> filenames = iter_find_files('.', '*.py', ignored='.#*') - - .. _glob: https://en.wikipedia.org/wiki/Glob_%28programming%29 - - """ - if isinstance(patterns, basestring): - patterns = [patterns] - pats_re = re.compile('|'.join([fnmatch.translate(p) for p in patterns])) - - if not ignored: - ignored = [] - elif isinstance(ignored, basestring): - ignored = [ignored] - ign_re = re.compile('|'.join([fnmatch.translate(p) for p in ignored])) - for root, dirs, files in os.walk(directory): - for basename in files: - if pats_re.match(basename): - if ignored and ign_re.match(basename): - continue - filename = os.path.join(root, basename) - yield filename - return - - -def copy_tree(src, dst, symlinks=False, ignore=None): - """The ``copy_tree`` function is an exact copy of the built-in - :func:`shutil.copytree`, with one key difference: it will not - raise an exception if part of the tree already exists. It achieves - this by using :func:`mkdir_p`. - - Args: - src (str): Path of the source directory to copy. - dst (str): Destination path. Existing directories accepted. - symlinks (bool): If ``True``, copy symlinks rather than their - contents. - ignore (callable): A callable that takes a path and directory - listing, returning the files within the listing to be ignored. - - For more details, check out :func:`shutil.copytree` and - :func:`shutil.copy2`. 
- - """ - names = os.listdir(src) - if ignore is not None: - ignored_names = ignore(src, names) - else: - ignored_names = set() - - mkdir_p(dst) - errors = [] - for name in names: - if name in ignored_names: - continue - srcname = os.path.join(src, name) - dstname = os.path.join(dst, name) - try: - if symlinks and os.path.islink(srcname): - linkto = os.readlink(srcname) - os.symlink(linkto, dstname) - elif os.path.isdir(srcname): - copytree(srcname, dstname, symlinks, ignore) - else: - # Will raise a SpecialFileError for unsupported file types - copy2(srcname, dstname) - # catch the Error from the recursive copytree so that we can - # continue with other files - except Error as e: - errors.extend(e.args[0]) - except EnvironmentError as why: - errors.append((srcname, dstname, str(why))) - try: - copystat(src, dst) - except OSError as why: - if WindowsError is not None and isinstance(why, WindowsError): - # Copying file access times may fail on Windows - pass - else: - errors.append((src, dst, str(why))) - if errors: - raise Error(errors) - - -copytree = copy_tree # alias for drop-in replacement of shutil - - -try: - file -except NameError: - file = object - - -# like open(os.devnull) but with even fewer side effects -class DummyFile(file): - # TODO: raise ValueErrors on closed for all methods? - # TODO: enforce read/write - def __init__(self, path, mode='r', buffering=None): - self.name = path - self.mode = mode - self.closed = False - self.errors = None - self.isatty = False - self.encoding = None - self.newlines = None - self.softspace = 0 - - def close(self): - self.closed = True - - def fileno(self): - return -1 - - def flush(self): - if self.closed: - raise ValueError('I/O operation on a closed file') - return - - def next(self): - raise StopIteration() - - def read(self, size=0): - if self.closed: - raise ValueError('I/O operation on a closed file') - return '' - - def readline(self, size=0): - if self.closed: - raise ValueError('I/O operation on a closed file') - return '' - - def readlines(self, size=0): - if self.closed: - raise ValueError('I/O operation on a closed file') - return [] - - def seek(self): - if self.closed: - raise ValueError('I/O operation on a closed file') - return - - def tell(self): - if self.closed: - raise ValueError('I/O operation on a closed file') - return 0 - - def truncate(self): - if self.closed: - raise ValueError('I/O operation on a closed file') - return - - def write(self, string): - if self.closed: - raise ValueError('I/O operation on a closed file') - return - - def writelines(self, list_of_strings): - if self.closed: - raise ValueError('I/O operation on a closed file') - return - - def __next__(self): - raise StopIteration() - - def __enter__(self): - if self.closed: - raise ValueError('I/O operation on a closed file') - return - - def __exit__(self, exc_type, exc_val, exc_tb): - return diff --git a/pipenv/patched/piptools/locations.py b/pipenv/patched/piptools/locations.py index 9fcea0af65..7abf5c76a8 100644 --- a/pipenv/patched/piptools/locations.py +++ b/pipenv/patched/piptools/locations.py @@ -1,23 +1,27 @@ import os from shutil import rmtree -from .click import secho from ._compat import user_cache_dir +from .click import secho # The user_cache_dir helper comes straight from pipenv.patched.notpip itself try: - from pipenv.environments import PIPENV_CACHE_DIR - CACHE_DIR = PIPENV_CACHE_DIR + from pipenv.environments import PIPENV_CACHE_DIR as CACHE_DIR except ImportError: - CACHE_DIR = user_cache_dir('pipenv') + CACHE_DIR = 
user_cache_dir("pipenv") # NOTE # We used to store the cache dir under ~/.pip-tools, which is not the # preferred place to store caches for any platform. This has been addressed # in pip-tools==1.0.5, but to be good citizens, we point this out explicitly # to the user when this directory is still found. -LEGACY_CACHE_DIR = os.path.expanduser('~/.pip-tools') +LEGACY_CACHE_DIR = os.path.expanduser("~/.pip-tools") if os.path.exists(LEGACY_CACHE_DIR): - secho('Removing old cache dir {} (new cache dir is {})'.format(LEGACY_CACHE_DIR, CACHE_DIR), fg='yellow') + secho( + "Removing old cache dir {} (new cache dir is {})".format( + LEGACY_CACHE_DIR, CACHE_DIR + ), + fg="yellow", + ) rmtree(LEGACY_CACHE_DIR) diff --git a/pipenv/patched/piptools/logging.py b/pipenv/patched/piptools/logging.py index f0bd17842c..488a8a2e5e 100644 --- a/pipenv/patched/piptools/logging.py +++ b/pipenv/patched/piptools/logging.py @@ -1,8 +1,5 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import sys +from __future__ import absolute_import, division, print_function, unicode_literals from . import click @@ -12,6 +9,7 @@ def __init__(self, verbosity=0): self.verbosity = verbosity def log(self, *args, **kwargs): + kwargs.setdefault("err", True) click.secho(*args, **kwargs) def debug(self, *args, **kwargs): @@ -23,13 +21,11 @@ def info(self, *args, **kwargs): self.log(*args, **kwargs) def warning(self, *args, **kwargs): - kwargs.setdefault('fg', 'yellow') - kwargs.setdefault('file', sys.stderr) + kwargs.setdefault("fg", "yellow") self.log(*args, **kwargs) def error(self, *args, **kwargs): - kwargs.setdefault('fg', 'red') - kwargs.setdefault('file', sys.stderr) + kwargs.setdefault("fg", "red") self.log(*args, **kwargs) diff --git a/pipenv/patched/piptools/pip.py b/pipenv/patched/piptools/pip.py deleted file mode 100644 index 0419a8ab77..0000000000 --- a/pipenv/patched/piptools/pip.py +++ /dev/null @@ -1,30 +0,0 @@ -import optparse - -from ._compat import Command, cmdoptions - - -class PipCommand(Command): - name = 'PipCommand' - - -def get_pip_command(): - # Use pip's parser for pip.conf management and defaults. - # General options (find_links, index_url, extra_index_url, trusted_host, - # and pre) are defered to pip. - pip_command = PipCommand() - pip_command.parser.add_option(cmdoptions.no_binary()) - pip_command.parser.add_option(cmdoptions.only_binary()) - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - pip_command.parser, - ) - pip_command.parser.insert_option_group(0, index_opts) - pip_command.parser.add_option(optparse.Option('--pre', action='store_true', default=False)) - - return pip_command - - -pip_command = get_pip_command() - -# Get default values of the pip's options (including options from pipenv.patched.notpip.conf). 
-pip_defaults = pip_command.parser.get_default_values() diff --git a/pipenv/patched/piptools/repositories/base.py b/pipenv/patched/piptools/repositories/base.py index 57e85fda0e..dd73ff32ee 100644 --- a/pipenv/patched/piptools/repositories/base.py +++ b/pipenv/patched/piptools/repositories/base.py @@ -1,6 +1,5 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) +from __future__ import absolute_import, division, print_function, unicode_literals from abc import ABCMeta, abstractmethod from contextlib import contextmanager @@ -10,7 +9,6 @@ @add_metaclass(ABCMeta) class BaseRepository(object): - def clear_caches(self): """Should clear any caches used by the implementation.""" @@ -27,7 +25,7 @@ def find_best_match(self, ireq): @abstractmethod def get_dependencies(self, ireq): """ - Given a pinned or an editable InstallRequirement, returns a set of + Given a pinned, URL, or editable InstallRequirement, returns a set of dependencies (also InstallRequirements, but not necessarily pinned). They indicate the secondary dependencies for the given requirement. """ diff --git a/pipenv/patched/piptools/repositories/local.py b/pipenv/patched/piptools/repositories/local.py index 36bafdb989..c1bcf9d1ba 100644 --- a/pipenv/patched/piptools/repositories/local.py +++ b/pipenv/patched/piptools/repositories/local.py @@ -1,12 +1,12 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) +from __future__ import absolute_import, division, print_function, unicode_literals from contextlib import contextmanager -from piptools.utils import as_tuple, key_from_req, make_install_requirement -from .base import BaseRepository from .._compat import FAVORITE_HASH +from .base import BaseRepository + +from piptools.utils import as_tuple, key_from_req, make_install_requirement def ireq_satisfied_by_existing_pin(ireq, existing_pin): @@ -28,10 +28,15 @@ class LocalRequirementsRepository(BaseRepository): requirements file, we prefer that version over the best match found in PyPI. This keeps updates to the requirements.txt down to a minimum. 
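+
+    For example, if requirements.txt already pins ``six==1.12.0`` and that
+    pin still satisfies the constraint (say ``six>=1.10``), the ``1.12.0``
+    pin is reused instead of being bumped to the newest release on the index.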
""" + def __init__(self, existing_pins, proxied_repository): self.repository = proxied_repository self.existing_pins = existing_pins + @property + def options(self): + return self.repository.options + @property def finder(self): return self.repository.finder @@ -56,8 +61,8 @@ def find_best_match(self, ireq, prereleases=None): if existing_pin and ireq_satisfied_by_existing_pin(ireq, existing_pin): project, version, _ = as_tuple(existing_pin) return make_install_requirement( - project, version, ireq.extras, constraint=ireq.constraint, - markers=ireq.markers + project, version, ireq.extras, ireq.markers, + constraint=ireq.constraint ) else: return self.repository.find_best_match(ireq, prereleases) @@ -69,12 +74,11 @@ def get_hashes(self, ireq): key = key_from_req(ireq.req) existing_pin = self.existing_pins.get(key) if existing_pin and ireq_satisfied_by_existing_pin(ireq, existing_pin): - hashes = existing_pin.options.get('hashes', {}) + hashes = existing_pin.options.get("hashes", {}) hexdigests = hashes.get(FAVORITE_HASH) if hexdigests: return { - ':'.join([FAVORITE_HASH, hexdigest]) - for hexdigest in hexdigests + ":".join([FAVORITE_HASH, hexdigest]) for hexdigest in hexdigests } return self.repository.get_hashes(ireq) diff --git a/pipenv/patched/piptools/repositories/pypi.py b/pipenv/patched/piptools/repositories/pypi.py index 10a0e46954..05a266c471 100644 --- a/pipenv/patched/piptools/repositories/pypi.py +++ b/pipenv/patched/piptools/repositories/pypi.py @@ -1,48 +1,59 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) +from __future__ import absolute_import, division, print_function, unicode_literals +import collections import copy import hashlib import os from contextlib import contextmanager +from functools import partial from shutil import rmtree -import pkg_resources - from packaging.requirements import Requirement -from packaging.specifiers import SpecifierSet, Specifier - -os.environ["PIP_SHIMS_BASE_MODULE"] = str("pipenv.patched.notpip") -import pip_shims -from pip_shims.shims import VcsSupport, WheelCache, InstallationError - +from packaging.specifiers import Specifier, SpecifierSet from .._compat import ( - is_file_url, - url_to_path, - PackageFinder, - RequirementSet, - Wheel, FAVORITE_HASH, - TemporaryDirectory, - PyPI, + PIP_VERSION, + InstallationError, InstallRequirement, - SafeFileCache + Link, + PyPI, + RequirementSet, + RequirementTracker, + Resolver as PipResolver, + SafeFileCache, + TemporaryDirectory, + VcsSupport, + Wheel, + WheelCache, + contextlib, + is_dir_url, + is_file_url, + is_vcs_url, + path_to_url, + pip_version, + url_to_path, ) - from ..cache import CACHE_DIR +from ..click import progressbar from ..exceptions import NoCandidateFound -from ..utils import (fs_str, is_pinned_requirement, lookup_table, dedup, - make_install_requirement, clean_requires_python) +from ..logging import log +from ..utils import ( + dedup, + clean_requires_python, + create_install_command, + fs_str, + is_pinned_requirement, + is_url_requirement, + lookup_table, + make_install_requirement, +) from .base import BaseRepository -try: - from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker -except ImportError: - @contextmanager - def RequirementTracker(): - yield +os.environ["PIP_SHIMS_BASE_MODULE"] = str("pipenv.patched.notpip") +FILE_CHUNK_SIZE = 4096 +FileStream = collections.namedtuple("FileStream", "stream size") class HashCache(SafeFileCache): @@ -93,29 +104,23 @@ class PyPIRepository(BaseRepository): 
config), but any other PyPI mirror can be used if index_urls is changed/configured on the Finder. """ - def __init__(self, pip_options, session, build_isolation=False, use_json=False): - self.session = session - self.pip_options = pip_options + + def __init__(self, pip_args, session=None, build_isolation=False, use_json=False): self.build_isolation = build_isolation self.use_json = use_json - index_urls = [pip_options.index_url] + pip_options.extra_index_urls - if pip_options.no_index: - index_urls = [] - - finder_kwargs = { - "find_links": pip_options.find_links, - "index_urls": index_urls, - "trusted_hosts": pip_options.trusted_hosts, - "allow_all_prereleases": pip_options.pre, - "session": self.session, - } - - # pip 19.0 has removed process_dependency_links from the PackageFinder constructor - if pkg_resources.parse_version(pip_shims.shims.pip_version) < pkg_resources.parse_version('19.0'): - finder_kwargs["process_dependency_links"] = pip_options.process_dependency_links + # Use pip's parser for pip.conf management and defaults. + # General options (find_links, index_url, extra_index_url, trusted_host, + # and pre) are deferred to pip. + command = create_install_command() + self.options, _ = command.parse_args(pip_args) - self.finder = PackageFinder(**finder_kwargs) + if session is None: + session = command._build_session(self.options) + self.session = session + self.finder = command._build_package_finder( + options=self.options, session=self.session + ) # Caches # stores project_name => InstallationCandidate mappings for all @@ -134,16 +139,16 @@ def __init__(self, pip_options, session, build_isolation=False, use_json=False): # Setup file paths self.freshen_build_caches() - self._download_dir = fs_str(os.path.join(CACHE_DIR, 'pkgs')) - self._wheel_download_dir = fs_str(os.path.join(CACHE_DIR, 'wheels')) + self._download_dir = fs_str(os.path.join(CACHE_DIR, "pkgs")) + self._wheel_download_dir = fs_str(os.path.join(CACHE_DIR, "wheels")) def freshen_build_caches(self): """ Start with fresh build/source caches. Will remove any old build caches from disk automatically. """ - self._build_dir = TemporaryDirectory(fs_str('build')) - self._source_dir = TemporaryDirectory(fs_str('source')) + self._build_dir = TemporaryDirectory(fs_str("build")) + self._source_dir = TemporaryDirectory(fs_str("source")) @property def build_dir(self): @@ -168,11 +173,13 @@ def find_best_match(self, ireq, prereleases=None): Returns a Version object that indicates the best match for the given InstallRequirement according to the external repository. 
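+        Editable and URL requirements are returned as-is, since they cannot
+        be pinned to a single version.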
""" - if ireq.editable: + if ireq.editable or is_url_requirement(ireq): return ireq # return itself as the best match all_candidates = clean_requires_python(self.find_all_candidates(ireq.name)) - candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version, unique=True) + candidates_by_version = lookup_table( + all_candidates, key=lambda c: c.version, unique=True + ) try: matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates), prereleases=prereleases) @@ -183,11 +190,31 @@ def find_best_match(self, ireq, prereleases=None): matching_candidates = [candidates_by_version[ver] for ver in matching_versions] if not matching_candidates: raise NoCandidateFound(ireq, all_candidates, self.finder) - best_candidate = max(matching_candidates, key=self.finder._candidate_sort_key) + + if PIP_VERSION < (19, 1): + best_candidate = max( + matching_candidates, key=self.finder._candidate_sort_key + ) + elif PIP_VERSION < (19, 2): + evaluator = self.finder.candidate_evaluator + best_candidate = evaluator.get_best_candidate(matching_candidates) + elif PIP_VERSION < (19, 3): + evaluator = self.finder.make_candidate_evaluator(ireq.name) + best_candidate = evaluator.get_best_candidate(matching_candidates) + else: + evaluator = self.finder.make_candidate_evaluator(ireq.name) + best_candidate_result = evaluator.compute_best_candidate( + matching_candidates + ) + best_candidate = best_candidate_result.best_candidate # Turn the candidate into a pinned InstallRequirement return make_install_requirement( - best_candidate.project, best_candidate.version, ireq.extras, ireq.markers, constraint=ireq.constraint + best_candidate.project, + best_candidate.version, + ireq.extras, + ireq.markers, + constraint=ireq.constraint, ) def get_dependencies(self, ireq): @@ -254,10 +281,8 @@ def resolve_reqs(self, download_dir, ireq, wheel_cache): ireq.populate_link(self.finder, False, False) if ireq.link and not ireq.link.is_wheel: ireq.ensure_has_source_dir(self.source_dir) - try: - from pipenv.patched.notpip._internal.operations.prepare import RequirementPreparer - except ImportError: - # Pip 9 and below + + if PIP_VERSION < (10,): reqset = RequirementSet( self.build_dir, self.source_dir, @@ -266,66 +291,90 @@ def resolve_reqs(self, download_dir, ireq, wheel_cache): session=self.session, ignore_installed=True, ignore_compatibility=False, - wheel_cache=wheel_cache + wheel_cache=wheel_cache, ) results = reqset._prepare_file(self.finder, ireq, ignore_requires_python=True) else: - # pip >= 10 + from pip_shims.shims import RequirementPreparer + preparer_kwargs = { - 'build_dir': self.build_dir, - 'src_dir': self.source_dir, - 'download_dir': download_dir, - 'wheel_download_dir': self._wheel_download_dir, - 'progress_bar': 'off', - 'build_isolation': self.build_isolation, + "build_dir": self.build_dir, + "src_dir": self.source_dir, + "download_dir": download_dir, + "wheel_download_dir": self._wheel_download_dir, + "progress_bar": "off", + "build_isolation": self.build_isolation, } resolver_kwargs = { - 'finder': self.finder, - 'session': self.session, - 'upgrade_strategy': "to-satisfy-only", - 'force_reinstall': False, - 'ignore_dependencies': False, - 'ignore_requires_python': True, - 'ignore_installed': True, - 'ignore_compatibility': False, - 'isolated': True, - 'wheel_cache': wheel_cache, - 'use_user_site': False, - 'use_pep517': True + "finder": self.finder, + "session": self.session, + "upgrade_strategy": "to-satisfy-only", + "force_reinstall": False, + "ignore_dependencies": False, 
+                "ignore_requires_python": True,
+                "ignore_installed": True,
+                "use_user_site": False,
+                "ignore_compatibility": False,
+                "use_pep517": True,
             }
+            make_install_req_kwargs = {"isolated": False, "wheel_cache": wheel_cache}
+
+            if PIP_VERSION < (19, 3):
+                resolver_kwargs.update(**make_install_req_kwargs)
+            else:
+                from pipenv.patched.notpip._internal.req.constructors import install_req_from_req_string
+
+                make_install_req = partial(
+                    install_req_from_req_string, **make_install_req_kwargs
+                )
+                resolver_kwargs["make_install_req"] = make_install_req
+
+            if PIP_VERSION >= (20,):
+                preparer_kwargs["session"] = self.session
+                del resolver_kwargs["session"]
+
             resolver = None
             preparer = None
+            reqset = None
             with RequirementTracker() as req_tracker:
                 # Pip 18 uses a requirement tracker to prevent fork bombs
                 if req_tracker:
-                    preparer_kwargs['req_tracker'] = req_tracker
+                    preparer_kwargs["req_tracker"] = req_tracker
                 preparer = RequirementPreparer(**preparer_kwargs)
-                resolver_kwargs['preparer'] = preparer
+                resolver_kwargs["preparer"] = preparer
                 reqset = RequirementSet()
                 ireq.is_direct = True
-                # reqset.add_requirement(ireq)
-                resolver = pip_shims.shims.Resolver(**resolver_kwargs)
-                resolver.require_hashes = False
-                results = resolver._resolve_one(reqset, ireq)
-                cleanup_fn = getattr(reqset, "cleanup_files", None)
-                if cleanup_fn is not None:
-                    try:
-                        cleanup_fn()
-                    except OSError:
-                        pass
+                resolver = PipResolver(**resolver_kwargs)
+                require_hashes = False
+                if PIP_VERSION < (20,):
+                    resolver.require_hashes = require_hashes
+                    results = resolver._resolve_one(reqset, ireq)
+                else:
+                    results = resolver._resolve_one(reqset, ireq, require_hashes)
+                try:
+                    reqset.cleanup_files()
+                except (AttributeError, OSError):
+                    pass
         results = set(results) if results else set()
-        return results, ireq
+
+        return set(results), ireq

     def get_legacy_dependencies(self, ireq):
         """
-        Given a pinned or an editable InstallRequirement, returns a set of
+        Given a pinned, URL, or editable InstallRequirement, returns a set of
         dependencies (also InstallRequirements, but not necessarily pinned).
         They indicate the secondary dependencies for the given requirement.
         """
-        if not (ireq.editable or is_pinned_requirement(ireq)):
-            raise TypeError('Expected pinned or editable InstallRequirement, got {}'.format(ireq))
+        if not (
+            ireq.editable or is_url_requirement(ireq) or is_pinned_requirement(ireq)
+        ):
+            raise TypeError(
+                "Expected url, pinned or editable InstallRequirement, got {}".format(
+                    ireq
+                )
+            )

         if ireq not in self._dependencies_cache:
             if ireq.editable and (ireq.source_dir and os.path.exists(ireq.source_dir)):
                 # If a download_dir is passed, pip will unnecessarily
                 # archive the entire source directory
                 download_dir = None
-            elif ireq.link and not ireq.link.is_artifact:
+            elif ireq.link and is_vcs_url(ireq.link):
                 # No download_dir for VCS sources. This also works around pip
                 # using git-checkout-index, which gets rid of the .git dir.
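# --- Illustrative aside -------------------------------------------------------
# resolve_reqs() above adapts its keyword arguments to the installed pip via
# tuple comparisons. A minimal sketch of that gating pattern; PIP_VERSION is
# assumed to be a version tuple (as piptools derives from pip.__version__) and
# fake_factory is a hypothetical stand-in for install_req_from_req_string.
from functools import partial

PIP_VERSION = (19, 3, 1)  # assumed for illustration
resolver_kwargs = {"ignore_installed": True}
make_install_req_kwargs = {"isolated": False, "wheel_cache": None}

if PIP_VERSION < (19, 3):
    # Older pips accept these options on the Resolver itself.
    resolver_kwargs.update(make_install_req_kwargs)
else:
    # Newer pips want a factory callable instead.
    def fake_factory(req_string, **kwargs):
        return (req_string, kwargs)

    resolver_kwargs["make_install_req"] = partial(fake_factory, **make_install_req_kwargs)
# ------------------------------------------------------------------------------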
                download_dir = None
@@ -344,39 +393,52 @@
             if not os.path.isdir(self._wheel_download_dir):
                 os.makedirs(self._wheel_download_dir)

-        wheel_cache = WheelCache(CACHE_DIR, self.pip_options.format_control)
-        prev_tracker = os.environ.get('PIP_REQ_TRACKER')
+        wheel_cache = WheelCache(CACHE_DIR, self.options.format_control)
+        prev_tracker = os.environ.get("PIP_REQ_TRACKER")
         try:
             results, ireq = self.resolve_reqs(download_dir, ireq, wheel_cache)
             self._dependencies_cache[ireq] = results
         finally:
-            if 'PIP_REQ_TRACKER' in os.environ:
+            if "PIP_REQ_TRACKER" in os.environ:
                 if prev_tracker:
-                    os.environ['PIP_REQ_TRACKER'] = prev_tracker
+                    os.environ["PIP_REQ_TRACKER"] = prev_tracker
                 else:
-                    del os.environ['PIP_REQ_TRACKER']
-            try:
-                self.wheel_cache.cleanup()
-            except AttributeError:
-                pass
+                    del os.environ["PIP_REQ_TRACKER"]
+
+            # WheelCache.cleanup() introduced in pip==10.0.0
+            if PIP_VERSION >= (10,):
+                wheel_cache.cleanup()

         return self._dependencies_cache[ireq]

     def get_hashes(self, ireq):
         """
         Given an InstallRequirement, return a set of hashes that represent all
-        of the files for a given requirement. Editable requirements return an
+        of the files for a given requirement. Unhashable requirements return an
         empty set. Unpinned requirements raise a TypeError.
         """
-        if ireq.editable:
-            return set()
-
-        vcs = VcsSupport()
-        if ireq.link and ireq.link.scheme in vcs.all_schemes and 'ssh' in ireq.link.scheme:
-            return set()
+        if ireq.link:
+            link = ireq.link
+
+            if is_vcs_url(link) or (is_file_url(link) and is_dir_url(link)):
+                # Return empty set for unhashable requirements.
+                # Unhashable logic modeled on pip's
+                # RequirementPreparer.prepare_linked_requirement
+                return set()
+
+            if is_url_requirement(ireq):
+                # Directly hash URL requirements.
+                # URL requirements may have been previously downloaded and cached
+                # locally by self.resolve_reqs()
+                cached_path = os.path.join(self._download_dir, link.filename)
+                if os.path.exists(cached_path):
+                    cached_link = Link(path_to_url(cached_path))
+                else:
+                    cached_link = link
+                return {self._get_file_hash(cached_link)}

         if not is_pinned_requirement(ireq):
-            raise TypeError(
-                "Expected pinned requirement, got {}".format(ireq))
+            raise TypeError("Expected pinned requirement, got {}".format(ireq))

         # We need to get all of the candidates that match our current version
         # pin, these will represent all of the files that could possibly
@@ -386,9 +448,17 @@ def get_hashes(self, ireq):
             if c.version in ireq.specifier
         )

+        log.debug("  {}".format(ireq.name))
+
+        def get_candidate_link(candidate):
+            if PIP_VERSION < (19, 2):
+                return candidate.location
+            return candidate.link
+
         return {
-            h for h in map(lambda c: self._hash_cache.get_hash(c.location),
-                           matching_candidates) if h is not None
+            h for h in
+            map(lambda c: self._hash_cache.get_hash(get_candidate_link(c)), matching_candidates)
+            if h is not None
         }

     @contextmanager
@@ -396,9 +466,10 @@ def allow_all_wheels(self):
         """
         Monkey patches pip.Wheel to allow wheels from all platforms and Python versions.

-        This also saves the candidate cache and set a new one, or else the results from the
-        previous non-patched calls will interfere.
+        This also saves the candidate cache and sets a new one, or else the results from
+        the previous non-patched calls will interfere.
         """
+
         def _wheel_supported(self, tags=None):
             # Ignore current platform. Support everything.
return True @@ -431,7 +502,7 @@ def open_local_or_remote_file(link, session): :type link: pip.index.Link :type session: requests.Session :raises ValueError: If link points to a local directory. - :return: a context manager to the opened file-like object + :return: a context manager to a FileStream with the opened file-like object """ url = link.url_without_fragment @@ -441,13 +512,21 @@ def open_local_or_remote_file(link, session): if os.path.isdir(local_path): raise ValueError("Cannot open directory for read: {}".format(url)) else: - with open(local_path, 'rb') as local_file: - yield local_file + st = os.stat(local_path) + with open(local_path, "rb") as local_file: + yield FileStream(stream=local_file, size=st.st_size) else: # Remote URL headers = {"Accept-Encoding": "identity"} response = session.get(url, headers=headers, stream=True) + + # Content length must be int or None + try: + content_length = int(response.headers["content-length"]) + except (ValueError, KeyError, TypeError): + content_length = None + try: - yield response.raw + yield FileStream(stream=response.raw, size=content_length) finally: response.close() diff --git a/pipenv/patched/piptools/resolver.py b/pipenv/patched/piptools/resolver.py index b642bc9c44..c05666512a 100644 --- a/pipenv/patched/piptools/resolver.py +++ b/pipenv/patched/piptools/resolver.py @@ -1,29 +1,35 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) +from __future__ import absolute_import, division, print_function, unicode_literals import copy +import os from functools import partial from itertools import chain, count -import os - -from ._compat import install_req_from_line from . import click +from ._compat import install_req_from_line from .cache import DependencyCache -from .exceptions import UnsupportedConstraint from .logging import log -from .utils import (format_requirement, format_specifier, full_groupby, - is_pinned_requirement, key_from_ireq, key_from_req, UNSAFE_PACKAGES) +from .utils import ( + UNSAFE_PACKAGES, + format_requirement, + format_specifier, + full_groupby, + is_pinned_requirement, + is_url_requirement, + key_from_ireq, + key_from_req, +) -green = partial(click.style, fg='green') -magenta = partial(click.style, fg='magenta') +green = partial(click.style, fg="green") +magenta = partial(click.style, fg="magenta") class RequirementSummary(object): """ Summary of a requirement's properties for comparison purposes. """ + def __init__(self, ireq): self.req = ireq.req self.key = key_from_req(ireq.req) @@ -41,8 +47,66 @@ def __str__(self): return repr([self.key, self.specifier, self.extras]) +def combine_install_requirements(ireqs): + """ + Return a single install requirement that reflects a combination of + all the inputs. + """ + # We will store the source ireqs in a _source_ireqs attribute; + # if any of the inputs have this, then use those sources directly. 
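# --- Illustrative aside -------------------------------------------------------
# A minimal model of the combination performed below, using packaging directly:
# specifier sets intersect with "&" and extras union, mirroring how the real
# code folds duplicate InstallRequirements into one.
from packaging.requirements import Requirement

a = Requirement("requests>=2.0")
b = Requirement("requests[security]<3.0")
combined = Requirement("requests")
combined.specifier = a.specifier & b.specifier   # >=2.0,<3.0
combined.extras = set(a.extras) | set(b.extras)  # {"security"}
# ------------------------------------------------------------------------------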
+ source_ireqs = [] + for ireq in ireqs: + source_ireqs.extend(getattr(ireq, "_source_ireqs", [ireq])) + + # deepcopy the accumulator so as to not modify the inputs + combined_ireq = copy.deepcopy(source_ireqs[0]) + for ireq in source_ireqs[1:]: + # NOTE we may be losing some info on dropped reqs here + combined_ireq.req.specifier &= ireq.req.specifier + combined_ireq.constraint &= ireq.constraint + if not combined_ireq.markers: + combined_ireq.markers = ireq.markers + else: + _markers = combined_ireq.markers._markers + if not isinstance(_markers[0], (tuple, list)): + combined_ireq.markers._markers = [_markers, 'and', ireq.markers._markers] + # Return a sorted, de-duped tuple of extras + combined_ireq.extras = tuple( + sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras))) + ) + + # InstallRequirements objects are assumed to come from only one source, and + # so they support only a single comes_from entry. This function breaks this + # model. As a workaround, we deterministically choose a single source for + # the comes_from entry, and add an extra _source_ireqs attribute to keep + # track of multiple sources for use within pip-tools. + if len(source_ireqs) > 1: + if any(ireq.comes_from is None for ireq in source_ireqs): + # None indicates package was directly specified. + combined_ireq.comes_from = None + else: + # Populate the comes_from field from one of the sources. + # Requirement input order is not stable, so we need to sort: + # We choose the shortest entry in order to keep the printed + # representation as concise as possible. + combined_ireq.comes_from = min( + (ireq.comes_from for ireq in source_ireqs), + key=lambda x: (len(str(x)), str(x)), + ) + combined_ireq._source_ireqs = source_ireqs + return combined_ireq + + class Resolver(object): - def __init__(self, constraints, repository, cache=None, prereleases=False, clear_caches=False, allow_unsafe=False): + def __init__( + self, + constraints, + repository, + cache=None, + prereleases=False, + clear_caches=False, + allow_unsafe=False, + ): """ This class resolves a given set of constraints (a collection of InstallRequirement objects) by consulting the given Repository and the @@ -61,13 +125,21 @@ def __init__(self, constraints, repository, cache=None, prereleases=False, clear @property def constraints(self): - return set(self._group_constraints(chain(self.our_constraints, - self.their_constraints))) + return set( + self._group_constraints( + chain( + sorted(self.our_constraints, key=str), + sorted(self.their_constraints, key=str), + ) + ) + ) def resolve_hashes(self, ireqs): """ Finds acceptable hashes for all of the given InstallRequirements. 
""" + log.debug("") + log.debug("Generating hashes:") with self.repository.allow_all_wheels(): return {ireq: self.repository.get_hashes(ireq) for ireq in ireqs} @@ -85,24 +157,28 @@ def resolve(self, max_rounds=10): self.dependency_cache.clear() self.repository.clear_caches() - self.check_constraints(chain(self.our_constraints, - self.their_constraints)) - # Ignore existing packages - os.environ[str('PIP_EXISTS_ACTION')] = str('i') # NOTE: str() wrapping necessary for Python 2/3 compat - for current_round in count(start=1): + os.environ[str("PIP_EXISTS_ACTION")] = str( + "i" + ) # NOTE: str() wrapping necessary for Python 2/3 compat + for current_round in count(start=1): # pragma: no branch if current_round > max_rounds: - raise RuntimeError('No stable configuration of concrete packages ' - 'could be found for the given constraints after ' - '%d rounds of resolving.\n' - 'This is likely a bug.' % max_rounds) - - log.debug('') - log.debug(magenta('{:^60}'.format('ROUND {}'.format(current_round)))) + raise RuntimeError( + "No stable configuration of concrete packages " + "could be found for the given constraints after " + "%d rounds of resolving.\n" + "This is likely a bug." % max_rounds + ) + + log.debug("") + log.debug(magenta("{:^60}".format("ROUND {}".format(current_round)))) has_changed, best_matches = self._resolve_one_round() - log.debug('-' * 60) - log.debug('Result of round {}: {}'.format(current_round, - 'not stable' if has_changed else 'stable, done')) + log.debug("-" * 60) + log.debug( + "Result of round {}: {}".format( + current_round, "not stable" if has_changed else "stable, done" + ) + ) if not has_changed: break @@ -113,17 +189,29 @@ def resolve(self, max_rounds=10): # build cache dir for every round, so this can never happen. self.repository.freshen_build_caches() - del os.environ['PIP_EXISTS_ACTION'] + del os.environ["PIP_EXISTS_ACTION"] + # Only include hard requirements and not pip constraints - return {req for req in best_matches if not req.constraint} + results = {req for req in best_matches if not req.constraint} - @staticmethod - def check_constraints(constraints): - for constraint in constraints: - if constraint.link is not None and not constraint.editable and not constraint.is_wheel: - msg = ('pip-compile does not support URLs as packages, unless they are editable. ' - 'Perhaps add -e option?') - raise UnsupportedConstraint(msg, constraint) + # Filter out unsafe requirements. + self.unsafe_constraints = set() + if not self.allow_unsafe: + # reverse_dependencies is used to filter out packages that are only + # required by unsafe packages. This logic is incomplete, as it would + # fail to filter sub-sub-dependencies of unsafe packages. None of the + # UNSAFE_PACKAGES currently have any dependencies at all (which makes + # sense for installation tools) so this seems sufficient. 
+ reverse_dependencies = self.reverse_dependencies(results) + for req in results.copy(): + required_by = reverse_dependencies.get(req.name.lower(), []) + if req.name in UNSAFE_PACKAGES or ( + required_by and all(name in UNSAFE_PACKAGES for name in required_by) + ): + self.unsafe_constraints.add(req) + results.remove(req) + + return results def _group_constraints(self, constraints): """ @@ -145,26 +233,11 @@ def _group_constraints(self, constraints): ireqs = list(ireqs) editable_ireq = next((ireq for ireq in ireqs if ireq.editable), None) if editable_ireq: - yield editable_ireq # ignore all the other specs: the editable one is the one that counts + # ignore all the other specs: the editable one is the one that counts + yield editable_ireq continue - ireqs = iter(ireqs) - # deepcopy the accumulator so as to not modify the self.our_constraints invariant - combined_ireq = copy.deepcopy(next(ireqs)) - combined_ireq.comes_from = None - for ireq in ireqs: - # NOTE we may be losing some info on dropped reqs here - combined_ireq.req.specifier &= ireq.req.specifier - combined_ireq.constraint &= ireq.constraint - if not combined_ireq.markers: - combined_ireq.markers = ireq.markers - else: - _markers = combined_ireq.markers._markers - if not isinstance(_markers[0], (tuple, list)): - combined_ireq.markers._markers = [_markers, 'and', ireq.markers._markers] - # Return a sorted, de-duped tuple of extras - combined_ireq.extras = tuple(sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras)))) - yield combined_ireq + yield combine_install_requirements(ireqs) def _resolve_one_round(self): """ @@ -179,60 +252,48 @@ def _resolve_one_round(self): """ # Sort this list for readability of terminal output constraints = sorted(self.constraints, key=key_from_ireq) - unsafe_constraints = [] - original_constraints = copy.copy(constraints) - if not self.allow_unsafe: - for constraint in original_constraints: - if constraint.name in UNSAFE_PACKAGES: - constraints.remove(constraint) - constraint.req.specifier = None - unsafe_constraints.append(constraint) - log.debug('Current constraints:') + log.debug("Current constraints:") for constraint in constraints: - log.debug(' {}'.format(constraint)) + log.debug(" {}".format(constraint)) - log.debug('') - log.debug('Finding the best candidates:') + log.debug("") + log.debug("Finding the best candidates:") best_matches = {self.get_best_match(ireq) for ireq in constraints} # Find the new set of secondary dependencies - log.debug('') - log.debug('Finding secondary dependencies:') + log.debug("") + log.debug("Finding secondary dependencies:") - safe_constraints = [] + their_constraints = [] for best_match in best_matches: - for dep in self._iter_dependencies(best_match): - if self.allow_unsafe or dep.name not in UNSAFE_PACKAGES: - safe_constraints.append(dep) + their_constraints.extend(self._iter_dependencies(best_match)) # Grouping constraints to make clean diff between rounds - theirs = set(self._group_constraints(safe_constraints)) + theirs = set(self._group_constraints(sorted(their_constraints, key=str))) # NOTE: We need to compare RequirementSummary objects, since # InstallRequirement does not define equality - diff = {RequirementSummary(t) for t in theirs} - {RequirementSummary(t) for t in self.their_constraints} - removed = ({RequirementSummary(t) for t in self.their_constraints} - - {RequirementSummary(t) for t in theirs}) - unsafe = ({RequirementSummary(t) for t in unsafe_constraints} - - {RequirementSummary(t) for t in self.unsafe_constraints}) - - has_changed = 
len(diff) > 0 or len(removed) > 0 or len(unsafe) > 0 + diff = {RequirementSummary(t) for t in theirs} - { + RequirementSummary(t) for t in self.their_constraints + } + removed = {RequirementSummary(t) for t in self.their_constraints} - { + RequirementSummary(t) for t in theirs + } + + has_changed = len(diff) > 0 or len(removed) > 0 if has_changed: - log.debug('') - log.debug('New dependencies found in this round:') + log.debug("") + log.debug("New dependencies found in this round:") for new_dependency in sorted(diff, key=lambda req: key_from_req(req.req)): - log.debug(' adding {}'.format(new_dependency)) - log.debug('Removed dependencies in this round:') - for removed_dependency in sorted(removed, key=lambda req: key_from_req(req.req)): - log.debug(' removing {}'.format(removed_dependency)) - log.debug('Unsafe dependencies in this round:') - for unsafe_dependency in sorted(unsafe, key=lambda req: key_from_req(req.req)): - log.debug(' remembering unsafe {}'.format(unsafe_dependency)) + log.debug(" adding {}".format(new_dependency)) + log.debug("Removed dependencies in this round:") + for removed_dependency in sorted( + removed, key=lambda req: key_from_req(req.req) + ): + log.debug(" removing {}".format(removed_dependency)) # Store the last round's results in the their_constraints self.their_constraints = theirs - # Store the last round's unsafe constraints - self.unsafe_constraints = unsafe_constraints return has_changed, best_matches def get_best_match(self, ireq): @@ -250,7 +311,7 @@ def get_best_match(self, ireq): Flask==0.10.1 => Flask==0.10.1 """ - if ireq.editable: + if ireq.editable or is_url_requirement(ireq): # NOTE: it's much quicker to immediately return instead of # hitting the index server best_match = ireq @@ -259,23 +320,29 @@ def get_best_match(self, ireq): # hitting the index server best_match = ireq else: - best_match = self.repository.find_best_match(ireq, prereleases=self.prereleases) + best_match = self.repository.find_best_match( + ireq, prereleases=self.prereleases + ) # Format the best match - log.debug(' found candidate {} (constraint was {})'.format(format_requirement(best_match), - format_specifier(ireq))) + log.debug( + " found candidate {} (constraint was {})".format( + format_requirement(best_match), format_specifier(ireq) + ) + ) + best_match.comes_from = ireq.comes_from return best_match def _iter_dependencies(self, ireq): """ - Given a pinned or editable InstallRequirement, collects all the + Given a pinned, url, or editable InstallRequirement, collects all the secondary dependencies for them, either by looking them up in a local cache, or by reaching out to the repository. Editable requirements will never be looked up, as they may have changed at any time. 
""" - if ireq.editable: + if ireq.editable or is_url_requirement(ireq): for dependency in self.repository.get_dependencies(ireq): yield dependency return @@ -289,24 +356,40 @@ def _iter_dependencies(self, ireq): ireq.extras = ireq.extra elif not is_pinned_requirement(ireq): - raise TypeError('Expected pinned or editable requirement, got {}'.format(ireq)) + raise TypeError( + "Expected pinned or editable requirement, got {}".format(ireq) + ) # Now, either get the dependencies from the dependency cache (for # speed), or reach out to the external repository to # download and inspect the package version and get dependencies # from there if ireq not in self.dependency_cache: - log.debug(' {} not in cache, need to check index'.format(format_requirement(ireq)), fg='yellow') + log.debug( + " {} not in cache, need to check index".format( + format_requirement(ireq) + ), + fg="yellow", + ) dependencies = self.repository.get_dependencies(ireq) self.dependency_cache[ireq] = sorted(set(format_requirement(ireq) for ireq in dependencies)) # Example: ['Werkzeug>=0.9', 'Jinja2>=2.4'] dependency_strings = self.dependency_cache[ireq] - log.debug(' {:25} requires {}'.format(format_requirement(ireq), - ', '.join(sorted(dependency_strings, key=lambda s: s.lower())) or '-')) + log.debug( + " {:25} requires {}".format( + format_requirement(ireq), + ", ".join(sorted(dependency_strings, key=lambda s: s.lower())) or "-", + ) + ) for dependency_string in dependency_strings: - yield install_req_from_line(dependency_string, constraint=ireq.constraint) + yield install_req_from_line( + dependency_string, constraint=ireq.constraint, comes_from=ireq + ) def reverse_dependencies(self, ireqs): - non_editable = [ireq for ireq in ireqs if not ireq.editable] + is_non_wheel_url = lambda r: is_url_requirement(r) and not r.link.is_wheel + non_editable = [ + ireq for ireq in ireqs if not (ireq.editable or is_non_wheel_url(ireq)) + ] return self.dependency_cache.reverse_dependencies(non_editable) diff --git a/pipenv/patched/piptools/scripts/compile.py b/pipenv/patched/piptools/scripts/compile.py index 2eaea9b3bd..5ac16e3586 100644 --- a/pipenv/patched/piptools/scripts/compile.py +++ b/pipenv/patched/piptools/scripts/compile.py @@ -1,158 +1,292 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) +from __future__ import absolute_import, division, print_function, unicode_literals import os import sys import tempfile -from .._compat import ( - install_req_from_line, - parse_requirements, -) +from click.utils import safecall from .. import click +from .._compat import install_req_from_line, parse_requirements from ..exceptions import PipToolsError from ..logging import log -from ..pip import get_pip_command, pip_defaults from ..repositories import LocalRequirementsRepository, PyPIRepository from ..resolver import Resolver -from ..utils import (dedup, is_pinned_requirement, key_from_req, UNSAFE_PACKAGES) +from ..utils import ( + UNSAFE_PACKAGES, + create_install_command, + dedup, + get_trusted_hosts, + is_pinned_requirement, + key_from_ireq, + key_from_req, +) from ..writer import OutputWriter -DEFAULT_REQUIREMENTS_FILE = 'requirements.in' -DEFAULT_REQUIREMENTS_OUTPUT_FILE = 'requirements.txt' +DEFAULT_REQUIREMENTS_FILE = "requirements.in" +DEFAULT_REQUIREMENTS_OUTPUT_FILE = "requirements.txt" + +# Get default values of the pip's options (including options from pipenv.patched.notpip.conf). 
+install_command = create_install_command() +pip_defaults = install_command.parser.get_default_values() @click.command() @click.version_option() -@click.option('-v', '--verbose', count=True, help="Show more output") -@click.option('-q', '--quiet', count=True, help="Give less output") -@click.option('-n', '--dry-run', is_flag=True, help="Only show what would happen, don't change anything") -@click.option('-p', '--pre', is_flag=True, default=None, help="Allow resolving to prereleases (default is not)") -@click.option('-r', '--rebuild', is_flag=True, help="Clear any caches upfront, rebuild from scratch") -@click.option('-f', '--find-links', multiple=True, help="Look for archives in this directory or on this HTML page", envvar='PIP_FIND_LINKS') # noqa -@click.option('-i', '--index-url', help="Change index URL (defaults to {})".format(pip_defaults.index_url), envvar='PIP_INDEX_URL') # noqa -@click.option('--extra-index-url', multiple=True, help="Add additional index URL to search", envvar='PIP_EXTRA_INDEX_URL') # noqa -@click.option('--cert', help="Path to alternate CA bundle.") -@click.option('--client-cert', help="Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.") # noqa -@click.option('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST', - help="Mark this host as trusted, even though it does not have " - "valid or any HTTPS.") -@click.option('--header/--no-header', is_flag=True, default=True, - help="Add header to generated file") -@click.option('--index/--no-index', is_flag=True, default=True, - help="Add index URL to generated file") -@click.option('--emit-trusted-host/--no-emit-trusted-host', is_flag=True, - default=True, help="Add trusted host option to generated file") -@click.option('--annotate/--no-annotate', is_flag=True, default=True, - help="Annotate results, indicating where dependencies come from") -@click.option('-U', '--upgrade', is_flag=True, default=False, - help='Try to upgrade all dependencies to their latest versions') -@click.option('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True, - help="Specify particular packages to upgrade.") -@click.option('-o', '--output-file', nargs=1, type=str, default=None, - help=('Output file name. Required if more than one input file is given. ' - 'Will be derived from input file otherwise.')) -@click.option('--allow-unsafe', is_flag=True, default=False, - help="Pin packages considered unsafe: {}".format(', '.join(sorted(UNSAFE_PACKAGES)))) -@click.option('--generate-hashes', is_flag=True, default=False, - help="Generate pip 8 style hashes in the resulting requirements file.") -@click.option('--max-rounds', default=10, - help="Maximum number of rounds before resolving the requirements aborts.") -@click.argument('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True)) -@click.option('--build-isolation/--no-build-isolation', is_flag=True, default=False, - help="Enable isolation when building a modern source distribution. 
" - "Build dependencies specified by PEP 518 must be already installed " - "if build isolation is disabled.") -def cli(verbose, quiet, dry_run, pre, rebuild, find_links, index_url, extra_index_url, - cert, client_cert, trusted_host, header, index, emit_trusted_host, annotate, - upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes, - src_files, max_rounds, build_isolation): +@click.pass_context +@click.option("-v", "--verbose", count=True, help="Show more output") +@click.option("-q", "--quiet", count=True, help="Give less output") +@click.option( + "-n", + "--dry-run", + is_flag=True, + help="Only show what would happen, don't change anything", +) +@click.option( + "-p", + "--pre", + is_flag=True, + default=None, + help="Allow resolving to prereleases (default is not)", +) +@click.option( + "-r", + "--rebuild", + is_flag=True, + help="Clear any caches upfront, rebuild from scratch", +) +@click.option( + "-f", + "--find-links", + multiple=True, + help="Look for archives in this directory or on this HTML page", + envvar="PIP_FIND_LINKS", +) +@click.option( + "-i", + "--index-url", + help="Change index URL (defaults to {})".format(pip_defaults.index_url), + envvar="PIP_INDEX_URL", +) +@click.option( + "--extra-index-url", + multiple=True, + help="Add additional index URL to search", + envvar="PIP_EXTRA_INDEX_URL", +) +@click.option("--cert", help="Path to alternate CA bundle.") +@click.option( + "--client-cert", + help="Path to SSL client certificate, a single file containing " + "the private key and the certificate in PEM format.", +) +@click.option( + "--trusted-host", + multiple=True, + envvar="PIP_TRUSTED_HOST", + help="Mark this host as trusted, even though it does not have " + "valid or any HTTPS.", +) +@click.option( + "--header/--no-header", + is_flag=True, + default=True, + help="Add header to generated file", +) +@click.option( + "--index/--no-index", + is_flag=True, + default=True, + help="Add index URL to generated file", +) +@click.option( + "--emit-trusted-host/--no-emit-trusted-host", + is_flag=True, + default=True, + help="Add trusted host option to generated file", +) +@click.option( + "--annotate/--no-annotate", + is_flag=True, + default=True, + help="Annotate results, indicating where dependencies come from", +) +@click.option( + "-U", + "--upgrade", + is_flag=True, + default=False, + help="Try to upgrade all dependencies to their latest versions", +) +@click.option( + "-P", + "--upgrade-package", + "upgrade_packages", + nargs=1, + multiple=True, + help="Specify particular packages to upgrade.", +) +@click.option( + "-o", + "--output-file", + nargs=1, + default=None, + type=click.File("w+b", atomic=True, lazy=True), + help=( + "Output file name. Required if more than one input file is given. " + "Will be derived from input file otherwise." + ), +) +@click.option( + "--allow-unsafe", + is_flag=True, + default=False, + help="Pin packages considered unsafe: {}".format( + ", ".join(sorted(UNSAFE_PACKAGES)) + ), +) +@click.option( + "--generate-hashes", + is_flag=True, + default=False, + help="Generate pip 8 style hashes in the resulting requirements file.", +) +@click.option( + "--max-rounds", + default=10, + help="Maximum number of rounds before resolving the requirements aborts.", +) +@click.argument("src_files", nargs=-1, type=click.Path(exists=True, allow_dash=True)) +@click.option( + "--build-isolation/--no-build-isolation", + is_flag=True, + default=False, + help="Enable isolation when building a modern source distribution. 
" + "Build dependencies specified by PEP 518 must be already installed " + "if build isolation is disabled.", +) +@click.option( + "--emit-find-links/--no-emit-find-links", + is_flag=True, + default=True, + help="Add the find-links option to generated file", +) +def cli( + ctx, + verbose, + quiet, + dry_run, + pre, + rebuild, + find_links, + index_url, + extra_index_url, + cert, + client_cert, + trusted_host, + header, + index, + emit_trusted_host, + annotate, + upgrade, + upgrade_packages, + output_file, + allow_unsafe, + generate_hashes, + src_files, + max_rounds, + build_isolation, + emit_find_links, +): """Compiles requirements.txt from requirements.in specs.""" log.verbosity = verbose - quiet if len(src_files) == 0: if os.path.exists(DEFAULT_REQUIREMENTS_FILE): src_files = (DEFAULT_REQUIREMENTS_FILE,) - elif os.path.exists('setup.py'): - src_files = ('setup.py',) + elif os.path.exists("setup.py"): + src_files = ("setup.py",) else: - raise click.BadParameter(("If you do not specify an input file, " - "the default is {} or setup.py").format(DEFAULT_REQUIREMENTS_FILE)) - - if src_files == ('-',) and not output_file: - raise click.BadParameter('--output-file is required if input is from stdin') - elif src_files == ('setup.py',) and not output_file: - output_file = DEFAULT_REQUIREMENTS_OUTPUT_FILE + raise click.BadParameter( + ( + "If you do not specify an input file, " + "the default is {} or setup.py" + ).format(DEFAULT_REQUIREMENTS_FILE) + ) - if len(src_files) > 1 and not output_file: - raise click.BadParameter('--output-file is required if two or more input files are given.') + if not output_file: + # An output file must be provided for stdin + if src_files == ("-",): + raise click.BadParameter("--output-file is required if input is from stdin") + # Use default requirements output file if there is a setup.py the source file + elif src_files == ("setup.py",): + file_name = DEFAULT_REQUIREMENTS_OUTPUT_FILE + # An output file must be provided if there are multiple source files + elif len(src_files) > 1: + raise click.BadParameter( + "--output-file is required if two or more input files are given." 
+ ) + # Otherwise derive the output file from the source file + else: + base_name = src_files[0].rsplit(".", 1)[0] + file_name = base_name + ".txt" - if output_file: - dst_file = output_file - else: - base_name = src_files[0].rsplit('.', 1)[0] - dst_file = base_name + '.txt' + output_file = click.open_file(file_name, "w+b", atomic=True, lazy=True) - if upgrade and upgrade_packages: - raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.') + # Close the file at the end of the context execution + ctx.call_on_close(safecall(output_file.close_intelligently)) ### # Setup ### - pip_command = get_pip_command() - pip_args = [] if find_links: for link in find_links: - pip_args.extend(['-f', link]) + pip_args.extend(["-f", link]) if index_url: - pip_args.extend(['-i', index_url]) + pip_args.extend(["-i", index_url]) if extra_index_url: for extra_index in extra_index_url: - pip_args.extend(['--extra-index-url', extra_index]) + pip_args.extend(["--extra-index-url", extra_index]) if cert: - pip_args.extend(['--cert', cert]) + pip_args.extend(["--cert", cert]) if client_cert: - pip_args.extend(['--client-cert', client_cert]) + pip_args.extend(["--client-cert", client_cert]) if pre: - pip_args.extend(['--pre']) + pip_args.extend(["--pre"]) if trusted_host: for host in trusted_host: - pip_args.extend(['--trusted-host', host]) + pip_args.extend(["--trusted-host", host]) - pip_options, _ = pip_command.parse_args(pip_args) + repository = PyPIRepository(pip_args, build_isolation=build_isolation) - session = pip_command._build_session(pip_options) - repository = PyPIRepository(pip_options, session, build_isolation) + # Parse all constraints coming from --upgrade-package/-P + upgrade_reqs_gen = (install_req_from_line(pkg) for pkg in upgrade_packages) + upgrade_install_reqs = { + key_from_req(install_req.req): install_req for install_req in upgrade_reqs_gen + } - upgrade_install_reqs = {} # Proxy with a LocalRequirementsRepository if --upgrade is not specified # (= default invocation) - if not upgrade and os.path.exists(dst_file): - ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options) - # Exclude packages from --upgrade-package/-P from the existing pins: We want to upgrade. 
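# --- Illustrative aside -------------------------------------------------------
# Sketch of the pin filtering this hunk reworks: pins from the previous output
# file survive, except for packages explicitly named via --upgrade-package/-P.
def filter_pins(existing_pins, upgrade_keys):
    return {k: v for k, v in existing_pins.items() if k not in upgrade_keys}

# filter_pins({"flask": "flask==1.0.2", "six": "six==1.12.0"}, {"six"})
# == {"flask": "flask==1.0.2"}
# ------------------------------------------------------------------------------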
- upgrade_reqs_gen = (install_req_from_line(pkg) for pkg in upgrade_packages) - upgrade_install_reqs = {key_from_req(install_req.req): install_req for install_req in upgrade_reqs_gen} - - existing_pins = {key_from_req(ireq.req): ireq - for ireq in ireqs - if is_pinned_requirement(ireq) and key_from_req(ireq.req) not in upgrade_install_reqs} - repository = LocalRequirementsRepository(existing_pins, repository) - - log.debug('Using indexes:') - # remove duplicate index urls before processing - repository.finder.index_urls = list(dedup(repository.finder.index_urls)) - for index_url in repository.finder.index_urls: - log.debug(' {}'.format(index_url)) + if not upgrade and os.path.exists(output_file.name): + ireqs = parse_requirements( + output_file.name, + finder=repository.finder, + session=repository.session, + options=repository.options, + ) - if repository.finder.find_links: - log.debug('') - log.debug('Configuration:') - for find_link in repository.finder.find_links: - log.debug(' -f {}'.format(find_link)) + # Exclude packages from --upgrade-package/-P from the existing + # constraints + existing_pins = { + key_from_req(ireq.req): ireq + for ireq in ireqs + if is_pinned_requirement(ireq) + and key_from_req(ireq.req) not in upgrade_install_reqs + } + repository = LocalRequirementsRepository(existing_pins, repository) ### # Parsing/collecting initial requirements @@ -160,38 +294,68 @@ def cli(verbose, quiet, dry_run, pre, rebuild, find_links, index_url, extra_inde constraints = [] for src_file in src_files: - is_setup_file = os.path.basename(src_file) == 'setup.py' - if is_setup_file or src_file == '-': + is_setup_file = os.path.basename(src_file) == "setup.py" + if is_setup_file or src_file == "-": # pip requires filenames and not files. Since we want to support # piping from stdin, we need to briefly save the input from stdin # to a temporary file and have pip read that. also used for # reading requirements from install_requires in setup.py. 
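# --- Illustrative aside -------------------------------------------------------
# Minimal model of the spool-to-disk shim in this hunk: parse_requirements
# needs a real filename, so stdin (or setup.py install_requires) is written to
# a temporary file first.
import os
import tempfile

def spool_to_file(text):
    tmp = tempfile.NamedTemporaryFile(mode="wt", delete=False)
    tmp.write(text)
    tmp.close()
    return tmp.name

path = spool_to_file("requests>=2.0\n")
try:
    assert open(path).read() == "requests>=2.0\n"
finally:
    os.unlink(path)
# ------------------------------------------------------------------------------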
- tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False) + tmpfile = tempfile.NamedTemporaryFile(mode="wt", delete=False) if is_setup_file: from distutils.core import run_setup + dist = run_setup(src_file) - tmpfile.write('\n'.join(dist.install_requires)) + tmpfile.write("\n".join(dist.install_requires)) else: tmpfile.write(sys.stdin.read()) tmpfile.flush() - constraints.extend(parse_requirements( - tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options)) + constraints.extend( + parse_requirements( + tmpfile.name, + finder=repository.finder, + session=repository.session, + options=repository.options, + ) + ) else: - constraints.extend(parse_requirements( - src_file, finder=repository.finder, session=repository.session, options=pip_options)) + constraints.extend( + parse_requirements( + src_file, + finder=repository.finder, + session=repository.session, + options=repository.options, + ) + ) + + primary_packages = { + key_from_ireq(ireq) for ireq in constraints if not ireq.constraint + } constraints.extend(upgrade_install_reqs.values()) # Filter out pip environment markers which do not match (PEP496) - constraints = [req for req in constraints - if req.markers is None or req.markers.evaluate()] + constraints = [ + req for req in constraints if req.markers is None or req.markers.evaluate() + ] - # Check the given base set of constraints first - Resolver.check_constraints(constraints) + log.debug("Using indexes:") + for index_url in dedup(repository.finder.index_urls): + log.debug(" {}".format(index_url)) + + if repository.finder.find_links: + log.debug("") + log.debug("Configuration:") + for find_link in dedup(repository.finder.find_links): + log.debug(" -f {}".format(find_link)) try: - resolver = Resolver(constraints, repository, prereleases=pre, - clear_caches=rebuild, allow_unsafe=allow_unsafe) + resolver = Resolver( + constraints, + repository, + prereleases=repository.finder.allow_all_prereleases or pre, + clear_caches=rebuild, + allow_unsafe=allow_unsafe, + ) results = resolver.resolve(max_rounds=max_rounds) if generate_hashes: hashes = resolver.resolve_hashes(results) @@ -201,7 +365,7 @@ def cli(verbose, quiet, dry_run, pre, rebuild, find_links, index_url, extra_inde log.error(str(e)) sys.exit(2) - log.debug('') + log.debug("") ## # Output @@ -217,12 +381,14 @@ def cli(verbose, quiet, dry_run, pre, rebuild, find_links, index_url, extra_inde # TODO (1b): perhaps it's easiest if the dependency cache has an API # that could take InstallRequirements directly, like: # - # cache.set(ireq, ...) + # cache.set(ireq, ...) 
# # then, when ireq is editable, it would store in # # editables[egg_name][link_without_fragment] = deps - # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'} + # editables['pip-tools']['git+...ols.git@future'] = { + # 'click>=3.0', 'six' + # } # # otherwise: # @@ -232,23 +398,34 @@ def cli(verbose, quiet, dry_run, pre, rebuild, find_links, index_url, extra_inde if annotate: reverse_dependencies = resolver.reverse_dependencies(results) - writer = OutputWriter(src_files, dst_file, dry_run=dry_run, - emit_header=header, emit_index=index, - emit_trusted_host=emit_trusted_host, - annotate=annotate, - generate_hashes=generate_hashes, - default_index_url=repository.DEFAULT_INDEX_URL, - index_urls=repository.finder.index_urls, - trusted_hosts=pip_options.trusted_hosts, - format_control=repository.finder.format_control, - allow_unsafe=allow_unsafe) - writer.write(results=results, - unsafe_requirements=resolver.unsafe_constraints, - reverse_dependencies=reverse_dependencies, - primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint}, - markers={key_from_req(ireq.req): ireq.markers - for ireq in constraints if ireq.markers}, - hashes=hashes) + writer = OutputWriter( + src_files, + output_file, + click_ctx=ctx, + dry_run=dry_run, + emit_header=header, + emit_index=index, + emit_trusted_host=emit_trusted_host, + annotate=annotate, + generate_hashes=generate_hashes, + default_index_url=repository.DEFAULT_INDEX_URL, + index_urls=repository.finder.index_urls, + trusted_hosts=get_trusted_hosts(repository.finder), + format_control=repository.finder.format_control, + allow_unsafe=allow_unsafe, + find_links=repository.finder.find_links, + emit_find_links=emit_find_links, + ) + writer.write( + results=results, + unsafe_requirements=resolver.unsafe_constraints, + reverse_dependencies=reverse_dependencies, + primary_packages=primary_packages, + markers={ + key_from_ireq(ireq): ireq.markers for ireq in constraints if ireq.markers + }, + hashes=hashes, + ) if dry_run: - log.warning('Dry-run, so nothing updated.') + log.info("Dry-run, so nothing updated.") diff --git a/pipenv/patched/piptools/scripts/sync.py b/pipenv/patched/piptools/scripts/sync.py index 610c1d5e28..40c086a4f0 100644 --- a/pipenv/patched/piptools/scripts/sync.py +++ b/pipenv/patched/piptools/scripts/sync.py @@ -1,52 +1,112 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) +from __future__ import absolute_import, division, print_function, unicode_literals import os import sys - from .. 
import click, sync -from .._compat import parse_requirements, get_installed_distributions +from .._compat import get_installed_distributions, parse_requirements from ..exceptions import PipToolsError from ..logging import log from ..utils import flat_map -DEFAULT_REQUIREMENTS_FILE = 'requirements.txt' +DEFAULT_REQUIREMENTS_FILE = "requirements.txt" @click.command() @click.version_option() -@click.option('-n', '--dry-run', is_flag=True, help="Only show what would happen, don't change anything") -@click.option('--force', is_flag=True, help="Proceed even if conflicts are found") -@click.option('-f', '--find-links', multiple=True, help="Look for archives in this directory or on this HTML page", envvar='PIP_FIND_LINKS') # noqa -@click.option('-i', '--index-url', help="Change index URL (defaults to PyPI)", envvar='PIP_INDEX_URL') -@click.option('--extra-index-url', multiple=True, help="Add additional index URL to search", envvar='PIP_EXTRA_INDEX_URL') # noqa -@click.option('--no-index', is_flag=True, help="Ignore package index (only looking at --find-links URLs instead)") -@click.option('-q', '--quiet', default=False, is_flag=True, help="Give less output") -@click.option('--user', 'user_only', is_flag=True, help="Restrict attention to user directory") -@click.argument('src_files', required=False, type=click.Path(exists=True), nargs=-1) -def cli(dry_run, force, find_links, index_url, extra_index_url, no_index, quiet, user_only, src_files): +@click.option( + "-a", + "--ask", + is_flag=True, + help="Show what would happen, then ask whether to continue", +) +@click.option( + "-n", + "--dry-run", + is_flag=True, + help="Only show what would happen, don't change anything", +) +@click.option("--force", is_flag=True, help="Proceed even if conflicts are found") +@click.option( + "-f", + "--find-links", + multiple=True, + help="Look for archives in this directory or on this HTML page", + envvar="PIP_FIND_LINKS", +) +@click.option( + "-i", + "--index-url", + help="Change index URL (defaults to PyPI)", + envvar="PIP_INDEX_URL", +) +@click.option( + "--extra-index-url", + multiple=True, + help="Add additional index URL to search", + envvar="PIP_EXTRA_INDEX_URL", +) +@click.option( + "--trusted-host", + multiple=True, + help="Mark this host as trusted, even though it does not have valid or any HTTPS.", +) +@click.option( + "--no-index", + is_flag=True, + help="Ignore package index (only looking at --find-links URLs instead)", +) +@click.option("-q", "--quiet", default=False, is_flag=True, help="Give less output") +@click.option( + "--user", "user_only", is_flag=True, help="Restrict attention to user directory" +) +@click.option("--cert", help="Path to alternate CA bundle.") +@click.option( + "--client-cert", + help="Path to SSL client certificate, a single file containing " + "the private key and the certificate in PEM format.", +) +@click.argument("src_files", required=False, type=click.Path(exists=True), nargs=-1) +def cli( + ask, + dry_run, + force, + find_links, + index_url, + extra_index_url, + trusted_host, + no_index, + quiet, + user_only, + cert, + client_cert, + src_files, +): """Synchronize virtual environment with requirements.txt.""" if not src_files: if os.path.exists(DEFAULT_REQUIREMENTS_FILE): src_files = (DEFAULT_REQUIREMENTS_FILE,) else: - msg = 'No requirement files given and no {} found in the current directory' + msg = "No requirement files given and no {} found in the current directory" log.error(msg.format(DEFAULT_REQUIREMENTS_FILE)) sys.exit(2) - if any(src_file.endswith('.in') for 
src_file in src_files): - msg = ('Some input files have the .in extension, which is most likely an error and can ' - 'cause weird behaviour. You probably meant to use the corresponding *.txt file?') + if any(src_file.endswith(".in") for src_file in src_files): + msg = ( + "Some input files have the .in extension, which is most likely an error " + "and can cause weird behaviour. You probably meant to use " + "the corresponding *.txt file?" + ) if force: - log.warning('WARNING: ' + msg) + log.warning("WARNING: " + msg) else: - log.error('ERROR: ' + msg) + log.error("ERROR: " + msg) sys.exit(2) - requirements = flat_map(lambda src: parse_requirements(src, session=True), - src_files) + requirements = flat_map( + lambda src: parse_requirements(src, session=True), src_files + ) try: requirements = sync.merge(requirements, ignore_conflicts=force) @@ -59,16 +119,31 @@ def cli(dry_run, force, find_links, index_url, extra_index_url, no_index, quiet, install_flags = [] for link in find_links or []: - install_flags.extend(['-f', link]) + install_flags.extend(["-f", link]) if no_index: - install_flags.append('--no-index') + install_flags.append("--no-index") if index_url: - install_flags.extend(['-i', index_url]) + install_flags.extend(["-i", index_url]) if extra_index_url: for extra_index in extra_index_url: - install_flags.extend(['--extra-index-url', extra_index]) + install_flags.extend(["--extra-index-url", extra_index]) + if trusted_host: + for host in trusted_host: + install_flags.extend(["--trusted-host", host]) if user_only: - install_flags.append('--user') + install_flags.append("--user") + if cert: + install_flags.extend(["--cert", cert]) + if client_cert: + install_flags.extend(["--client-cert", client_cert]) - sys.exit(sync.sync(to_install, to_uninstall, verbose=(not quiet), dry_run=dry_run, - install_flags=install_flags)) + sys.exit( + sync.sync( + to_install, + to_uninstall, + verbose=(not quiet), + dry_run=dry_run, + install_flags=install_flags, + ask=ask, + ) + ) diff --git a/pipenv/patched/piptools/sync.py b/pipenv/patched/piptools/sync.py index f111764ee7..00b1ae8e34 100644 --- a/pipenv/patched/piptools/sync.py +++ b/pipenv/patched/piptools/sync.py @@ -2,20 +2,25 @@ import os import sys import tempfile -from subprocess import check_call +from subprocess import check_call # nosec -from piptools._compat import stdlib_pkgs, DEV_PKGS from . import click -from .exceptions import IncompatibleRequirements, UnsupportedConstraint -from .utils import flat_map, format_requirement, key_from_ireq, key_from_req, get_hashes_from_ireq - -PACKAGES_TO_IGNORE = [ - '-markerlib', - 'pip', - 'pip-tools', - 'pip-review', - 'pkg-resources', -] + list(stdlib_pkgs) + list(DEV_PKGS) +from ._compat import DEV_PKGS, stdlib_pkgs +from .exceptions import IncompatibleRequirements +from .utils import ( + flat_map, + format_requirement, + get_hashes_from_ireq, + is_url_requirement, + key_from_ireq, + key_from_req, +) + +PACKAGES_TO_IGNORE = ( + ["-markerlib", "pip", "pip-tools", "pip-review", "pkg-resources"] + + list(stdlib_pkgs) + + list(DEV_PKGS) +) def dependency_tree(installed_keys, root_key): @@ -63,19 +68,19 @@ def get_dists_to_ignore(installed): requirements. 
""" installed_keys = {key_from_req(r): r for r in installed} - return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)) + return list( + flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE) + ) def merge(requirements, ignore_conflicts): by_key = {} for ireq in requirements: - if ireq.link is not None and not ireq.editable: - msg = ('pip-compile does not support URLs as packages, unless they are editable. ' - 'Perhaps add -e option?') - raise UnsupportedConstraint(msg, ireq) - - key = ireq.link or key_from_req(ireq.req) + # Limitation: URL requirements are merged by precise string match, so + # "file:///example.zip#egg=example", "file:///example.zip", and + # "example==1.0" will not merge with each other + key = key_from_ireq(ireq) if not ignore_conflicts: existing_ireq = by_key.get(key) @@ -87,16 +92,35 @@ def merge(requirements, ignore_conflicts): # TODO: Always pick the largest specifier in case of a conflict by_key[key] = ireq - return by_key.values() +def diff_key_from_ireq(ireq): + """ + Calculate a key for comparing a compiled requirement with installed modules. + For URL requirements, only provide a useful key if the url includes + #egg=name==version, which will set ireq.req.name and ireq.specifier. + Otherwise return ireq.link so the key will not match and the package will + reinstall. Reinstall is necessary to ensure that packages will reinstall + if the URL is changed but the version is not. + """ + if is_url_requirement(ireq): + if ( + ireq.req + and (getattr(ireq.req, "key", None) or getattr(ireq.req, "name", None)) + and ireq.specifier + ): + return key_from_ireq(ireq) + return str(ireq.link) + return key_from_ireq(ireq) + + def diff(compiled_requirements, installed_dists): """ Calculate which packages should be installed or uninstalled, given a set of compiled requirements and a list of currently installed modules. """ - requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements} + requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements} satisfied = set() # holds keys to_install = set() # holds InstallRequirement objects @@ -120,33 +144,54 @@ def diff(compiled_requirements, installed_dists): return (to_install, to_uninstall) -def sync(to_install, to_uninstall, verbose=False, dry_run=False, install_flags=None): +def sync( + to_install, + to_uninstall, + verbose=False, + dry_run=False, + install_flags=None, + ask=False, +): """ Install and uninstalls the given sets of modules. 
""" if not to_uninstall and not to_install: - click.echo("Everything up-to-date") + if verbose: + click.echo("Everything up-to-date") + return 0 pip_flags = [] if not verbose: - pip_flags += ['-q'] + pip_flags += ["-q"] + + if ask: + dry_run = True - if to_uninstall: - if dry_run: + if dry_run: + if to_uninstall: click.echo("Would uninstall:") for pkg in to_uninstall: click.echo(" {}".format(pkg)) - else: - check_call([sys.executable, '-m', 'pip', 'uninstall', '-y'] + pip_flags + sorted(to_uninstall)) - if to_install: - if install_flags is None: - install_flags = [] - if dry_run: + if to_install: click.echo("Would install:") for ireq in to_install: click.echo(" {}".format(format_requirement(ireq))) - else: + + if ask and click.confirm("Would you like to proceed with these changes?"): + dry_run = False + + if not dry_run: + if to_uninstall: + check_call( # nosec + [sys.executable, "-m", "pip", "uninstall", "-y"] + + pip_flags + + sorted(to_uninstall) + ) + + if to_install: + if install_flags is None: + install_flags = [] # prepare requirement lines req_lines = [] for ireq in sorted(to_install, key=key_from_ireq): @@ -154,13 +199,15 @@ def sync(to_install, to_uninstall, verbose=False, dry_run=False, install_flags=N req_lines.append(format_requirement(ireq, hashes=ireq_hashes)) # save requirement lines to a temporary file - tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False) - tmp_req_file.write('\n'.join(req_lines)) + tmp_req_file = tempfile.NamedTemporaryFile(mode="wt", delete=False) + tmp_req_file.write("\n".join(req_lines)) tmp_req_file.close() try: - check_call( - [sys.executable, '-m', 'pip', 'install', '-r', tmp_req_file.name] + pip_flags + install_flags + check_call( # nosec + [sys.executable, "-m", "pip", "install", "-r", tmp_req_file.name] + + pip_flags + + install_flags ) finally: os.unlink(tmp_req_file.name) diff --git a/pipenv/patched/piptools/utils.py b/pipenv/patched/piptools/utils.py index fb846cc4a6..688158348b 100644 --- a/pipenv/patched/piptools/utils.py +++ b/pipenv/patched/piptools/utils.py @@ -1,25 +1,31 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) +from __future__ import absolute_import, division, print_function, unicode_literals import os import sys -from itertools import chain, groupby from collections import OrderedDict +from itertools import chain, groupby import six - +from click.utils import LazyFile +from six.moves import shlex_quote from pipenv.vendor.packaging.specifiers import SpecifierSet, InvalidSpecifier from pipenv.vendor.packaging.version import Version, InvalidVersion, parse as parse_version from pipenv.vendor.packaging.markers import Marker, Op, Value, Variable -from ._compat import install_req_from_line +from ._compat import PIP_VERSION, InstallCommand, install_req_from_line from .click import style - -UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'} - +UNSAFE_PACKAGES = {"setuptools", "distribute", "pip"} +COMPILE_EXCLUDE_OPTIONS = { + "--dry-run", + "--quiet", + "--rebuild", + "--upgrade", + "--upgrade-package", + "--verbose", +} def simplify_markers(ireq): @@ -96,19 +102,19 @@ def key_from_ireq(ireq): def key_from_req(req): """Get an all-lowercase version of the requirement's name.""" - if hasattr(req, 'key'): + if hasattr(req, "key"): # from pkg_resources, such as installed dists for pip-sync key = req.key else: # from packaging, such as install requirements from requirements.txt key = req.name - key = key.replace('_', '-').lower() + key = key.replace("_", 
"-").lower() return key def comment(text): - return style(text, fg='green') + return style(text, fg="green") def make_install_requirement(name, version, extras, markers, constraint=False): @@ -158,18 +164,28 @@ def _requirement_to_str_lowercase_name(requirement): return "".join(parts) +def is_url_requirement(ireq): + """ + Return True if requirement was specified as a path or URL. + ireq.original_link will have been set by InstallRequirement.__init__ + """ + return bool(ireq.original_link) + + def format_requirement(ireq, marker=None, hashes=None): """ Generic formatter for pretty printing InstallRequirements to the terminal in a less verbose way than using its `__str__` method. """ if ireq.editable: - line = '-e {}'.format(ireq.link) + line = "-e {}".format(ireq.link.url) + elif is_url_requirement(ireq): + line = ireq.link.url else: line = _requirement_to_str_lowercase_name(ireq.req) if marker and ';' not in line: - line = '{}; {}'.format(line, marker) + line = "{}; {}".format(line, marker) if hashes: for hash_ in sorted(hashes): @@ -186,7 +202,7 @@ def format_specifier(ireq): # TODO: Ideally, this is carried over to the pip library itself specs = ireq.specifier._specs if ireq.req is not None else [] specs = sorted(specs, key=lambda x: x._spec[1]) - return ','.join(str(s) for s in specs) or '<any>' + return ",".join(str(s) for s in specs) or "<any>" def is_pinned_requirement(ireq): @@ -209,19 +225,20 @@ def is_pinned_requirement(ireq): if ireq.editable: return False - if len(ireq.specifier._specs) != 1: + if ireq.req is None or len(ireq.specifier._specs) != 1: return False op, version = next(iter(ireq.specifier._specs))._spec - return (op == '==' or op == '===') and not version.endswith('.*') + return (op == "==" or op == "===") and not version.endswith(".*") def as_tuple(ireq): """ - Pulls out the (name: str, version:str, extras:(str)) tuple from the pinned InstallRequirement. + Pulls out the (name: str, version:str, extras:(str)) tuple from + the pinned InstallRequirement. """ if not is_pinned_requirement(ireq): - raise TypeError('Expected a pinned InstallRequirement, got {}'.format(ireq)) + raise TypeError("Expected a pinned InstallRequirement, got {}".format(ireq)) name = key_from_req(ireq.req) version = next(iter(ireq.specifier._specs))._spec[1] @@ -288,9 +305,14 @@ def lookup_table(values, key=None, keyval=None, unique=False, use_lists=False): """ if keyval is None: if key is None: - keyval = (lambda v: v) + + def keyval(v): + return v + else: - keyval = (lambda v: (key(v), v)) + + def keyval(v): + return (key(v), v) if unique: return dict(keyval(v) for v in values) @@ -321,7 +343,7 @@ def dedup(iterable): def name_from_req(req): """Get the name of the requirement""" - if hasattr(req, 'project_name'): + if hasattr(req, "project_name"): # from pkg_resources, such as installed dists for pip-sync return req.project_name else: @@ -339,17 +361,13 @@ def fs_str(string): On Python 3 returns the string as is, since Python 3 uses unicode paths and the input string shouldn't be bytes. 
- >>> fs_str(u'some path component/Something') - 'some path component/Something' - >>> assert isinstance(fs_str('whatever'), str) - >>> assert isinstance(fs_str(u'whatever'), str) - :type string: str|unicode :rtype: str """ if isinstance(string, str): return string - assert not isinstance(string, bytes) + if isinstance(string, bytes): + raise AssertionError return string.encode(_fs_encoding) @@ -358,12 +376,121 @@ def fs_str(string): def get_hashes_from_ireq(ireq): """ - Given an InstallRequirement, return a list of string hashes in the format "{algorithm}:{hash}". - Return an empty list if there are no hashes in the requirement options. + Given an InstallRequirement, return a list of string hashes in + the format "{algorithm}:{hash}". Return an empty list if there are no hashes + in the requirement options. """ result = [] - ireq_hashes = ireq.options.get('hashes', {}) + ireq_hashes = ireq.options.get("hashes", {}) for algorithm, hexdigests in ireq_hashes.items(): for hash_ in hexdigests: result.append("{}:{}".format(algorithm, hash_)) return result + + +def force_text(s): + """ + Return a string representing `s`. + """ + if s is None: + return "" + if not isinstance(s, six.string_types): + return six.text_type(s) + return s + + +def get_compile_command(click_ctx): + """ + Returns a normalized compile command depending on cli context. + + The command will be normalized by: + - expanding options short to long + - removing values that are already default + - sorting the arguments + - removing one-off arguments like '--upgrade' + - removing arguments that don't change build behaviour like '--verbose' + """ + from piptools.scripts.compile import cli + + # Map of the compile cli options (option name -> click.Option) + compile_options = {option.name: option for option in cli.params} + + left_args = [] + right_args = [] + + for option_name, value in click_ctx.params.items(): + option = compile_options[option_name] + + # Get the latest option name (usually it'll be a long name) + option_long_name = option.opts[-1] + + # Collect variadic args separately, they will be added + # at the end of the command later + if option.nargs < 0: + right_args.extend([shlex_quote(force_text(val)) for val in value]) + continue + + # Exclude one-off options (--upgrade/--upgrade-package/--rebuild/...) + # or options that don't change compile behaviour (--verbose/--dry-run/...) 
+ if option_long_name in COMPILE_EXCLUDE_OPTIONS: + continue + + # Skip options without a value + if option.default is None and not value: + continue + + # Skip options with a default value + if option.default == value: + continue + + # Use a file name for file-like objects + if isinstance(value, LazyFile): + value = value.name + + # Convert value to the list + if not isinstance(value, (tuple, list)): + value = [value] + + for val in value: + # Flags don't have a value, thus add to args true or false option long name + if option.is_flag: + # If there are false-options, choose an option name depending on a value + if option.secondary_opts: + # Get the latest false-option + secondary_option_long_name = option.secondary_opts[-1] + arg = option_long_name if val else secondary_option_long_name + # There are no false-options, use true-option + else: + arg = option_long_name + left_args.append(shlex_quote(arg)) + # Append to args the option with a value + else: + left_args.append( + "{option}={value}".format( + option=option_long_name, value=shlex_quote(force_text(val)) + ) + ) + + return " ".join(["pip-compile"] + sorted(left_args) + sorted(right_args)) + + +def create_install_command(): + """ + Return an instance of InstallCommand. + """ + if PIP_VERSION < (19, 3): + return InstallCommand() + + from pipenv.patched.notpip._internal.commands import create_command + + return create_command("install") + + +def get_trusted_hosts(finder): + """ + Returns an iterable of trusted hosts from a given finder. + """ + if PIP_VERSION < (19, 2): + return (host for _, host, _ in finder.secure_origins) + + return finder.trusted_hosts diff --git a/pipenv/patched/piptools/writer.py b/pipenv/patched/piptools/writer.py index 9ac97792f3..47cfbbc4a9 100644 --- a/pipenv/patched/piptools/writer.py +++ b/pipenv/patched/piptools/writer.py @@ -1,21 +1,64 @@ +from __future__ import unicode_literals + import os -import sys from itertools import chain -from ._compat import ExitStack -from .click import unstyle, get_os_args -from .io import AtomicSaver +from .click import unstyle from .logging import log -from .utils import comment, dedup, format_requirement, key_from_req, UNSAFE_PACKAGES +from .utils import ( + UNSAFE_PACKAGES, + comment, + dedup, + format_requirement, + get_compile_command, + key_from_ireq, +) + +MESSAGE_UNHASHED_PACKAGE = comment( + "# WARNING: pip install will require the following package to be hashed." + "\n# Consider using a hashable URL like " + "https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip" +) + +MESSAGE_UNSAFE_PACKAGES_UNPINNED = comment( + "# WARNING: The following packages were not pinned, but pip requires them to be" + "\n# pinned when the requirements file includes hashes. " + "Consider using the --allow-unsafe flag." +) + +MESSAGE_UNSAFE_PACKAGES = comment( + "# The following packages are considered to be unsafe in a requirements file:" +) + +MESSAGE_UNINSTALLABLE = ( + "The generated requirements file may be rejected by pip install. " + "See # WARNING lines for details." 
+) class OutputWriter(object): - def __init__(self, src_files, dst_file, dry_run, emit_header, emit_index, - emit_trusted_host, annotate, generate_hashes, - default_index_url, index_urls, trusted_hosts, format_control, - allow_unsafe): + def __init__( + self, + src_files, + dst_file, + click_ctx, + dry_run, + emit_header, + emit_index, + emit_trusted_host, + annotate, + generate_hashes, + default_index_url, + index_urls, + trusted_hosts, + format_control, + allow_unsafe, + find_links, + emit_find_links, + ): self.src_files = src_files self.dst_file = dst_file + self.click_ctx = click_ctx self.dry_run = dry_run self.emit_header = emit_header self.emit_index = emit_index @@ -27,108 +70,176 @@ def __init__(self, src_files, dst_file, dry_run, emit_header, emit_index, self.trusted_hosts = trusted_hosts self.format_control = format_control self.allow_unsafe = allow_unsafe + self.find_links = find_links + self.emit_find_links = emit_find_links def _sort_key(self, ireq): return (not ireq.editable, str(ireq.req).lower()) def write_header(self): if self.emit_header: - yield comment('#') - yield comment('# This file is autogenerated by pip-compile') - yield comment('# To update, run:') - yield comment('#') - custom_cmd = os.environ.get('CUSTOM_COMPILE_COMMAND') - if custom_cmd: - yield comment('# {}'.format(custom_cmd)) - else: - prog = os.path.basename(sys.argv[0]) - args = ' '.join(get_os_args()) - yield comment('# {prog} {args}'.format(prog=prog, args=args)) - yield comment('#') + yield comment("#") + yield comment("# This file is autogenerated by pip-compile") + yield comment("# To update, run:") + yield comment("#") + compile_command = os.environ.get( + "CUSTOM_COMPILE_COMMAND" + ) or get_compile_command(self.click_ctx) + yield comment("# {}".format(compile_command)) + yield comment("#") def write_index_options(self): if self.emit_index: for index, index_url in enumerate(dedup(self.index_urls)): - if index_url.rstrip('/') == self.default_index_url: + if index_url.rstrip("/") == self.default_index_url: continue - flag = '--index-url' if index == 0 else '--extra-index-url' - yield '{} {}'.format(flag, index_url) + flag = "--index-url" if index == 0 else "--extra-index-url" + yield "{} {}".format(flag, index_url) def write_trusted_hosts(self): if self.emit_trusted_host: for trusted_host in dedup(self.trusted_hosts): - yield '--trusted-host {}'.format(trusted_host) + yield "--trusted-host {}".format(trusted_host) def write_format_controls(self): for nb in dedup(self.format_control.no_binary): - yield '--no-binary {}'.format(nb) + yield "--no-binary {}".format(nb) for ob in dedup(self.format_control.only_binary): - yield '--only-binary {}'.format(ob) + yield "--only-binary {}".format(ob) + + def write_find_links(self): + if self.emit_find_links: + for find_link in dedup(self.find_links): + yield "--find-links {}".format(find_link) def write_flags(self): emitted = False - for line in chain(self.write_index_options(), - self.write_trusted_hosts(), - self.write_format_controls()): + for line in chain( + self.write_index_options(), + self.write_find_links(), + self.write_trusted_hosts(), + self.write_format_controls(), + ): emitted = True yield line if emitted: - yield '' + yield "" + + def _iter_lines( + self, + results, + unsafe_requirements=None, + reverse_dependencies=None, + primary_packages=None, + markers=None, + hashes=None, + ): + # default values + unsafe_requirements = unsafe_requirements or [] + reverse_dependencies = reverse_dependencies or {} + primary_packages = primary_packages or [] + 
markers = markers or {} + hashes = hashes or {} + + # Check for unhashed or unpinned packages if at least one package does have + # hashes, which will trigger pip install's --require-hashes mode. + warn_uninstallable = False + has_hashes = hashes and any(hash for hash in hashes.values()) + + yielded = False - def _iter_lines(self, results, unsafe_requirements, reverse_dependencies, - primary_packages, markers, hashes): for line in self.write_header(): yield line + yielded = True for line in self.write_flags(): yield line + yielded = True - unsafe_requirements = {r for r in results if r.name in UNSAFE_PACKAGES} if not unsafe_requirements else unsafe_requirements # noqa + unsafe_requirements = ( + {r for r in results if r.name in UNSAFE_PACKAGES} + if not unsafe_requirements + else unsafe_requirements + ) packages = {r for r in results if r.name not in UNSAFE_PACKAGES} - packages = sorted(packages, key=self._sort_key) - - for ireq in packages: - line = self._format_requirement( - ireq, reverse_dependencies, primary_packages, - markers.get(key_from_req(ireq.req)), hashes=hashes) - yield line + if packages: + packages = sorted(packages, key=self._sort_key) + for ireq in packages: + if has_hashes and not hashes.get(ireq): + yield MESSAGE_UNHASHED_PACKAGE + warn_uninstallable = True + line = self._format_requirement( + ireq, + reverse_dependencies, + primary_packages, + markers.get(key_from_ireq(ireq)), + hashes=hashes, + ) + yield line + yielded = True if unsafe_requirements: unsafe_requirements = sorted(unsafe_requirements, key=self._sort_key) - yield '' - yield comment('# The following packages are considered to be unsafe in a requirements file:') + yield "" + yielded = True + if has_hashes and not self.allow_unsafe: + yield MESSAGE_UNSAFE_PACKAGES_UNPINNED + warn_uninstallable = True + else: + yield MESSAGE_UNSAFE_PACKAGES for ireq in unsafe_requirements: - req = self._format_requirement(ireq, - reverse_dependencies, - primary_packages, - marker=markers.get(key_from_req(ireq.req)), - hashes=hashes) + ireq_key = key_from_ireq(ireq) if not self.allow_unsafe: - yield comment('# {}'.format(req)) + yield comment("# {}".format(ireq_key)) else: - yield req - - def write(self, results, unsafe_requirements, reverse_dependencies, - primary_packages, markers, hashes): - with ExitStack() as stack: - f = None + line = self._format_requirement( + ireq, + reverse_dependencies, + primary_packages, + marker=markers.get(ireq_key), + hashes=hashes, + ) + yield line + + # Yield even when there's no real content, so that blank files are written + if not yielded: + yield "" + + if warn_uninstallable: + log.warning(MESSAGE_UNINSTALLABLE) + + def write( + self, + results, + unsafe_requirements, + reverse_dependencies, + primary_packages, + markers, + hashes, + ): + + for line in self._iter_lines( + results, + unsafe_requirements, + reverse_dependencies, + primary_packages, + markers, + hashes, + ): + log.info(line) if not self.dry_run: - f = stack.enter_context(AtomicSaver(self.dst_file)) - - for line in self._iter_lines(results, unsafe_requirements, reverse_dependencies, - primary_packages, markers, hashes): - log.info(line) - if f: - f.write(unstyle(line).encode('utf-8')) - f.write(os.linesep.encode('utf-8')) + self.dst_file.write(unstyle(line).encode("utf-8")) + self.dst_file.write(os.linesep.encode("utf-8")) - def _format_requirement(self, ireq, reverse_dependencies, primary_packages, marker=None, hashes=None): + def _format_requirement( + self, ireq, reverse_dependencies, primary_packages, marker=None, 
hashes=None + ): ireq_hashes = (hashes if hashes is not None else {}).get(ireq) line = format_requirement(ireq, marker=marker, hashes=ireq_hashes) - if not self.annotate or key_from_req(ireq.req) in primary_packages: + if not self.annotate or key_from_ireq(ireq) in primary_packages: return line # Annotate what packages this package is required by @@ -138,5 +249,6 @@ def _format_requirement(self, ireq, reverse_dependencies, primary_packages, mark line = "{:24}{}{}".format( line, " \\\n " if ireq_hashes else " ", - comment("# via " + annotation)) + comment("# via " + annotation), + ) return line diff --git a/pipenv/utils.py b/pipenv/utils.py index 32f4491c5c..55cb7a9653 100644 --- a/pipenv/utils.py +++ b/pipenv/utils.py @@ -414,19 +414,8 @@ def __repr__(self): @staticmethod @lru_cache() def _get_pip_command(): - from .vendor.pip_shims.shims import Command, cmdoptions - - class PipCommand(Command): - """Needed for pip-tools.""" - - name = "PipCommand" - - from pipenv.patched.piptools.pip import get_pip_command - pip_cmd = get_pip_command() - pip_cmd.parser.add_option(cmdoptions.no_use_pep517()) - pip_cmd.parser.add_option(cmdoptions.use_pep517()) - pip_cmd.parser.add_option(cmdoptions.no_build_isolation()) - return pip_cmd + from .vendor.pip_shims.shims import InstallCommand + return InstallCommand() @classmethod def get_metadata( @@ -753,7 +742,7 @@ def repository(self): if self._repository is None: from pipenv.patched.piptools.repositories.pypi import PyPIRepository self._repository = PyPIRepository( - pip_options=self.pip_options, use_json=False, session=self.session, + self.pip_args, use_json=False, session=self.session, build_isolation=self.pip_options.build_isolation ) return self._repository diff --git a/pipenv/vendor/attr/__init__.py b/pipenv/vendor/attr/__init__.py index 0ebe5197a0..9ff4d47ffe 100644 --- a/pipenv/vendor/attr/__init__.py +++ b/pipenv/vendor/attr/__init__.py @@ -16,9 +16,11 @@ make_class, validate, ) +from ._version_info import VersionInfo -__version__ = "19.1.0" +__version__ = "19.3.0" +__version_info__ = VersionInfo._from_version_string(__version__) __title__ = "attrs" __description__ = "Classes Without Boilerplate" @@ -37,6 +39,7 @@ ib = attr = attrib dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + __all__ = [ "Attribute", "Factory", diff --git a/pipenv/vendor/attr/__init__.pyi b/pipenv/vendor/attr/__init__.pyi index fcb93b18e3..38f16f06ba 100644 --- a/pipenv/vendor/attr/__init__.pyi +++ b/pipenv/vendor/attr/__init__.pyi @@ -20,12 +20,27 @@ from . import filters as filters from . import converters as converters from . import validators as validators +from ._version_info import VersionInfo + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + _T = TypeVar("_T") _C = TypeVar("_C", bound=type) _ValidatorType = Callable[[Any, Attribute[_T], _T], Any] _ConverterType = Callable[[Any], _T] _FilterType = Callable[[Attribute[_T], _T], bool] +_ReprType = Callable[[Any], str] +_ReprArgType = Union[bool, _ReprType] # FIXME: in reality, if multiple validators are passed they must be in a list or tuple, # but those are invariant and so would prevent subtypes of _ValidatorType from working # when passed in a list or tuple. 
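The `__version_info__` attribute added above gives callers a structured way to gate on the attrs version instead of parsing `__version__` by hand. A minimal sketch of the idea (illustrative only, not part of the patch; assumes the vendored attrs >= 19.2, whose `VersionInfo` compares against tuples of length 1 to 4):

    import attr

    # VersionInfo truncates itself to the tuple's length before comparing,
    # so (19, 3, 0, "final") checked against (19, 2) becomes (19, 3) >= (19, 2).
    if attr.__version_info__ >= (19, 2):
        # The eq/order split is available here; `cmp` is deprecated.
        @attr.s(eq=True, order=False)
        class Pin(object):  # invented example class
            name = attr.ib()
            version = attr.ib()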
@@ -49,18 +64,16 @@ class Attribute(Generic[_T]): name: str default: Optional[_T] validator: Optional[_ValidatorType[_T]] - repr: bool + repr: _ReprArgType cmp: bool + eq: bool + order: bool hash: Optional[bool] init: bool converter: Optional[_ConverterType[_T]] metadata: Dict[Any, Any] type: Optional[Type[_T]] kw_only: bool - def __lt__(self, x: Attribute[_T]) -> bool: ... - def __le__(self, x: Attribute[_T]) -> bool: ... - def __gt__(self, x: Attribute[_T]) -> bool: ... - def __ge__(self, x: Attribute[_T]) -> bool: ... # NOTE: We had several choices for the annotation to use for type arg: # 1) Type[_T] @@ -89,16 +102,17 @@ class Attribute(Generic[_T]): def attrib( default: None = ..., validator: None = ..., - repr: bool = ..., - cmp: bool = ..., + repr: _ReprArgType = ..., + cmp: Optional[bool] = ..., hash: Optional[bool] = ..., init: bool = ..., - convert: None = ..., metadata: Optional[Mapping[Any, Any]] = ..., type: None = ..., converter: None = ..., factory: None = ..., kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., ) -> Any: ... # This form catches an explicit None or no default and infers the type from the other arguments. @@ -106,16 +120,17 @@ def attrib( def attrib( default: None = ..., validator: Optional[_ValidatorArgType[_T]] = ..., - repr: bool = ..., - cmp: bool = ..., + repr: _ReprArgType = ..., + cmp: Optional[bool] = ..., hash: Optional[bool] = ..., init: bool = ..., - convert: Optional[_ConverterType[_T]] = ..., metadata: Optional[Mapping[Any, Any]] = ..., type: Optional[Type[_T]] = ..., converter: Optional[_ConverterType[_T]] = ..., factory: Optional[Callable[[], _T]] = ..., kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., ) -> _T: ... # This form catches an explicit default argument. @@ -123,16 +138,17 @@ def attrib( def attrib( default: _T, validator: Optional[_ValidatorArgType[_T]] = ..., - repr: bool = ..., - cmp: bool = ..., + repr: _ReprArgType = ..., + cmp: Optional[bool] = ..., hash: Optional[bool] = ..., init: bool = ..., - convert: Optional[_ConverterType[_T]] = ..., metadata: Optional[Mapping[Any, Any]] = ..., type: Optional[Type[_T]] = ..., converter: Optional[_ConverterType[_T]] = ..., factory: Optional[Callable[[], _T]] = ..., kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., ) -> _T: ... # This form covers type=non-Type: e.g. forward references (str), Any @@ -140,16 +156,17 @@ def attrib( def attrib( default: Optional[_T] = ..., validator: Optional[_ValidatorArgType[_T]] = ..., - repr: bool = ..., - cmp: bool = ..., + repr: _ReprArgType = ..., + cmp: Optional[bool] = ..., hash: Optional[bool] = ..., init: bool = ..., - convert: Optional[_ConverterType[_T]] = ..., metadata: Optional[Mapping[Any, Any]] = ..., type: object = ..., converter: Optional[_ConverterType[_T]] = ..., factory: Optional[Callable[[], _T]] = ..., kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., ) -> Any: ... @overload def attrs( @@ -157,7 +174,7 @@ def attrs( these: Optional[Dict[str, Any]] = ..., repr_ns: Optional[str] = ..., repr: bool = ..., - cmp: bool = ..., + cmp: Optional[bool] = ..., hash: Optional[bool] = ..., init: bool = ..., slots: bool = ..., @@ -168,6 +185,8 @@ def attrs( kw_only: bool = ..., cache_hash: bool = ..., auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., ) -> _C: ... 
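These stubs add per-attribute `eq` and `order` flags alongside the class-level ones, so a field can take part in equality checks while staying out of ordering. A hedged sketch of the intended usage (the class and field names are invented for illustration):

    import attr

    @attr.s(order=True)
    class Release(object):
        major = attr.ib(type=int)
        minor = attr.ib(type=int)
        # Compared by __eq__/__ne__ but ignored by __lt__/__le__/__gt__/__ge__.
        build_tag = attr.ib(default="", eq=True, order=False)

    assert Release(1, 2, "a") == Release(1, 2, "a")
    assert Release(1, 2, "zzz") < Release(1, 3, "a")  # build_tag plays no part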
@overload def attrs( @@ -175,7 +194,7 @@ def attrs( these: Optional[Dict[str, Any]] = ..., repr_ns: Optional[str] = ..., repr: bool = ..., - cmp: bool = ..., + cmp: Optional[bool] = ..., hash: Optional[bool] = ..., init: bool = ..., slots: bool = ..., @@ -186,6 +205,8 @@ def attrs( kw_only: bool = ..., cache_hash: bool = ..., auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., ) -> Callable[[_C], _C]: ... # TODO: add support for returning NamedTuple from the mypy plugin @@ -204,7 +225,7 @@ def make_class( bases: Tuple[type, ...] = ..., repr_ns: Optional[str] = ..., repr: bool = ..., - cmp: bool = ..., + cmp: Optional[bool] = ..., hash: Optional[bool] = ..., init: bool = ..., slots: bool = ..., @@ -215,6 +236,8 @@ def make_class( kw_only: bool = ..., cache_hash: bool = ..., auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., ) -> type: ... # _funcs -- diff --git a/pipenv/vendor/attr/_compat.py b/pipenv/vendor/attr/_compat.py index 9a99dcd96c..a915db8ebe 100644 --- a/pipenv/vendor/attr/_compat.py +++ b/pipenv/vendor/attr/_compat.py @@ -20,7 +20,7 @@ if PY2: from UserDict import IterableUserDict - from collections import Mapping, Sequence # noqa + from collections import Mapping, Sequence # We 'bundle' isclass instead of using inspect as importing inspect is # fairly expensive (order of 10-15 ms for a modern machine in 2016) @@ -106,7 +106,8 @@ def just_warn(*args, **kw): consequences of not setting the cell on Python 2. """ warnings.warn( - "Missing ctypes. Some features like bare super() or accessing " + "Running interpreter doesn't sufficiently support code object " + "introspection. Some features like bare super() or accessing " "__class__ will not work with slotted classes.", RuntimeWarning, stacklevel=2, @@ -124,36 +125,106 @@ def metadata_proxy(d): return types.MappingProxyType(dict(d)) -def import_ctypes(): - """ - Moved into a function for testability. - """ - import ctypes - - return ctypes - - def make_set_closure_cell(): + """Return a function of two arguments (cell, value) which sets + the value stored in the closure cell `cell` to `value`. """ - Moved into a function for testability. - """ + # pypy makes this easy. (It also supports the logic below, but + # why not do the easy/fast thing?) if PYPY: # pragma: no cover def set_closure_cell(cell, value): cell.__setstate__((value,)) + return set_closure_cell + + # Otherwise gotta do it the hard way. + + # Create a function that will set its first cellvar to `value`. + def set_first_cellvar_to(value): + x = value + return + + # This function will be eliminated as dead code, but + # not before its reference to `x` forces `x` to be + # represented as a closure cell rather than a local. + def force_x_to_be_a_cell(): # pragma: no cover + return x + + try: + # Extract the code object and make sure our assumptions about + # the closure behavior are correct. + if PY2: + co = set_first_cellvar_to.func_code + else: + co = set_first_cellvar_to.__code__ + if co.co_cellvars != ("x",) or co.co_freevars != (): + raise AssertionError # pragma: no cover + + # Convert this code object to a code object that sets the + # function's first _freevar_ (not cellvar) to the argument. + if sys.version_info >= (3, 8): + # CPython 3.8+ has an incompatible CodeType signature + # (added a posonlyargcount argument) but also added + # CodeType.replace() to do this without counting parameters. 
+ set_first_freevar_code = co.replace( + co_cellvars=co.co_freevars, co_freevars=co.co_cellvars + ) + else: + args = [co.co_argcount] + if not PY2: + args.append(co.co_kwonlyargcount) + args.extend( + [ + co.co_nlocals, + co.co_stacksize, + co.co_flags, + co.co_code, + co.co_consts, + co.co_names, + co.co_varnames, + co.co_filename, + co.co_name, + co.co_firstlineno, + co.co_lnotab, + # These two arguments are reversed: + co.co_cellvars, + co.co_freevars, + ] + ) + set_first_freevar_code = types.CodeType(*args) + + def set_closure_cell(cell, value): + # Create a function using the set_first_freevar_code, + # whose first closure cell is `cell`. Calling it will + # change the value of that cell. + setter = types.FunctionType( + set_first_freevar_code, {}, "setter", (), (cell,) + ) + # And call it to set the cell. + setter(value) + + # Make sure it works on this interpreter: + def make_func_with_cell(): + x = None + + def func(): + return x # pragma: no cover + + return func + + if PY2: + cell = make_func_with_cell().func_closure[0] + else: + cell = make_func_with_cell().__closure__[0] + set_closure_cell(cell, 100) + if cell.cell_contents != 100: + raise AssertionError # pragma: no cover + + except Exception: + return just_warn else: - try: - ctypes = import_ctypes() - - set_closure_cell = ctypes.pythonapi.PyCell_Set - set_closure_cell.argtypes = (ctypes.py_object, ctypes.py_object) - set_closure_cell.restype = ctypes.c_int - except Exception: - # We try best effort to set the cell, but sometimes it's not - # possible. For example on Jython or on GAE. - set_closure_cell = just_warn - return set_closure_cell + return set_closure_cell set_closure_cell = make_set_closure_cell() diff --git a/pipenv/vendor/attr/_funcs.py b/pipenv/vendor/attr/_funcs.py index b61d239412..c077e4284f 100644 --- a/pipenv/vendor/attr/_funcs.py +++ b/pipenv/vendor/attr/_funcs.py @@ -24,7 +24,7 @@ def asdict( ``attrs``-decorated. :param callable filter: A callable whose return code determines whether an attribute or element is included (``True``) or dropped (``False``). Is - called with the :class:`attr.Attribute` as the first argument and the + called with the `attr.Attribute` as the first argument and the value as the second argument. :param callable dict_factory: A callable to produce dictionaries from. For example, to produce ordered dictionaries instead of normal Python @@ -130,7 +130,7 @@ def astuple( ``attrs``-decorated. :param callable filter: A callable whose return code determines whether an attribute or element is included (``True``) or dropped (``False``). Is - called with the :class:`attr.Attribute` as the first argument and the + called with the `attr.Attribute` as the first argument and the value as the second argument. :param callable tuple_factory: A callable to produce tuples from. For example, to produce lists instead of tuples. @@ -219,7 +219,7 @@ def has(cls): :param type cls: Class to introspect. :raise TypeError: If *cls* is not a class. - :rtype: :class:`bool` + :rtype: bool """ return getattr(cls, "__attrs_attrs__", None) is not None @@ -239,7 +239,7 @@ def assoc(inst, **changes): class. .. deprecated:: 17.1.0 - Use :func:`evolve` instead. + Use `evolve` instead. 
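For reference, `evolve` (the replacement the deprecation note below points to) returns a copy of an instance with selected fields changed, which is also why it suits frozen classes. A minimal sketch, with `Config` invented for illustration:

    import attr

    @attr.s(frozen=True)
    class Config(object):
        retries = attr.ib(default=3)
        timeout = attr.ib(default=30)

    base = Config()
    # evolve() builds a new instance instead of mutating the frozen one.
    assert attr.evolve(base, timeout=60) == Config(retries=3, timeout=60)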
""" import warnings diff --git a/pipenv/vendor/attr/_make.py b/pipenv/vendor/attr/_make.py index 827175a460..46f9c54ec1 100644 --- a/pipenv/vendor/attr/_make.py +++ b/pipenv/vendor/attr/_make.py @@ -1,10 +1,10 @@ from __future__ import absolute_import, division, print_function import copy -import hashlib import linecache import sys import threading +import uuid import warnings from operator import itemgetter @@ -42,6 +42,9 @@ _empty_metadata_singleton = metadata_proxy({}) +# Unique object for unequivocal getattr() defaults. +_sentinel = object() + class _Nothing(object): """ @@ -71,15 +74,16 @@ def attrib( default=NOTHING, validator=None, repr=True, - cmp=True, + cmp=None, hash=None, init=True, - convert=None, metadata=None, type=None, converter=None, factory=None, kw_only=False, + eq=None, + order=None, ): """ Create a new attribute on a class. @@ -87,30 +91,30 @@ def attrib( .. warning:: Does *not* do anything unless the class is also decorated with - :func:`attr.s`! + `attr.s`! :param default: A value that is used if an ``attrs``-generated ``__init__`` is used and no value is passed while instantiating or the attribute is excluded using ``init=False``. - If the value is an instance of :class:`Factory`, its callable will be + If the value is an instance of `Factory`, its callable will be used to construct a new value (useful for mutable data types like lists or dicts). If a default is not set (or set manually to ``attr.NOTHING``), a value - *must* be supplied when instantiating; otherwise a :exc:`TypeError` + *must* be supplied when instantiating; otherwise a `TypeError` will be raised. The default can also be set using decorator notation as shown below. - :type default: Any value. + :type default: Any value :param callable factory: Syntactic sugar for ``default=attr.Factory(callable)``. - :param validator: :func:`callable` that is called by ``attrs``-generated + :param validator: `callable` that is called by ``attrs``-generated ``__init__`` methods after the instance has been initialized. They - receive the initialized instance, the :class:`Attribute`, and the + receive the initialized instance, the `Attribute`, and the passed value. The return value is *not* inspected so the validator has to throw an @@ -120,18 +124,29 @@ def attrib( all pass. Validators can be globally disabled and re-enabled using - :func:`get_run_validators`. + `get_run_validators`. The validator can also be set using decorator notation as shown below. :type validator: ``callable`` or a ``list`` of ``callable``\\ s. - :param bool repr: Include this attribute in the generated ``__repr__`` - method. - :param bool cmp: Include this attribute in the generated comparison methods - (``__eq__`` et al). + :param repr: Include this attribute in the generated ``__repr__`` + method. If ``True``, include the attribute; if ``False``, omit it. By + default, the built-in ``repr()`` function is used. To override how the + attribute value is formatted, pass a ``callable`` that takes a single + value and returns a string. Note that the resulting string is used + as-is, i.e. it will be used directly *instead* of calling ``repr()`` + (the default). + :type repr: a ``bool`` or a ``callable`` to use a custom function. + :param bool eq: If ``True`` (default), include this attribute in the + generated ``__eq__`` and ``__ne__`` methods that check two instances + for equality. + :param bool order: If ``True`` (default), include this attributes in the + generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. 
+    :param bool cmp: Setting to ``True`` is equivalent to setting ``eq=True,
+        order=True``. Deprecated in favor of *eq* and *order*.
     :param hash: Include this attribute in the generated ``__hash__``
-        method. If ``None`` (default), mirror *cmp*'s value. This is the
+        method. If ``None`` (default), mirror *eq*'s value. This is the
         correct behavior according to the Python spec. Setting this value to
         anything else than ``None`` is *discouraged*.
     :type hash: ``bool`` or ``None``
@@ -139,13 +154,13 @@ def attrib(
         method. It is possible to set this to ``False`` and set a default
         value. In that case this attribute is unconditionally initialized
         with the specified default value or factory.
-    :param callable converter: :func:`callable` that is called by
+    :param callable converter: `callable` that is called by
         ``attrs``-generated ``__init__`` methods to convert attribute's value
         to the desired format. It is given the passed-in value, and the
         returned value will be used as the new value of the attribute. The
         value is converted before being passed to the validator, if any.
     :param metadata: An arbitrary mapping, to be used by third-party
-        components. See :ref:`extending_metadata`.
+        components. See `extending_metadata`.
     :param type: The type of the attribute. In Python 3.6 or greater, the
         preferred method to specify the type is using a variable annotation
         (see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
@@ -155,7 +170,7 @@ def attrib(
         Please note that ``attrs`` doesn't do anything with this metadata by
         itself. You can use it as part of your own code or for
-        :doc:`static type checking <types>`.
+        `static type checking <types>`.
     :param kw_only: Make this attribute keyword-only (Python 3+) in the
         generated ``__init__`` (if ``init`` is ``False``, this parameter is
         ignored).
@@ -164,7 +179,7 @@ def attrib(
     .. versionadded:: 16.3.0 *metadata*
     .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
     .. versionchanged:: 17.1.0
-        *hash* is ``None`` and therefore mirrors *cmp* by default.
+        *hash* is ``None`` and therefore mirrors *eq* by default.
     .. versionadded:: 17.3.0 *type*
     .. deprecated:: 17.4.0 *convert*
     .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
@@ -172,26 +187,18 @@ def attrib(
     .. versionadded:: 18.1.0
        ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
     .. versionadded:: 18.2.0 *kw_only*
+    .. versionchanged:: 19.2.0 *convert* keyword argument removed
+    .. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
+    .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+    .. versionadded:: 19.2.0 *eq* and *order*
     """
+    eq, order = _determine_eq_order(cmp, eq, order)
+
     if hash is not None and hash is not True and hash is not False:
         raise TypeError(
             "Invalid value for hash. Must be True, False, or None."
         )

-    if convert is not None:
-        if converter is not None:
-            raise RuntimeError(
-                "Can't pass both `convert` and `converter`. "
-                "Please use `converter` only."
-            )
-        warnings.warn(
-            "The `convert` argument is deprecated in favor of `converter`. 
" - "It will be removed after 2019/01.", - DeprecationWarning, - stacklevel=2, - ) - converter = convert - if factory is not None: if default is not NOTHING: raise ValueError( @@ -209,13 +216,15 @@ def attrib( default=default, validator=validator, repr=repr, - cmp=cmp, + cmp=None, hash=hash, init=init, converter=converter, metadata=metadata, type=type, kw_only=kw_only, + eq=eq, + order=order, ) @@ -385,38 +394,20 @@ def _transform_attrs(cls, these, auto_attribs, kw_only): attrs = AttrsClass(base_attrs + own_attrs) + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: had_default = False - was_kw_only = False - for a in attrs: - if ( - was_kw_only is False - and had_default is True - and a.default is NOTHING - and a.init is True - and a.kw_only is False - ): + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: raise ValueError( "No mandatory attributes allowed after an attribute with a " "default value or factory. Attribute in question: %r" % (a,) ) - elif ( - had_default is False - and a.default is not NOTHING - and a.init is not False - and - # Keyword-only attributes without defaults can be specified - # after keyword-only attributes with defaults. - a.kw_only is False - ): + + if had_default is False and a.default is not NOTHING: had_default = True - if was_kw_only is True and a.kw_only is False and a.init is True: - raise ValueError( - "Non keyword-only attributes are not allowed after a " - "keyword-only attribute (unless they are init=False). " - "Attribute in question: {a!r}".format(a=a) - ) - if was_kw_only is False and a.init is True and a.kw_only is True: - was_kw_only = True return _Attributes((attrs, base_attrs, base_attr_map)) @@ -518,7 +509,7 @@ def _patch_original_class(self): for name in self._attr_names: if ( name not in base_names - and getattr(cls, name, None) is not None + and getattr(cls, name, _sentinel) is not _sentinel ): try: delattr(cls, name) @@ -676,7 +667,10 @@ def make_unhashable(self): def add_hash(self): self._cls_dict["__hash__"] = self._add_method_dunders( _make_hash( - self._attrs, frozen=self._frozen, cache_hash=self._cache_hash + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, ) ) @@ -685,6 +679,7 @@ def add_hash(self): def add_init(self): self._cls_dict["__init__"] = self._add_method_dunders( _make_init( + self._cls, self._attrs, self._has_post_init, self._frozen, @@ -697,13 +692,22 @@ def add_init(self): return self - def add_cmp(self): + def add_eq(self): + cd = self._cls_dict + + cd["__eq__"], cd["__ne__"] = ( + self._add_method_dunders(meth) + for meth in _make_eq(self._cls, self._attrs) + ) + + return self + + def add_order(self): cd = self._cls_dict - cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd[ - "__gt__" - ], cd["__ge__"] = ( - self._add_method_dunders(meth) for meth in _make_cmp(self._attrs) + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) ) return self @@ -727,12 +731,45 @@ def _add_method_dunders(self, method): return method +_CMP_DEPRECATION = ( + "The usage of `cmp` is deprecated and will be removed on or after " + "2021-06-01. Please use `eq` and `order` instead." 
+) + + +def _determine_eq_order(cmp, eq, order): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. + """ + if cmp is not None and any((eq is not None, order is not None)): + raise ValueError("Don't mix `cmp` with `eq' and `order`.") + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=3) + + return cmp, cmp + + # If left None, equality is on and ordering mirrors equality. + if eq is None: + eq = True + + if order is None: + order = eq + + if eq is False and order is True: + raise ValueError("`order` can only be True if `eq` is True too.") + + return eq, order + + def attrs( maybe_cls=None, these=None, repr_ns=None, repr=True, - cmp=True, + cmp=None, hash=None, init=True, slots=False, @@ -743,13 +780,15 @@ def attrs( kw_only=False, cache_hash=False, auto_exc=False, + eq=None, + order=None, ): r""" A class decorator that adds `dunder <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the - specified attributes using :func:`attr.ib` or the *these* argument. + specified attributes using `attr.ib` or the *these* argument. - :param these: A dictionary of name to :func:`attr.ib` mappings. This is + :param these: A dictionary of name to `attr.ib` mappings. This is useful to avoid the definition of your attributes within the class body because you can't (e.g. if you want to add ``__repr__`` methods to Django models) or don't want to. @@ -757,12 +796,12 @@ def attrs( If *these* is not ``None``, ``attrs`` will *not* search the class body for attributes and will *not* remove any attributes from it. - If *these* is an ordered dict (:class:`dict` on Python 3.6+, - :class:`collections.OrderedDict` otherwise), the order is deduced from + If *these* is an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from the order of the attributes inside *these*. Otherwise the order of the definition of the attributes is used. - :type these: :class:`dict` of :class:`str` to :func:`attr.ib` + :type these: `dict` of `str` to `attr.ib` :param str repr_ns: When using nested classes, there's no way in Python 2 to automatically detect that. Therefore it's possible to set the @@ -771,18 +810,29 @@ def attrs( representation of ``attrs`` attributes.. :param bool str: Create a ``__str__`` method that is identical to ``__repr__``. This is usually not necessary except for - :class:`Exception`\ s. - :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``, - ``__gt__``, and ``__ge__`` methods that compare the class as if it were - a tuple of its ``attrs`` attributes. But the attributes are *only* - compared, if the types of both classes are *identical*! + `Exception`\ s. + :param bool eq: If ``True`` or ``None`` (default), add ``__eq__`` and + ``__ne__`` methods that check two instances for equality. + + They compare the instances as if they were tuples of their ``attrs`` + attributes, but only iff the types of both classes are *identical*! + :type eq: `bool` or `None` + :param bool order: If ``True``, add ``__lt__``, ``__le__``, ``__gt__``, + and ``__ge__`` methods that behave like *eq* above and allow instances + to be ordered. If ``None`` (default) mirror value of *eq*. + :type order: `bool` or `None` + :param cmp: Setting to ``True`` is equivalent to setting ``eq=True, + order=True``. Deprecated in favor of *eq* and *order*, has precedence + over them for backward-compatibility though. 
Must not be mixed with + *eq* or *order*. + :type cmp: `bool` or `None` :param hash: If ``None`` (default), the ``__hash__`` method is generated - according how *cmp* and *frozen* are set. + according how *eq* and *frozen* are set. 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. - 2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to + 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to None, marking it unhashable (which it is). - 3. If *cmp* is False, ``__hash__`` will be left untouched meaning the + 3. If *eq* is False, ``__hash__`` will be left untouched meaning the ``__hash__`` method of the base class will be used (if base class is ``object``, this means it will fall back to id-based hashing.). @@ -791,29 +841,29 @@ def attrs( didn't freeze it programmatically) by passing ``True`` or not. Both of these cases are rather special and should be used carefully. - See the `Python documentation \ - <https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_ - and the `GitHub issue that led to the default behavior \ - <https://github.com/python-attrs/attrs/issues/136>`_ for more details. + See our documentation on `hashing`, Python's documentation on + `object.__hash__`, and the `GitHub issue that led to the default \ + behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more + details. :type hash: ``bool`` or ``None`` :param bool init: Create a ``__init__`` method that initializes the ``attrs`` attributes. Leading underscores are stripped for the argument name. If a ``__attrs_post_init__`` method exists on the class, it will be called after the class is fully initialized. - :param bool slots: Create a slots_-style class that's more - memory-efficient. See :ref:`slots` for further ramifications. + :param bool slots: Create a `slotted class <slotted classes>` that's more + memory-efficient. :param bool frozen: Make instances immutable after initialization. If someone attempts to modify a frozen instance, - :exc:`attr.exceptions.FrozenInstanceError` is raised. + `attr.exceptions.FrozenInstanceError` is raised. Please note: 1. This is achieved by installing a custom ``__setattr__`` method - on your class so you can't implement an own one. + on your class, so you can't implement your own. 2. True immutability is impossible in Python. - 3. This *does* have a minor a runtime performance :ref:`impact + 3. This *does* have a minor a runtime performance `impact <how-frozen>` when initializing new instances. In other words: ``__init__`` is slightly slower with ``frozen=True``. @@ -822,24 +872,24 @@ def attrs( circumvent that limitation by using ``object.__setattr__(self, "attribute_name", value)``. - .. _slots: https://docs.python.org/3/reference/datamodel.html#slots :param bool weakref_slot: Make instances weak-referenceable. This has no effect unless ``slots`` is also enabled. :param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes (Python 3.6 and later only) from the class body. In this case, you **must** annotate every field. If ``attrs`` - encounters a field that is set to an :func:`attr.ib` but lacks a type - annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is + encounters a field that is set to an `attr.ib` but lacks a type + annotation, an `attr.exceptions.UnannotatedAttributeError` is raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't want to set a type. If you assign a value to those attributes (e.g. 
``x: int = 42``), that
        value becomes the default value as if it were passed using
-        ``attr.ib(default=42)``. Passing an instance of :class:`Factory` also
+        ``attr.ib(default=42)``. Passing an instance of `Factory` also
        works as expected.

-        Attributes annotated as :data:`typing.ClassVar` are **ignored**.
+        Attributes annotated as `typing.ClassVar`, and attributes that are
+        neither annotated nor set to an `attr.ib` are **ignored**.

        .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
    :param bool kw_only: Make all attributes keyword-only (Python 3+)
@@ -852,15 +902,15 @@ def attrs(
        fields involved in hash code computation or mutations of the objects
        those fields point to after object creation. If such changes occur,
        the behavior of the object's hash code is undefined.
-    :param bool auto_exc: If the class subclasses :class:`BaseException`
+    :param bool auto_exc: If the class subclasses `BaseException`
        (which implicitly includes any subclass of any exception), the
        following happens to behave like a well-behaved Python exception
        class:

-        - the values for *cmp* and *hash* are ignored and the instances compare
-          and hash by the instance's ids (N.B. ``attrs`` will *not* remove
-          existing implementations of ``__hash__`` or the equality methods. It
-          just won't add own ones.),
+        - the values for *eq*, *order*, and *hash* are ignored and the
+          instances compare and hash by the instance's ids (N.B. ``attrs`` will
+          *not* remove existing implementations of ``__hash__`` or the equality
+          methods. It just won't add its own.),
        - all attributes that are either passed into ``__init__`` or have a
          default value are additionally available as a tuple in the ``args``
          attribute,
@@ -879,13 +929,19 @@ def attrs(
    .. versionadded:: 18.2.0 *weakref_slot*
    .. deprecated:: 18.2.0
        ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
-        :class:`DeprecationWarning` if the classes compared are subclasses of
+        `DeprecationWarning` if the classes compared are subclasses of
        each other. ``__eq__`` and ``__ne__`` never tried to compare
        subclasses to each other.
+    .. versionchanged:: 19.2.0
+        ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+        subclasses comparable anymore.
    .. versionadded:: 18.2.0 *kw_only*
    .. versionadded:: 18.2.0 *cache_hash*
    .. versionadded:: 19.1.0 *auto_exc*
+    .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+    .. versionadded:: 19.2.0 *eq* and *order*
    """
+    eq, order = _determine_eq_order(cmp, eq, order)

    def wrap(cls):
@@ -910,28 +966,30 @@ def wrap(cls):
        builder.add_repr(repr_ns)
        if str is True:
            builder.add_str()
-        if cmp is True and not is_exc:
-            builder.add_cmp()
+        if eq is True and not is_exc:
+            builder.add_eq()
+        if order is True and not is_exc:
+            builder.add_order()

        if hash is not True and hash is not False and hash is not None:
            # Can't use `hash in` because 1 == True for example.
            raise TypeError(
                "Invalid value for hash. Must be True, False, or None."
            )
-        elif hash is False or (hash is None and cmp is False):
+        elif hash is False or (hash is None and eq is False) or is_exc:
+            # Don't do anything. Should fall back to __object__'s __hash__
+            # which is by id.
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash. To use hash caching,"
                    " hashing must be either explicitly or implicitly "
                    "enabled."
                )
-        elif (
-            hash is True
-            or (hash is None and cmp is True and frozen is True)
-            and is_exc is False
-        ):
+        elif hash is True or (hash is None and eq is True and frozen is True):
+            # Build a __hash__ if told so, or if it's safe.
builder.add_hash() else: + # Raise TypeError on attempts to hash. if cache_hash: raise TypeError( "Invalid value for cache_hash. To use hash caching," @@ -997,19 +1055,44 @@ def _attrs_to_tuple(obj, attrs): return tuple(getattr(obj, a.name) for a in attrs) -def _make_hash(attrs, frozen, cache_hash): +def _generate_unique_filename(cls, func_name): + """ + Create a "filename" suitable for a function being generated. + """ + unique_id = uuid.uuid4() + extra = "" + count = 1 + + while True: + unique_filename = "<attrs generated {0} {1}.{2}{3}>".format( + func_name, + cls.__module__, + getattr(cls, "__qualname__", cls.__name__), + extra, + ) + # To handle concurrency we essentially "reserve" our spot in + # the linecache with a dummy line. The caller can then + # set this value correctly. + cache_line = (1, None, (str(unique_id),), unique_filename) + if ( + linecache.cache.setdefault(unique_filename, cache_line) + == cache_line + ): + return unique_filename + + # Looks like this spot is taken. Try again. + count += 1 + extra = "-{0}".format(count) + + +def _make_hash(cls, attrs, frozen, cache_hash): attrs = tuple( - a - for a in attrs - if a.hash is True or (a.hash is None and a.cmp is True) + a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) ) tab = " " - # We cache the generated hash methods for the same kinds of attributes. - sha1 = hashlib.sha1() - sha1.update(repr(attrs).encode("utf-8")) - unique_filename = "<attrs generated hash %s>" % (sha1.hexdigest(),) + unique_filename = _generate_unique_filename(cls, "hash") type_hash = hash(unique_filename) method_lines = ["def __hash__(self):"] @@ -1066,7 +1149,7 @@ def _add_hash(cls, attrs): """ Add a hash method to *cls*. """ - cls.__hash__ = _make_hash(attrs, frozen=False, cache_hash=False) + cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) return cls @@ -1082,19 +1165,10 @@ def __ne__(self, other): return not result -WARNING_CMP_ISINSTANCE = ( - "Comparision of subclasses using __%s__ is deprecated and will be removed " - "in 2019." -) - - -def _make_cmp(attrs): - attrs = [a for a in attrs if a.cmp] +def _make_eq(cls, attrs): + attrs = [a for a in attrs if a.eq] - # We cache the generated eq methods for the same kinds of attributes. - sha1 = hashlib.sha1() - sha1.update(repr(attrs).encode("utf-8")) - unique_filename = "<attrs generated eq %s>" % (sha1.hexdigest(),) + unique_filename = _generate_unique_filename(cls, "eq") lines = [ "def __eq__(self, other):", " if other.__class__ is not self.__class__:", @@ -1127,8 +1201,11 @@ def _make_cmp(attrs): script.splitlines(True), unique_filename, ) - eq = locs["__eq__"] - ne = __ne__ + return locs["__eq__"], __ne__ + + +def _make_order(cls, attrs): + attrs = [a for a in attrs if a.order] def attrs_to_tuple(obj): """ @@ -1140,67 +1217,49 @@ def __lt__(self, other): """ Automatically created by attrs. """ - if isinstance(other, self.__class__): - if other.__class__ is not self.__class__: - warnings.warn( - WARNING_CMP_ISINSTANCE % ("lt",), DeprecationWarning - ) + if other.__class__ is self.__class__: return attrs_to_tuple(self) < attrs_to_tuple(other) - else: - return NotImplemented + + return NotImplemented def __le__(self, other): """ Automatically created by attrs. 
""" - if isinstance(other, self.__class__): - if other.__class__ is not self.__class__: - warnings.warn( - WARNING_CMP_ISINSTANCE % ("le",), DeprecationWarning - ) + if other.__class__ is self.__class__: return attrs_to_tuple(self) <= attrs_to_tuple(other) - else: - return NotImplemented + + return NotImplemented def __gt__(self, other): """ Automatically created by attrs. """ - if isinstance(other, self.__class__): - if other.__class__ is not self.__class__: - warnings.warn( - WARNING_CMP_ISINSTANCE % ("gt",), DeprecationWarning - ) + if other.__class__ is self.__class__: return attrs_to_tuple(self) > attrs_to_tuple(other) - else: - return NotImplemented + + return NotImplemented def __ge__(self, other): """ Automatically created by attrs. """ - if isinstance(other, self.__class__): - if other.__class__ is not self.__class__: - warnings.warn( - WARNING_CMP_ISINSTANCE % ("ge",), DeprecationWarning - ) + if other.__class__ is self.__class__: return attrs_to_tuple(self) >= attrs_to_tuple(other) - else: - return NotImplemented - return eq, ne, __lt__, __le__, __gt__, __ge__ + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ -def _add_cmp(cls, attrs=None): +def _add_eq(cls, attrs=None): """ - Add comparison methods to *cls*. + Add equality methods to *cls* with *attrs*. """ if attrs is None: attrs = cls.__attrs_attrs__ - cls.__eq__, cls.__ne__, cls.__lt__, cls.__le__, cls.__gt__, cls.__ge__ = _make_cmp( # noqa - attrs - ) + cls.__eq__, cls.__ne__ = _make_eq(cls, attrs) return cls @@ -1210,9 +1269,17 @@ def _add_cmp(cls, attrs=None): def _make_repr(attrs, ns): """ - Make a repr method for *attr_names* adding *ns* to the full name. + Make a repr method that includes relevant *attrs*, adding *ns* to the full + name. """ - attr_names = tuple(a.name for a in attrs if a.repr) + + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom callable. + attr_names_with_reprs = tuple( + (a.name, repr if a.repr is True else a.repr) + for a in attrs + if a.repr is not False + ) def __repr__(self): """ @@ -1244,12 +1311,14 @@ def __repr__(self): try: result = [class_name, "("] first = True - for name in attr_names: + for name, attr_repr in attr_names_with_reprs: if first: first = False else: result.append(", ") - result.extend((name, "=", repr(getattr(self, name, NOTHING)))) + result.extend( + (name, "=", attr_repr(getattr(self, name, NOTHING))) + ) return "".join(result) + ")" finally: working_set.remove(id(self)) @@ -1269,14 +1338,11 @@ def _add_repr(cls, ns=None, attrs=None): def _make_init( - attrs, post_init, frozen, slots, cache_hash, base_attr_map, is_exc + cls, attrs, post_init, frozen, slots, cache_hash, base_attr_map, is_exc ): attrs = [a for a in attrs if a.init or a.default is not NOTHING] - # We cache the generated init methods for the same kinds of attributes. - sha1 = hashlib.sha1() - sha1.update(repr(attrs).encode("utf-8")) - unique_filename = "<attrs generated init {0}>".format(sha1.hexdigest()) + unique_filename = _generate_unique_filename(cls, "init") script, globs, annotations = _attrs_to_init_script( attrs, frozen, slots, post_init, cache_hash, base_attr_map, is_exc @@ -1321,7 +1387,7 @@ def fields(cls): :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` class. - :rtype: tuple (with name accessors) of :class:`attr.Attribute` + :rtype: tuple (with name accessors) of `attr.Attribute` .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields by name. 
@@ -1348,7 +1414,7 @@ def fields_dict(cls): class. :rtype: an ordered dict where keys are attribute names and values are - :class:`attr.Attribute`\\ s. This will be a :class:`dict` if it's + `attr.Attribute`\\ s. This will be a `dict` if it's naturally ordered like on Python 3.6+ or an :class:`~collections.OrderedDict` otherwise. @@ -1678,9 +1744,10 @@ class Attribute(object): :attribute name: The name of the attribute. - Plus *all* arguments of :func:`attr.ib`. + Plus *all* arguments of `attr.ib` (except for ``factory`` + which is only syntactic sugar for ``default=Factory(...)``. - For the version history of the fields, see :func:`attr.ib`. + For the version history of the fields, see `attr.ib`. """ __slots__ = ( @@ -1688,7 +1755,8 @@ class Attribute(object): "default", "validator", "repr", - "cmp", + "eq", + "order", "hash", "init", "metadata", @@ -1703,39 +1771,29 @@ def __init__( default, validator, repr, - cmp, + cmp, # XXX: unused, remove along with other cmp code. hash, init, - convert=None, metadata=None, type=None, converter=None, kw_only=False, + eq=None, + order=None, ): + eq, order = _determine_eq_order(cmp, eq, order) + # Cache this descriptor here to speed things up later. bound_setattr = _obj_setattr.__get__(self, Attribute) # Despite the big red warning, people *do* instantiate `Attribute` # themselves. - if convert is not None: - if converter is not None: - raise RuntimeError( - "Can't pass both `convert` and `converter`. " - "Please use `converter` only." - ) - warnings.warn( - "The `convert` argument is deprecated in favor of `converter`." - " It will be removed after 2019/01.", - DeprecationWarning, - stacklevel=2, - ) - converter = convert - bound_setattr("name", name) bound_setattr("default", default) bound_setattr("validator", validator) bound_setattr("repr", repr) - bound_setattr("cmp", cmp) + bound_setattr("eq", eq) + bound_setattr("order", order) bound_setattr("hash", hash) bound_setattr("init", init) bound_setattr("converter", converter) @@ -1753,16 +1811,6 @@ def __init__( def __setattr__(self, name, value): raise FrozenInstanceError() - @property - def convert(self): - warnings.warn( - "The `convert` attribute is deprecated in favor of `converter`. " - "It will be removed after 2019/01.", - DeprecationWarning, - stacklevel=2, - ) - return self.converter - @classmethod def from_counting_attr(cls, name, ca, type=None): # type holds the annotated value. deal with conflicts: @@ -1781,7 +1829,6 @@ def from_counting_attr(cls, name, ca, type=None): "validator", "default", "type", - "convert", ) # exclude methods and deprecated alias } return cls( @@ -1789,9 +1836,19 @@ def from_counting_attr(cls, name, ca, type=None): validator=ca._validator, default=ca._default, type=type, + cmp=None, **inst_dict ) + @property + def cmp(self): + """ + Simulate the presence of a cmp attribute and warn. 
+ """ + warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2) + + return self.eq and self.order + # Don't use attr.assoc since fields(Attribute) doesn't work def _assoc(self, **changes): """ @@ -1839,16 +1896,17 @@ def _setattrs(self, name_values_pairs): default=NOTHING, validator=None, repr=True, - cmp=True, + cmp=None, + eq=True, + order=False, hash=(name != "metadata"), init=True, ) for name in Attribute.__slots__ - if name != "convert" # XXX: remove once `convert` is gone ] Attribute = _add_hash( - _add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a), + _add_eq(_add_repr(Attribute, attrs=_a), attrs=_a), attrs=[a for a in _a if a.hash], ) @@ -1866,7 +1924,8 @@ class _CountingAttr(object): "counter", "_default", "repr", - "cmp", + "eq", + "order", "hash", "init", "metadata", @@ -1881,22 +1940,34 @@ class _CountingAttr(object): default=NOTHING, validator=None, repr=True, - cmp=True, + cmp=None, hash=True, init=True, kw_only=False, + eq=True, + order=False, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", ) - for name in ("counter", "_default", "repr", "cmp", "hash", "init") ) + ( Attribute( name="metadata", default=None, validator=None, repr=True, - cmp=True, + cmp=None, hash=False, init=True, kw_only=False, + eq=True, + order=False, ), ) cls_counter = 0 @@ -1906,13 +1977,15 @@ def __init__( default, validator, repr, - cmp, + cmp, # XXX: unused, remove along with cmp hash, init, converter, metadata, type, kw_only, + eq, + order, ): _CountingAttr.cls_counter += 1 self.counter = _CountingAttr.cls_counter @@ -1923,7 +1996,8 @@ def __init__( else: self._validator = validator self.repr = repr - self.cmp = cmp + self.eq = eq + self.order = order self.hash = hash self.init = init self.converter = converter @@ -1963,7 +2037,7 @@ def default(self, meth): return meth -_CountingAttr = _add_cmp(_add_repr(_CountingAttr)) +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) @attrs(slots=True, init=False, hash=True) @@ -1971,7 +2045,7 @@ class Factory(object): """ Stores a factory callable. - If passed as the default value to :func:`attr.ib`, the factory is used to + If passed as the default value to `attr.ib`, the factory is used to generate a new value. :param callable factory: A callable that takes either none or exactly one @@ -2004,15 +2078,15 @@ def make_class(name, attrs, bases=(object,), **attributes_arguments): :param attrs: A list of names or a dictionary of mappings of names to attributes. - If *attrs* is a list or an ordered dict (:class:`dict` on Python 3.6+, - :class:`collections.OrderedDict` otherwise), the order is deduced from + If *attrs* is a list or an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from the order of the names or attributes inside *attrs*. Otherwise the order of the definition of the attributes is used. - :type attrs: :class:`list` or :class:`dict` + :type attrs: `list` or `dict` :param tuple bases: Classes that the new class will subclass. - :param attributes_arguments: Passed unmodified to :func:`attr.s`. + :param attributes_arguments: Passed unmodified to `attr.s`. :return: A new class with *attrs*. :rtype: type @@ -2044,6 +2118,14 @@ def make_class(name, attrs, bases=(object,), **attributes_arguments): except (AttributeError, ValueError): pass + # We do it here for proper warnings with meaningful stacklevel. 
+ cmp = attributes_arguments.pop("cmp", None) + attributes_arguments["eq"], attributes_arguments[ + "order" + ] = _determine_eq_order( + cmp, attributes_arguments.get("eq"), attributes_arguments.get("order") + ) + return _attrs(these=cls_dict, **attributes_arguments)(type_) diff --git a/pipenv/vendor/attr/_version_info.py b/pipenv/vendor/attr/_version_info.py new file mode 100644 index 0000000000..014e78a1b4 --- /dev/null +++ b/pipenv/vendor/attr/_version_info.py @@ -0,0 +1,85 @@ +from __future__ import absolute_import, division, print_function + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo(object): + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. + """ + + if self.__class__ is other.__class__: + other = astuple(other) + + if not isinstance(other, tuple): + raise NotImplementedError + + if not (1 <= len(other) <= 4): + raise NotImplementedError + + return astuple(self)[: len(other)], other + + def __eq__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + return us == them + + def __lt__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't + # have to do anything special with releaselevel for now. + return us < them diff --git a/pipenv/vendor/attr/_version_info.pyi b/pipenv/vendor/attr/_version_info.pyi new file mode 100644 index 0000000000..45ced08633 --- /dev/null +++ b/pipenv/vendor/attr/_version_info.pyi @@ -0,0 +1,9 @@ +class VersionInfo: + @property + def year(self) -> int: ... + @property + def minor(self) -> int: ... + @property + def micro(self) -> int: ... + @property + def releaselevel(self) -> str: ... diff --git a/pipenv/vendor/attr/converters.py b/pipenv/vendor/attr/converters.py index 37c4a07a06..8592897847 100644 --- a/pipenv/vendor/attr/converters.py +++ b/pipenv/vendor/attr/converters.py @@ -32,14 +32,14 @@ def default_if_none(default=NOTHING, factory=None): result of *factory*. :param default: Value to be used if ``None`` is passed. Passing an instance - of :class:`attr.Factory` is supported, however the ``takes_self`` option + of `attr.Factory` is supported, however the ``takes_self`` option is *not*. :param callable factory: A callable that takes not parameters whose result is used if ``None`` is passed. :raises TypeError: If **neither** *default* or *factory* is passed. :raises TypeError: If **both** *default* and *factory* are passed. 
- :raises ValueError: If an instance of :class:`attr.Factory` is passed with + :raises ValueError: If an instance of `attr.Factory` is passed with ``takes_self=True``. .. versionadded:: 18.2.0 diff --git a/pipenv/vendor/attr/exceptions.py b/pipenv/vendor/attr/exceptions.py index b12e41e97a..d1b76185c9 100644 --- a/pipenv/vendor/attr/exceptions.py +++ b/pipenv/vendor/attr/exceptions.py @@ -6,7 +6,7 @@ class FrozenInstanceError(AttributeError): A frozen/immutable instance has been attempted to be modified. It mirrors the behavior of ``namedtuples`` by using the same error message - and subclassing :exc:`AttributeError`. + and subclassing `AttributeError`. .. versionadded:: 16.1.0 """ @@ -55,3 +55,20 @@ class PythonTooOldError(RuntimeError): .. versionadded:: 18.2.0 """ + + +class NotCallableError(TypeError): + """ + A ``attr.ib()`` requiring a callable has been set with a value + that is not callable. + + .. versionadded:: 19.2.0 + """ + + def __init__(self, msg, value): + super(TypeError, self).__init__(msg, value) + self.msg = msg + self.value = value + + def __str__(self): + return str(self.msg) diff --git a/pipenv/vendor/attr/exceptions.pyi b/pipenv/vendor/attr/exceptions.pyi index 48fffcc1e2..736fde2e1d 100644 --- a/pipenv/vendor/attr/exceptions.pyi +++ b/pipenv/vendor/attr/exceptions.pyi @@ -1,3 +1,5 @@ +from typing import Any + class FrozenInstanceError(AttributeError): msg: str = ... @@ -5,3 +7,9 @@ class AttrsAttributeNotFoundError(ValueError): ... class NotAnAttrsClassError(ValueError): ... class DefaultAlreadySetError(RuntimeError): ... class UnannotatedAttributeError(RuntimeError): ... +class PythonTooOldError(RuntimeError): ... + +class NotCallableError(TypeError): + msg: str = ... + value: Any = ... + def __init__(self, msg: str, value: Any) -> None: ... diff --git a/pipenv/vendor/attr/filters.py b/pipenv/vendor/attr/filters.py index f1c69b8bac..dc47e8fa38 100644 --- a/pipenv/vendor/attr/filters.py +++ b/pipenv/vendor/attr/filters.py @@ -1,5 +1,5 @@ """ -Commonly useful filters for :func:`attr.asdict`. +Commonly useful filters for `attr.asdict`. """ from __future__ import absolute_import, division, print_function @@ -23,9 +23,9 @@ def include(*what): Whitelist *what*. :param what: What to whitelist. - :type what: :class:`list` of :class:`type` or :class:`attr.Attribute`\\ s + :type what: `list` of `type` or `attr.Attribute`\\ s - :rtype: :class:`callable` + :rtype: `callable` """ cls, attrs = _split_what(what) @@ -40,9 +40,9 @@ def exclude(*what): Blacklist *what*. :param what: What to blacklist. - :type what: :class:`list` of classes or :class:`attr.Attribute`\\ s. + :type what: `list` of classes or `attr.Attribute`\\ s. 
- :rtype: :class:`callable` + :rtype: `callable` """ cls, attrs = _split_what(what) diff --git a/pipenv/vendor/attr/validators.py b/pipenv/vendor/attr/validators.py index 7fc4446be4..839d310c38 100644 --- a/pipenv/vendor/attr/validators.py +++ b/pipenv/vendor/attr/validators.py @@ -4,10 +4,23 @@ from __future__ import absolute_import, division, print_function +import re + from ._make import _AndValidator, and_, attrib, attrs +from .exceptions import NotCallableError -__all__ = ["and_", "in_", "instance_of", "optional", "provides"] +__all__ = [ + "and_", + "deep_iterable", + "deep_mapping", + "in_", + "instance_of", + "is_callable", + "matches_re", + "optional", + "provides", +] @attrs(repr=False, slots=True, hash=True) @@ -40,20 +53,92 @@ def __repr__(self): def instance_of(type): """ - A validator that raises a :exc:`TypeError` if the initializer is called + A validator that raises a `TypeError` if the initializer is called with a wrong type for this particular attribute (checks are performed using - :func:`isinstance` therefore it's also valid to pass a tuple of types). + `isinstance` therefore it's also valid to pass a tuple of types). :param type: The type to check for. :type type: type or tuple of types :raises TypeError: With a human readable error message, the attribute - (of type :class:`attr.Attribute`), the expected type, and the value it + (of type `attr.Attribute`), the expected type, and the value it got. """ return _InstanceOfValidator(type) +@attrs(repr=False, frozen=True) +class _MatchesReValidator(object): + regex = attrib() + flags = attrib() + match_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.match_func(value): + raise ValueError( + "'{name}' must match regex {regex!r}" + " ({value!r} doesn't)".format( + name=attr.name, regex=self.regex.pattern, value=value + ), + attr, + self.regex, + value, + ) + + def __repr__(self): + return "<matches_re validator for pattern {regex!r}>".format( + regex=self.regex + ) + + +def matches_re(regex, flags=0, func=None): + r""" + A validator that raises `ValueError` if the initializer is called + with a string that doesn't match *regex*. + + :param str regex: a regex string to match against + :param int flags: flags that will be passed to the underlying re function + (default 0) + :param callable func: which underlying `re` function to call (options + are `re.fullmatch`, `re.search`, `re.match`, default + is ``None`` which means either `re.fullmatch` or an emulation of + it on Python 2). For performance reasons, they won't be used directly + but on a pre-`re.compile`\ ed pattern. + + .. versionadded:: 19.2.0 + """ + fullmatch = getattr(re, "fullmatch", None) + valid_funcs = (fullmatch, None, re.search, re.match) + if func not in valid_funcs: + raise ValueError( + "'func' must be one of %s." 
+ % ( + ", ".join( + sorted( + e and e.__name__ or "None" for e in set(valid_funcs) + ) + ), + ) + ) + + pattern = re.compile(regex, flags) + if func is re.match: + match_func = pattern.match + elif func is re.search: + match_func = pattern.search + else: + if fullmatch: + match_func = pattern.fullmatch + else: + pattern = re.compile(r"(?:{})\Z".format(regex), flags) + match_func = pattern.match + + return _MatchesReValidator(pattern, flags, match_func) + + @attrs(repr=False, slots=True, hash=True) class _ProvidesValidator(object): interface = attrib() @@ -81,7 +166,7 @@ def __repr__(self): def provides(interface): """ - A validator that raises a :exc:`TypeError` if the initializer is called + A validator that raises a `TypeError` if the initializer is called with an object that does not provide the requested *interface* (checks are performed using ``interface.providedBy(value)`` (see `zope.interface <https://zopeinterface.readthedocs.io/en/latest/>`_). @@ -89,7 +174,7 @@ def provides(interface): :param zope.interface.Interface interface: The interface to check for. :raises TypeError: With a human readable error message, the attribute - (of type :class:`attr.Attribute`), the expected interface, and the + (of type `attr.Attribute`), the expected interface, and the value it got. """ return _ProvidesValidator(interface) @@ -119,7 +204,7 @@ def optional(validator): :param validator: A validator (or a list of validators) that is used for non-``None`` values. - :type validator: callable or :class:`list` of callables. + :type validator: callable or `list` of callables. .. versionadded:: 15.1.0 .. versionchanged:: 17.1.0 *validator* can be a list of validators. @@ -154,15 +239,15 @@ def __repr__(self): def in_(options): """ - A validator that raises a :exc:`ValueError` if the initializer is called + A validator that raises a `ValueError` if the initializer is called with a value that does not belong in the options provided. The check is performed using ``value in options``. :param options: Allowed options. - :type options: list, tuple, :class:`enum.Enum`, ... + :type options: list, tuple, `enum.Enum`, ... :raises ValueError: With a human readable error message, the attribute (of - type :class:`attr.Attribute`), the expected options, and the value it + type `attr.Attribute`), the expected options, and the value it got. .. versionadded:: 17.1.0 @@ -177,7 +262,16 @@ def __call__(self, inst, attr, value): We use a callable class to be able to change the ``__repr__``. """ if not callable(value): - raise TypeError("'{name}' must be callable".format(name=attr.name)) + message = ( + "'{name}' must be callable " + "(got {value!r} that is a {actual!r})." + ) + raise NotCallableError( + msg=message.format( + name=attr.name, value=value, actual=value.__class__ + ), + value=value, + ) def __repr__(self): return "<is_callable validator>" @@ -185,13 +279,15 @@ def __repr__(self): def is_callable(): """ - A validator that raises a :class:`TypeError` if the initializer is called - with a value for this particular attribute that is not callable. + A validator that raises a `attr.exceptions.NotCallableError` if the + initializer is called with a value for this particular attribute + that is not callable. .. versionadded:: 19.1.0 - :raises TypeError: With a human readable error message containing the - attribute (of type :class:`attr.Attribute`) name. + :raises `attr.exceptions.NotCallableError`: With a human readable error + message containing the attribute (`attr.Attribute`) name, + and the value it got. 
""" return _IsCallableValidator() diff --git a/pipenv/vendor/attr/validators.pyi b/pipenv/vendor/attr/validators.pyi index 01af06845e..9a22abb197 100644 --- a/pipenv/vendor/attr/validators.pyi +++ b/pipenv/vendor/attr/validators.pyi @@ -1,24 +1,66 @@ -from typing import Container, List, Union, TypeVar, Type, Any, Optional, Tuple +from typing import ( + Container, + List, + Union, + TypeVar, + Type, + Any, + Optional, + Tuple, + Iterable, + Mapping, + Callable, + Match, + AnyStr, + overload, +) from . import _ValidatorType _T = TypeVar("_T") +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_I = TypeVar("_I", bound=Iterable) +_K = TypeVar("_K") +_V = TypeVar("_V") +_M = TypeVar("_M", bound=Mapping) +# To be more precise on instance_of use some overloads. +# If there are more than 3 items in the tuple then we fall back to Any +@overload +def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ... +@overload +def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ... +@overload def instance_of( - type: Union[Tuple[Type[_T], ...], Type[_T]] -) -> _ValidatorType[_T]: ... + type: Tuple[Type[_T1], Type[_T2]] +) -> _ValidatorType[Union[_T1, _T2]]: ... +@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2], Type[_T3]] +) -> _ValidatorType[Union[_T1, _T2, _T3]]: ... +@overload +def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ... def provides(interface: Any) -> _ValidatorType[Any]: ... def optional( validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]] ) -> _ValidatorType[Optional[_T]]: ... def in_(options: Container[_T]) -> _ValidatorType[_T]: ... def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... +def matches_re( + regex: AnyStr, + flags: int = ..., + func: Optional[ + Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]] + ] = ..., +) -> _ValidatorType[AnyStr]: ... def deep_iterable( member_validator: _ValidatorType[_T], - iterable_validator: Optional[_ValidatorType[_T]], -) -> _ValidatorType[_T]: ... + iterable_validator: Optional[_ValidatorType[_I]] = ..., +) -> _ValidatorType[_I]: ... def deep_mapping( - key_validator: _ValidatorType[_T], - value_validator: _ValidatorType[_T], - mapping_validator: Optional[_ValidatorType[_T]], -) -> _ValidatorType[_T]: ... + key_validator: _ValidatorType[_K], + value_validator: _ValidatorType[_V], + mapping_validator: Optional[_ValidatorType[_M]] = ..., +) -> _ValidatorType[_M]: ... def is_callable() -> _ValidatorType[_T]: ... diff --git a/pipenv/vendor/backports/__init__.py b/pipenv/vendor/backports/__init__.py index e449e521e1..cf89760242 100644 --- a/pipenv/vendor/backports/__init__.py +++ b/pipenv/vendor/backports/__init__.py @@ -1,5 +1,5 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) -from . import weakref from . import shutil_get_terminal_size +from . import weakref from . import enum from . 
import functools_lru_cache diff --git a/pipenv/vendor/cerberus/errors.py b/pipenv/vendor/cerberus/errors.py index 14e27eb84b..41564cab56 100644 --- a/pipenv/vendor/cerberus/errors.py +++ b/pipenv/vendor/cerberus/errors.py @@ -459,7 +459,7 @@ class BasicErrorHandler(BaseErrorHandler): 0x23: "null value not allowed", 0x24: "must be of {constraint} type", 0x25: "must be of dict type", - 0x26: "length of list should be {constraint}, it is {0}", + 0x26: "length of list should be {0}, it is {1}", 0x27: "min length is {constraint}", 0x28: "max length is {constraint}", 0x41: "value does not match regex '{constraint}'", diff --git a/pipenv/vendor/cerberus/schema.py b/pipenv/vendor/cerberus/schema.py index 305e59ff56..3841384e4b 100644 --- a/pipenv/vendor/cerberus/schema.py +++ b/pipenv/vendor/cerberus/schema.py @@ -111,7 +111,10 @@ def __setitem__(self, key, value): self.schema[key] = value def __str__(self): - return str(self.schema) + if hasattr(self, "schema"): + return str(self.schema) + else: + return "No schema data is set yet." def copy(self): return self.__class__(self.validator, self.schema.copy()) @@ -142,13 +145,13 @@ def is_of_rule(x): ('allof_', 'anyof_', 'noneof_', 'oneof_') ) - for field in schema: - for of_rule in (x for x in schema[field] if is_of_rule(x)): + for field, rules in schema.items(): + for of_rule in [x for x in rules if is_of_rule(x)]: operator, rule = of_rule.split('_', 1) - schema[field].update({operator: []}) - for value in schema[field][of_rule]: - schema[field][operator].append({rule: value}) - del schema[field][of_rule] + rules.update({operator: []}) + for value in rules[of_rule]: + rules[operator].append({rule: value}) + del rules[of_rule] return schema @classmethod diff --git a/pipenv/vendor/cerberus/tests/test_errors.py b/pipenv/vendor/cerberus/tests/test_errors.py index e4d9b37a14..d64da78e74 100644 --- a/pipenv/vendor/cerberus/tests/test_errors.py +++ b/pipenv/vendor/cerberus/tests/test_errors.py @@ -321,3 +321,16 @@ def test_basic_error_of_errors(validator): }, ] } + + +def test_wrong_amount_of_items(validator): + # https://github.com/pyeve/cerberus/issues/505 + validator.schema = { + 'test_list': { + 'type': 'list', + 'required': True, + 'items': [{'type': 'string'}, {'type': 'string'}], + } + } + validator({'test_list': ['test']}) + assert validator.errors == {'test_list': ["length of list should be 2, it is 1"]} diff --git a/pipenv/vendor/cerberus/tests/test_schema.py b/pipenv/vendor/cerberus/tests/test_schema.py index 84e5094600..509f446f77 100644 --- a/pipenv/vendor/cerberus/tests/test_schema.py +++ b/pipenv/vendor/cerberus/tests/test_schema.py @@ -88,7 +88,7 @@ def test_validated_schema_cache(): v = Validator({'foozifix': {'coerce': int}}) assert len(v._valid_schemas) == cache_size - max_cache_size = 160 + max_cache_size = 161 assert cache_size <= max_cache_size, ( "There's an unexpected high amount (%s) of cached valid " "definition schemas. 
Unless you added further tests, " diff --git a/pipenv/vendor/certifi/__init__.py b/pipenv/vendor/certifi/__init__.py index 632db8e132..0d59a05630 100644 --- a/pipenv/vendor/certifi/__init__.py +++ b/pipenv/vendor/certifi/__init__.py @@ -1,3 +1,3 @@ from .core import where -__version__ = "2019.03.09" +__version__ = "2019.11.28" diff --git a/pipenv/vendor/certifi/cacert.pem b/pipenv/vendor/certifi/cacert.pem index 84636dde7d..a4758ef3af 100644 --- a/pipenv/vendor/certifi/cacert.pem +++ b/pipenv/vendor/certifi/cacert.pem @@ -771,36 +771,6 @@ vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep +OkuE6N36B9K -----END CERTIFICATE----- -# Issuer: CN=Class 2 Primary CA O=Certplus -# Subject: CN=Class 2 Primary CA O=Certplus -# Label: "Certplus Class 2 Primary CA" -# Serial: 177770208045934040241468760488327595043 -# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b -# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb -# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb ------BEGIN CERTIFICATE----- -MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw -PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz -cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 -MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz -IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ -ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR -VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL -kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd -EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas -H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 -HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud -DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 -QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu -Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ -AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 -yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR -FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA -ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB -kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 -l7+ijrRU ------END CERTIFICATE----- - # Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. # Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. 
# Label: "DST Root CA X3" @@ -1219,36 +1189,6 @@ t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== -----END CERTIFICATE----- -# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center -# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center -# Label: "Deutsche Telekom Root CA 2" -# Serial: 38 -# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08 -# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf -# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3 ------BEGIN CERTIFICATE----- -MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc -MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj -IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB -IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE -RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl -U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 -IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU -ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC -QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr -rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S -NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc -QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH -txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP -BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC -AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp -tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa -IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl -6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ -xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU -Cm26OWMohpLzGITY+9HPBVZkVw== ------END CERTIFICATE----- - # Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc # Subject: CN=Cybertrust Global Root O=Cybertrust, Inc # Label: "Cybertrust Global Root" @@ -3453,46 +3393,6 @@ AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ 5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su -----END CERTIFICATE----- -# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 -# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 -# Label: "Certinomis - Root CA" -# Serial: 1 -# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f -# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8 -# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58 ------BEGIN CERTIFICATE----- -MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET -MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb -BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz -MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx -FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g -Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 -fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl -LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV -WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF -TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb -5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc 
-CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri -wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ -wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG -m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 -F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng -WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 -2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF -AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ -0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw -F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS -g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj -qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN -h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ -ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V -btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj -Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ -8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW -gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= ------END CERTIFICATE----- - # Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed # Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed # Label: "OISTE WISeKey Global Root GB CA" @@ -4656,3 +4556,47 @@ L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG mpv0 -----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - G4" +# Serial: 289383649854506086828220374796556676440 +# MD5 Fingerprint: 89:53:f1:83:23:b7:7c:8e:05:f1:8c:71:38:4e:1f:88 +# SHA1 Fingerprint: 14:88:4e:86:26:37:b0:26:af:59:62:5c:40:77:ec:35:29:ba:96:01 +# SHA256 Fingerprint: db:35:17:d1:f6:73:2a:2d:5a:b9:7c:53:3e:c7:07:79:ee:32:70:a6:2f:b4:ac:42:38:37:24:60:e6:f0:1e:88 +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAw +gb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL +Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg +MjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAw +BgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0 +MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1 +c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJ +bmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ +2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3E +T+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j +5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAM +C1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73T +DtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNX +wbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmOeX7m640A +2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm +nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8 +dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwl +N4y6mACXi0mWHv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNj +c0kCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS +5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4QjbRaZIxowLByQzTS +Gwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht7LGr +hFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/ +B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI +AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbw +H5Lk6rWS02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+ +b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk +2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47Ol +IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk +5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY +n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw== +-----END CERTIFICATE----- diff --git a/pipenv/vendor/click_completion/__init__.py b/pipenv/vendor/click_completion/__init__.py index 620d79261e..4d6444c73f 100644 --- a/pipenv/vendor/click_completion/__init__.py +++ b/pipenv/vendor/click_completion/__init__.py @@ -19,7 +19,7 @@ from click_completion.lib import get_auto_shell from click_completion.patch import patch as _patch -__version__ = '0.5.1' +__version__ = '0.5.2' _initialized = False diff --git a/pipenv/vendor/click_completion/core.py b/pipenv/vendor/click_completion/core.py index 36150d149e..74da12d022 100644 --- a/pipenv/vendor/click_completion/core.py +++ b/pipenv/vendor/click_completion/core.py @@ -121,6 +121,9 @@ def get_choices(cli, prog_name, args, incomplete): else: for param in ctx.command.get_params(ctx): if (completion_configuration.complete_options or incomplete and not incomplete[:1].isalnum()) and isinstance(param, 
Option): + # filter hidden click.Option + if getattr(param, 'hidden', False): + continue for opt in param.opts: if match(opt, incomplete): choices.append((opt, param.help)) @@ -131,9 +134,8 @@ def get_choices(cli, prog_name, args, incomplete): choices.append((opt, None)) if isinstance(ctx.command, MultiCommand): for name in ctx.command.list_commands(ctx): - command = ctx.command.get_command(ctx, name) - if match(name, incomplete) and not command.hidden: - choices.append((name, command.get_short_help_str())) + if match(name, incomplete): + choices.append((name, ctx.command.get_command_short_help(ctx, name))) for item, help in choices: yield (item, help) diff --git a/pipenv/vendor/click_completion/lib.py b/pipenv/vendor/click_completion/lib.py index 167ecfd8f9..fc195cd403 100644 --- a/pipenv/vendor/click_completion/lib.py +++ b/pipenv/vendor/click_completion/lib.py @@ -121,7 +121,5 @@ def split_args(line): def get_auto_shell(): - """Returns the current shell - - This feature depends on psutil and will not work if it is not available""" + """Returns the current shell""" return shellingham.detect_shell()[0] diff --git a/pipenv/vendor/click_completion/patch.py b/pipenv/vendor/click_completion/patch.py index ab351f45f5..409c192e8a 100644 --- a/pipenv/vendor/click_completion/patch.py +++ b/pipenv/vendor/click_completion/patch.py @@ -75,7 +75,29 @@ def multicommand_get_command_short_help(self, ctx, cmd_name): str The sub command short help """ - return self.get_command(ctx, cmd_name).short_help + return self.get_command(ctx, cmd_name).get_short_help_str() + + +def multicommand_get_command_hidden(self, ctx, cmd_name): + """Returns the short help of a subcommand + + It allows MultiCommand subclasses to implement more efficient ways to provide the subcommand hidden attribute, for + example by leveraging some caching. + + Parameters + ---------- + ctx : click.core.Context + The current context + cmd_name : + The sub command name + + Returns + ------- + bool + The sub command hidden status + """ + cmd = self.get_command(ctx, cmd_name) + return cmd.hidden if cmd else False def _shellcomplete(cli, prog_name, complete_var=None): @@ -139,4 +161,5 @@ def patch(): click.types.ParamType.complete = param_type_complete click.types.Choice.complete = choice_complete click.core.MultiCommand.get_command_short_help = multicommand_get_command_short_help + click.core.MultiCommand.get_command_hidden = multicommand_get_command_hidden click.core._bashcomplete = _shellcomplete diff --git a/pipenv/vendor/distlib/__init__.py b/pipenv/vendor/distlib/__init__.py index 08fe1fc48f..e19aebdc4c 100644 --- a/pipenv/vendor/distlib/__init__.py +++ b/pipenv/vendor/distlib/__init__.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2012-2017 Vinay Sajip. +# Copyright (C) 2012-2019 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. 
# import logging -__version__ = '0.2.9' +__version__ = '0.3.0' class DistlibException(Exception): pass diff --git a/pipenv/vendor/distlib/_backport/sysconfig.py b/pipenv/vendor/distlib/_backport/sysconfig.py index 1df3aba144..b470a373c8 100644 --- a/pipenv/vendor/distlib/_backport/sysconfig.py +++ b/pipenv/vendor/distlib/_backport/sysconfig.py @@ -119,11 +119,9 @@ def _replacer(matchobj): #_expand_globals(_SCHEMES) - # FIXME don't rely on sys.version here, its format is an implementation detail - # of CPython, use sys.version_info or sys.hexversion -_PY_VERSION = sys.version.split()[0] -_PY_VERSION_SHORT = sys.version[:3] -_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] +_PY_VERSION = '%s.%s.%s' % sys.version_info[:3] +_PY_VERSION_SHORT = '%s.%s' % sys.version_info[:2] +_PY_VERSION_SHORT_NO_DOT = '%s%s' % sys.version_info[:2] _PREFIX = os.path.normpath(sys.prefix) _EXEC_PREFIX = os.path.normpath(sys.exec_prefix) _CONFIG_VARS = None diff --git a/pipenv/vendor/distlib/database.py b/pipenv/vendor/distlib/database.py index b13cdac92b..c16c0c8d9e 100644 --- a/pipenv/vendor/distlib/database.py +++ b/pipenv/vendor/distlib/database.py @@ -567,7 +567,7 @@ def __init__(self, path, metadata=None, env=None): p = os.path.join(path, 'top_level.txt') if os.path.exists(p): with open(p, 'rb') as f: - data = f.read() + data = f.read().decode('utf-8') self.modules = data.splitlines() def __repr__(self): diff --git a/pipenv/vendor/distlib/locators.py b/pipenv/vendor/distlib/locators.py index a7ed9469d8..12a1d06351 100644 --- a/pipenv/vendor/distlib/locators.py +++ b/pipenv/vendor/distlib/locators.py @@ -304,18 +304,25 @@ def same_project(name1, name2): def _get_digest(self, info): """ - Get a digest from a dictionary by looking at keys of the form - 'algo_digest'. + Get a digest from a dictionary by looking at a "digests" dictionary + or keys of the form 'algo_digest'. Returns a 2-tuple (algo, digest) if found, else None. Currently looks only for SHA256, then MD5. """ result = None - for algo in ('sha256', 'md5'): - key = '%s_digest' % algo - if key in info: - result = (algo, info[key]) - break + if 'digests' in info: + digests = info['digests'] + for algo in ('sha256', 'md5'): + if algo in digests: + result = (algo, digests[algo]) + break + if not result: + for algo in ('sha256', 'md5'): + key = '%s_digest' % algo + if key in info: + result = (algo, info[key]) + break return result def _update_version_data(self, result, info): diff --git a/pipenv/vendor/distlib/scripts.py b/pipenv/vendor/distlib/scripts.py index 5965e241d6..5185974186 100644 --- a/pipenv/vendor/distlib/scripts.py +++ b/pipenv/vendor/distlib/scripts.py @@ -172,8 +172,16 @@ def _get_shebang(self, encoding, post_interp=b'', options=None): if sys.platform.startswith('java'): # pragma: no cover executable = self._fix_jython_executable(executable) - # Normalise case for Windows - executable = os.path.normcase(executable) + + # Normalise case for Windows - COMMENTED OUT + # executable = os.path.normcase(executable) + # N.B. The normalising operation above has been commented out: See + # issue #124. Although paths in Windows are generally case-insensitive, + # they aren't always. For example, a path containing a ẞ (which is a + # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a + # LATIN SMALL LETTER SHARP S' - U+00DF). The two are not considered by + # Windows as equivalent in path names. 
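To make the case-folding issue described in the comment above concrete, a small illustration (uses ``ntpath`` so it runs with Windows path semantics on any platform; the path shown is hypothetical):

    import ntpath

    p = 'C:\\Stra\u1e9ee'          # contains LATIN CAPITAL LETTER SHARP S (U+1E9E)
    q = ntpath.normcase(p)         # normcase() lowercases under Windows semantics
    assert q == 'c:\\stra\u00dfe'  # folded to U+00DF, which names a different path
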
+ # If the user didn't specify an executable, it may be necessary to # cater for executable paths with spaces (not uncommon on Windows) if enquote: @@ -285,9 +293,10 @@ def _make_script(self, entry, filenames, options=None): if '' in self.variants: scriptnames.add(name) if 'X' in self.variants: - scriptnames.add('%s%s' % (name, sys.version[0])) + scriptnames.add('%s%s' % (name, sys.version_info[0])) if 'X.Y' in self.variants: - scriptnames.add('%s-%s' % (name, sys.version[:3])) + scriptnames.add('%s-%s.%s' % (name, sys.version_info[0], + sys.version_info[1])) if options and options.get('gui', False): ext = 'pyw' else: @@ -367,8 +376,12 @@ def _get_launcher(self, kind): # Issue 31: don't hardcode an absolute package name, but # determine it relative to the current package distlib_package = __name__.rsplit('.', 1)[0] - result = finder(distlib_package).find(name).bytes - return result + resource = finder(distlib_package).find(name) + if not resource: + msg = ('Unable to find resource %s in package %s' % (name, + distlib_package)) + raise ValueError(msg) + return resource.bytes # Public API follows diff --git a/pipenv/vendor/distlib/t32.exe b/pipenv/vendor/distlib/t32.exe index a09d926872d84ae22a617dfe9ebb560d420b37de..8932a18e4596952373a38c60b81b7116d4ef9ee8 100644 GIT binary patch delta 28766 zcmd?Sdtg(=_AkDZz5=AAKnsPIw$K*JJ8hCSY1%XhEd^T)6k4i01!{~ahzX*Hl5)~2 zaZ?4y2R={`P*jcwdHVnhMG6Xn$Wav3dgS2LaI^?Q)yn>T)+8-@&i&r|`{Vw1i)+uC zHEXYVtywd(W_GMO9Z_>7Vs)OXbxr;l@2WSCEje=XrSuNC?S<U*qlkY}+9kb3gn8*r zBGjb6iSWRd%=F_3kDT0?{*<6=L;U7YSRlg5cg}T^OzA)V9EFlpqKc5bgMXinq*RgI zEU8u9R9z+MQ$*Jbk`$JOFb2@dhN_agoK#6txFqGYv&pJK-Sx35DUbMOtE5eiH3NES z8&uNDE_nHpBqgpHkkH$qQ%QE>==f`uq-m-(1NyO6l_tC2X^=@cWeBl<{7Zq<k&EZe zanF&Y?!%FSH>Ht)FW5+Ra@Q0Tgx=JsScN*N3ko1jMl_x+SNH3Fp*v6%MR2%wysW-e zwMMUbJUr5?k)#c^14m1S_x*+KzG{u+DXcGyaVQ>beO>!CMR6)&lH{baIQSijlH@xS z8%%IG^SX|fxJw6GgWbXRb)!VBOQpm~t#(N+Z8sd}ccbl&Hcpi<uDBQu5&xYNO9<~X z<E8QrxxXoj!?~2?N*!_!#{DnkhIPo5OEnb+rM{@$q1zwPFmklyn8a708BmhC26w_+ z4I|lO;i>lW3g=o#tuJghD9+hdu=%Uo{nf4fP6#p_wKnOVIN)3lj!8UOQ52WDLh-co zO9_%h;gxuHDLmn3(Ap?^C`fjOqdi@!GJYaflFH;(Zo!+rLn+oKcOSWRSYudWn=9g( z%KGZlmpA(hx%^XL`F?ajcsZXQ&E`d<c;^J=l%vT!SNJ`Uf)H{(y{;&7tD(VqP4{Gj z^Ieq6@44vUQ;A%7aaiFMR|I5Ng87*n!nqSQ@f{j2m!1zRZFWgRO1a{}f9<}nBz~=* zsK|R%;tDFV3YmFP?;{Bf`<s!$wKL@o{`2pOQYClW+YW;7P+FB$<B9OkXbnt}3wcC4 za(_T=XmCl;jduQWP@)jFtCu9nZ76S3LW2;r6@nn3er&Q5=w2nqyhReLtUcUO#n#6A zeh7s53tJVBx1A^V$D4(%m9u6FCGlO<mb*qO9*yCu-_yqb1>LNyL^t*?cRKS&2eq&q z$>mC%1}cUWcTwQp3a8YKy7r0#-Rt0KqE)ZRqN|<}st{|v?ipWS*yeaI2AtHjfs}IR zot-jKqZ=|kvBX(X&S#>Hphniip3{La!_^|Hb|i*!<t+Jdu)O7Rs}dKBlK2KmF11VL zlFCVxLy0S<5ztz9)uUD7?5!+0Dt=nwXew|QYQMNdBpVv+GiYS^&bO_&c&>989`e_w zok2`4)RdJ&wJR$8W786qI5SB+Q$IE>#^G9oV5!5^rA&#_x3b5g;-cbFF9+|%UXSYU z1*6j};sFdN*VvimU^iAcc-9Q6P^{mbmLZ$+z~;KDeCj~rJbnee+Z3KQO}w@fuT@sM zehwx&`DejIw@7?Cn0O_axPZSMOk6DzA4FmtUR62&(aSD1jZK>_LF1ITv8`;nCV@(3 zZSv$(lh~z-x5%hWs(&Jl!!}Kx<Ul{7x0JXvB-YoaHG}F<+-dR7ZIE5TU;LGnDuyWX z*tE2=jZU<v5|;vQ(9l99z+6fYk0%+_R>5cnqR#NaT56<&kW*O+dQ8wz9YY^XLQ~86 z#h2{TI~m&1><3MndR-3|+hy2nBi<?FbI>7%qje3i5@k*YNRxOr#MJ*KUbiR8cPPFJ zJ=@Eraq!VSLMj`9?hRnLlsTUexkic7Ykf^>yg}^UiM?kJ_F|Vl6MjLxNzpwT-(hu? 
[base85 binary delta for pipenv/vendor/distlib/t32.exe truncated]
z<;^m~$UdL7JL|KoU$Sm8$c8-QO~zv5G~-<3lcuojq?|i)B=br01k2sX?oxi3EzUOB zX0gq(-D~sNp0vGUd&~Br?MvH_HdR5dg7ktM%5a+`HdAlwFY3cHyTQ`MXC`K*z(g&~ ztj~NRb8Y7Hnd@Pswq$P2^lr=iHB-t8&(dURvleGnXRXb8F>6!SmaMH=$Fr_w4Ke5q zMuXXqXRsT_8j1}ihNleA8`c{(8MYY47#}p&80(D8xYoGN_>ytE@f+iJ#vhH@CacL~ zdeF4O^pL6E^tfrgNtN9-J0^Q=_A6-1{n>}JvvaH%eq}jJa{l7Ysljk-&FN)MGN+l1 z7+Mp|lg-o2x0{{j#pb2vznW$9Q|9N)n=miDWxitWZppB0&E1jvR<6z(XB}XjW?f`e ztTSPL2ITj!<=U7{YLKL{Xatr1tp4@PbD2M78nPy5{g^etFy7DzosTw-!L$`+dd+mi z^n=Me875+N_7=;Fx%#~O@?M0I+(_o}BsS0pbZERjnF*Q6nW>qBGlylqh$eeK^V`hi ztZ`7IR7195oMD0C70imK3_lqPjd#N!y#!0zjtoquF{TpJD$IXhm=u#X+n8OIeNXn* z?DaXDb9S3gn{T)HE&IHd<Ce#ApUmBm`&#b4-1c0JwU0H``jIs~FE{Vbyhrm^<+FU9 ztv7o61=~g2t^%nUYc3S*sQ#pWa@I5GtiNTwo%MOvHp6biJB9-&MsFkSbZ9uv%XDTg z$o&6SU|3?<VAx`~$MAsR5xX;XSM2Tp%ab>DU+n(avDi=FlE^5QlVYC(N;kmv3217% z$9{r6g8(B#1dy#^;9-zq&|$CwSZls8;4oA%)Bu(&8HOFe)N=w@vV1W7V<-Vk!x2U) zz-n!Q(E+0eMhw9EO97l@5`d-n0$?%lz?cD8%;=Z^4OEz}m&AC(iouS>j>C?}PQXsY zPQp&cPQgya&ce>d&cV*b&cn_Jn1Mp<BJ5)9GVF5f3hYYkD(q_P8thu^I_yBBPZewc D1!_B- delta 25633 zcmd_Sdt8*&_CNm2!);U$R8Z~=2#SI_%rL_Yb74$CG|@p6q!i5yO=M|0V^-Q|1LZhU z>ejT5njW-c9W~2)siBx)Y1&P#%&5$^ji=~jNJ>7x_ueyLo%8*Czpvlx_5JUQu4k{k z_Imbp?X}lldq1<}M$nQ^gH{@4?W;^9rmg>d+|WzwlNWcwxbue=e}#Bc(DKE9@bKNm zpYYJL_@4+rYk6z&=Lj#YS1f*x(_irOzvB-N^Kkq_Gi)r+Rmst-TIJ*4e)!Y-1pi-) zAj*2kx(h-x2%CJwmxmyX0JN~U#fOW65G2SLp(_*`5Dydt%`arMY(USs0W!hJ(ycOK zt94a!pU~Yh!O~3-R(>N0@vD;K`X0Q)vtvBIzZO9lBU_am+k2>e)<QdIxhL^pMK_aM zvc#U<Gk~@LG6AQ_05Pe11j=L#?E)DmCT5{5VLYO8gn#<8prE1iX3d^6Ll9o?i3C)I zzjoiAP2zpB)q%mmsQ8we-ef`Gfb7o-1i^J8vaYt{wj^0?nYSzgeJl*L28)$uMzX}v zYMB%zv{(dBafkX06}pjgfl-l(!v$*z^`Zc9)Hc}T4mBi_-vj&SmD|Q3le&TqwPc%? z0WSCA4)@|#`XIVPeag@jzUG*10ys*jLXxCvv0QR=(6VSjVByq$WO7iPv$PD<c19TJ zOL!*`jVMJ$9CE7Z!+7PMR;oq@t`iDFlRd%HS{+nTxA@avw|N%JGnxVl+F$atx+h-0 zwiG>Z6de#)MvwIn1b4v=PXP@Qgx1>??t*qh)8YX4;`Ywc4)x>JaF(%>ZqU=cS@Z@< zdjF0jd0N#ChMVDQ8*Fn?eYfMfm5yTMg6kOtH>!hBWCfUC@>PPBP0z>$0o8Gx$joqD z$#Aq}{Q88bZmSl`Sj)!x%0i>c=?oOavp*TvaPy+gxdsJT>GeNY^HS^b%~Iw4-kw#S zh(o)PP-W<44=FM<&Hu?8;4Wxoc~!>X{esqtsiztzdak>!hn7_jMHi{t+>Umdj9OGw zV2BdRY_=i81sfAwR6$-QMaeO&NU;}sA1k-*Mdoz{H>`Eef(|Qf;`O`X;lprGI2RMc z9mlA;f_Ce>C&0=E&a3!Sp4LC->4;>E<z+Mk!vzsBoOFEZ4N$igvij~1L6xR@PFC{z zmU&vGs7TbrH9)8qt7X+rp^W8am7?;Q*tOQSIYOl<BjacfcE}3x;uPM{#d5*Xdy7po zoHe3bwSYI`!KH4n_|mVmFTH*V0sv_rE~rnqYf>*F<|&Xvz|>NCxjR2KUW&?Qg<P!5 zPmQov&qc71DCNP<Oi+<)sB2Mb#9T)Z^7M-82HR=0+)BrHNYc_3smqX?pk&?4U$k4R zyOl~&p?K4Fo%0O_Uv++E8FI}ox6&`i@@{n7Q`0>f9oVYdr1MrJ&ZIXWN=<>Op|@G~ zzuuPOBxL%VFV#jb_)^n(>UnZb-q)GW@k5xlV3gT1`&fjg{M5-zQAtr2<c(~jS>Bn} z^73*>S-E6KC8VebqydezP2)K{jvPGT!We-XUC(+GjZpZw&^<-0o)Xf4NBi=Puu3W( zPet6ejOU}|H33_MPg|UZB$l9`J*NC`EW$fGZd=I7ZmFUpf?V&G5ql8tlu`@iKz*vV z0Xm`7b`hi!x|L*gkBW_fp5rpx+3%&c_zu=H4l=EKRNRjcN0#jPp~{sfcy8Kb!Vd&j z%dPaIaI&&{Kj%Szx?QersB#CpPKxEU5<^jn$u$A_#7Wh{q@_Qw*?{?MrGNa+W?Dp* z%aGz2fLC1Rl+y4u4Yp;x%&(E^%jZNWvT`+wt|o|rj|=MH9OpU&F4O|uSd|318NjNO z)me{d{=T;-BgTi5X+2W!O%3Rj%lF&`9Zcgz1)?{rT)Gr{V70;<RGs2#2&^cv(j(yH ztqpCeZ9yxVAzI$ly1Ama7t3#FPja?Ll#`|S)Z|g7cFU?E2I)LQ6N{(V<?ekHJl?RT zS{6<R(M3hiS{6~Cs&a?kQ&-yx+<G#ofoXR-3oY`rs~c)1bP{A!@dq1$`gTZM-E&MK ztJ^9(XASN1qn7ttLZ*er8g-mwHsh!$t*gBO1!g@NEM(j)E3cN@uKmG8Q;9k;U8nxi z@GDcMGx<BZcCtS-RlN~Q4<C0qI=V`q==g#6bAQ`PaA8EOwDl$zcyX$)GiQg9u&@|s zLuc|4p6p9Ac4pQ+;3h_<%FyVbb+y-76U*EiSx6to!(Dp?Pj!QSBMWUeSXN9Z<{>La zWsEk(9Y1w4s2=`?>qK;2kqE6E(B!N#eBx-!IQCJyr`5wt=uGKKaMxZ!J|5on|CQ)z z_VAIdvm(FffTpV2FOl8xkJ<y81M1j&<#qNfc^N%|PVX8K+ho+qN>8^*Qn9p;4Zp+( zSlY*GEbZg1SqYx9vU0i!i-7BdvYh^iK|Osf6(eH^V_MN6{@QqL2p0P4P}^JytG_9Z z{M55=*o9a@XcGEDztJaSNw4tau`gpma2Lo8O^z_Sk*7u@)q4u-dzIecP|#&vuT<Zu 
zD`>UaSpIZ(JS#cWy9FveqO6p@%#Df&vL(6CpfB-8BQ!nz7)w$`qep_yDDybvQ22dl z1`@0LR=Sn+Yz#RZo+{psA>W1%RdFsCRKI-)UWJ%zqEm)LuqHsTpc10X=xZ_PCafuJ z#jw(f81i96V&b3#Mnp}Jd9=Qu!`_G5z=x^YolcD*-FqoU=MF*(3+0}o+3ud6BAa_` zpr@xH%oaCXu=n<aDPpk96wP&n&}#$07wjnvGz6tbBeAasFLkBMX;Bif_bLqAiT?3- zp*5N`^_tiF=fQ#yexQ|gPWXWkEB$&fxwp4jEJ`7s-Z5c~=wLs8RX_4(@1;)GFu@T` z#|}eRB+~V;PHhq!lI_R{^W;l=pr;Y5DvY(e{V<75u=ZNXF`zoMQXjM<9W@LqrQ?wY zQ<<-ML+Do+SM=+79(bCjR(c~#X%=Ik$3bfnLQ#miA@u$!jR%E@;X>n<hC<TNC&pGd zT&NDT(rsC+`1-9{w(vR<s{_m2ng*oN6@N(xs*ash*{J^b5X-|Gw1x4zLq*pyh3k0W z(%+XrJQn>}eWwx}5#T8I96#0`<O!@@q9Ma0`#V8e^x;%>LuDgP&lZFwCOhj6dl0>W zQX~z_jrM`4|4DFoZ4IsSQkLGChTI>I^?aDoeC+oi*RhBaTBQ+$rDH>dC8)f&yX&L^ z2GC@57K{llQ?LtM?U9T7x*8&^RIe$6ycC2kE%Lnpo`O(Ug90W7d*|*eym5wO^O8)w z#OS5dLM5jpWgJ6WcuxIL_lhZvzIUg!5Pg)Q4^{t##_}U`qfVM1x`pQW^DZgznJvEM z!Sjpy%W}i7jz}8GCM%y82Vq)W%Y;r~h8G>FCHB7KvrnLvo@OW~cX7Mn14lo)4qQzF z1O~$c)FWKm1zcY&pWlnGN_0UVa;tBe>Ii%BgrU(M-z1zqjhWpG+LkTqAa!dJzWNID zxIykIZueY-dJ3aSeMm{am?Q;WLur^ty-ImKcA&kOW}a4WDE*-~araB?%e3+-Unje+ zM?eqM6}QqU<Ve5qVrdTP6|D>#0t?tL`{#O*vC#u%7P26EjyOG!{1AO#w-z({%Syj9 zk<l@+;!j#q6=Ug^$IGF|BFGyt=^-nb@@k17mt)fUgBvxVGhs#eGYtW%ZlK>IfWnEU z50L?}TG=)-B{psFZuX{zF?{Ksaz)($lihF|O90l3XnGb)xId41;bcc_T<<1m&<gjI zj<%7Z7RQ`&I@&}&k4==rZ1M&jS{O&9*kmV<99#&^(Pl|qVvnSzyV<m`JyY=&Nk8R^ zqklQ|n$Yb~2@zIwUuZeKjLN$UuE27Bgf}C;sh|VapG2{Lf<RuOwaAJq%|)ytzFsXk zC_yh&=?mnJKy||@@=9Evm;@L%2LgwC6R;PvR?F%N<akO^Gh4_9as9)Vpceji_3ue; z$Hj-0-=*9NBdPJb)ElsX`g!tzx|*eHvmk9)PBaEq=?ZKw^iu<|g_;^zEh{afdq$C< z1d~idMkM5jH$usxgxq*^1y&7Dz_F{rZTZT;CH5y^<3nQG>D40gae`WWvxtNwjvaQ6 zjly&&9;~?qa&$j5OB4+N!C!?}v3)}oyy<Mg=s{`{Q&TFC%DcjfMet8*z*88y-Q|d& zul6A46mb!sVYQQC_RfkBD#~bLI=QV--qWGuBDH)ZN<0N=akazDIaW7Zf;zxj*vhIi zA4UG9I*}mwt8<_`c_cYr?C~4%BoA_yB7xUr^lx1?ahaAtMG0jd%}gjRHZb1D%dz?l z)oUh$V1JO_87&C)6vh^f23FhfL^rx@6hj>M`d+GSvk&J>oq;vF1ZfqXc5k|s7BbY( zv>?_KT;(z@>|t{B!hQ_apuLEgQ~LLe5RmEQ?$8t=^uQo8C#7GHpCCX!y|J3CN>S## zRwM{@b7j?H8ST@`?B7s#h+4w(-jA*at8WpYbwE8#AiZ&=WmcM#LE2N&qJIRn5!;IL za=IOs1qz8zM}->6`u;;k?Mq{gZl~IN5z+%%0gG@I+6N08O$>6KmAt(@h4IiBJ*>3v z&nUVoUZ`+Yg$isb;=QkA)sH4UQ+3@^Sy5&5^?#Fbsi|Xc{`*hedmTL|Vm+9v;3|+F zO6*)8PsO1&P^8t7HY>~ODO6J3rq^u7fH-=TVSf+ZKf8q-OC9694?~9@msiKrDVS#! 
zRKmK=2@!M*Cp<U~Qxe<OER=&5$gAUMV*(10qHI_S>Nl{3XvbI#<rOxRKNS19<a8tB zqSMe1{Iza$1tV0;=nPgr!$~`A0Ou2owuMPV{WLtRl4b$|Nn^#6CSCW9OhJp@am5jA zr802SbUeI6;d$`LJ>A^Jm*9zcYZP+96qrRl1()0fEw05^1jhdZ%r6=Oe!m*=6tvLU z*mgD2%c$|SwRf;q@zrnX^>pkr>f4hrU&C&&XH+YDCR}Gv4P{T$6+Ej}L`5iA5EjUT zgceQ=y2OLDi#!<eDS{0z1j)hTDG=&A5cL!U)(eP36V-<@I=6Y9O+iISl;PhNmRjF| zPHYNl#j{(W(9jfg0~oZ9ZxipXy@(Zm%Q7tPK8sq4TExb{f-KOl|3I56*v@Amh_)Tr zcuG-rpe_?*5HQFx>S8=LmN)l0L@}?yT=3jopteM3>`5H>jx1y^GN26;NK1M~;bGuq z^eZnP4~Rz(*U9cnKVV)!?{I8xntAfkKPN}9WKr;@REO9u@#I&$q-4;@zE?}xWQfEB zU^+qJDTtJUS`x{wLCM4B3K;Z~07&bXYRSF$3hKKp1@*;x!@M~y25x7C*S*g=WUH3k z88pDTB^eCZMAxkG5g|O;yUXYw5S@w&ZghD9cehzWUG`9G8J+MiMlSTO^0Xs=?~H04 zEn*Vr3FGEj;G0<P$J#byk?DNZXSpmv7u7a!nxhX2ppQ^wgm^y;Mf`4W918JqGW!U0 z^7i2D5sO5KnuoVzA7+nVN5j&pk4zcfX+`-&?@!i_XNtVsbz8EBl+v-INmfQ*@$xV- zD&tKj8|!LVx4r?c<HCU9V?&G~iGa}?>VL0-%?Efar>f7~Pd9w;vp+EJkM_gBmsN}O zJuIrwdC&uqc-t=nmA>Uv(y-b|QOkI~_JAo;3ycQIL7vDQS_;v#_ZjZ`eaO6D>Wcp5 z`n}iVJ*&ee!DtISvj%BX8iVqoHL8177@Ff6`X7VMG^LR`(VwVc@WJ*zFsDIlq_aCo z9k9PLhbe3djYXI#!JLA|g)_=MJ2j#XkkJC?0Q1*chm^{CldVINdxu}aq#YZIZAZXa zb*PR?-;oPL9_zj0J0H1>lXK~!@5p^a`zNzS$gZ%hXKLzKs4~}yK!{$rXCh|9cqwYV zmTVdti<}P)P3ztE?O(IF`YmZ4nuNrNVQD=rIb9WG^%*g2O!#rDARJ<|(=$R&Kedug z!-k6TZ^?(lW;&CQ#J2hBQ|gc5slAH59U4+CjJI7yJ|%SQCPA25i7m!N&+JtB_!8!m zqWgTKGUd1&<7{JoYc`u#VjQ@iXzj6jwkPr&^IPo2^4e4Oxat=5$CGaUVkiimA5JyU z4{Yrs3r&f=eLF+8BSPn^T&5lAWV>m;cySO(Fek|(iOFn?#+a$Oja^<>OJRRor3Haf z)S4Eu%3Lf)4I(Y(3~@vW3Cl|q+s2V0dHuzY#*y)PTb<i3GpT~Du#!#MpJ9#Bm#|oQ z+PCnn;>fx71ebHRVDIO7+=gXeUag?7W6Uc&Cus=EX{-*T@z{Y@xMp<-U~xOH7&Lr8 zsl~D%3&Gsrt9SU)J3=IcE(9jw*KE!)Cilp2n}@UGJ@Bq?AvKoFgw0@ZC&Z(VTzBp` z5}Se;%Em{E+S)?SSo$U1C2U6a-Y;EuIvl;>JoZ=u88-<S-XFs~aJW_$O)7`0Gq>|f zgx8cIm%0LKg`aATpOgcVO~&XW;$a);29P7e3$lK}^6x3Wg1PnZDJ4`^-J2FxU&j@1 z#_~d0O@n<<_<nQv{stHU1ue^$WJVM9Jw+&N(LIk0V3iB6oxp^kSz@!dl26|+c}GXu zcC(JY+D5`hJgWA0PA^ZP&E)GID=oN$?r~SzN<F6asH1eDt#GawUi%~3-xj2ZA-hM! z+>?nSAguLD9){LuBUCrQz;U$)Esm)2j2RIDUs!<xWQ&*b6ccOX;;`joqT`n>Q}iW0 z^5cd;>sh>4B|ZXOBh`lwO;>eRpxzbkg!E`qnm=G1AHq^v#)vctW}?rK^ai?SwP0;* zp_@Q!beUGL`ReFh{Rb^WKU6dYF#;-pTKc5E6c#0hyE*?}Sq%ASftrjghz(@jK|ahc z;H2@QJdsERDTVt!hbr2C7%$U%*s@pQd3G-%Q1Sir(T+Y=y5=s!LD(%771I9QjC;{t z`eUDy2Md#&R!}_8VqYk{y9AGnCMz{@(m8)@7=NV!F`5b#)}H6tPZ2Yv!*ODd)30YG zvoeoDHB0+h61{y9Ev%?}+S*hg4+deWvptn@_wt0Gjy+IESV-p?Q-A%`4tQZk2HvH= z%jmAA2&q`^Xey(-FOu|;d6}`knKGaCou{diIqj!%oAMkJo=0In(r6J9+KO39$jH>F zr66Gz(-(XB=c1&s#gWq^hdOJ}r;Sq74g|*o1&>rFY35=}Cq?ZBg&#}ANg5*r#kW#; zTDutbGKQ0tjaz1eA3F<sUzm0IU7XT6R~So?wy^wIdQU7s2l0|CjF)h2Hxz{eW<@Kj z^t@l-fi!;4jG4nMV~4z{aPsDM3wfzX(HCL`Me-svC08WU0vlHbXN%HCB5!x$Y-z!4 z`VQO2cWxx3SkI>;&qlR05}D!kN~V`sCrsoBnu=*|L|Kw%b{sK`8WhHAtFic7Kpq<v z8GC|NcWj{K4)%5{qa)6v1a%u;YiJ>_j7oJz%TV(SY%I7xFTAE5rFsfevGQO@+dgBX zQ<&+CQC}?E7yH5&Gx}nueKCtKcGMTk_r-SmVnx2#-+i&kCiIM8SD}<ujDpt5S);g? zecDzl*<(>jzxD`UD*1FrIi2|#v5y|s4ehi?E|pfsk{zQHouMoTYQr9d2>|b^SFjU? 
z83R0&5+5#=|9Nzn>#ZBj#-0<B*NC~=jE=G`#PF3%=WVlDSg&IpdEPdSN4Nx?w@u;^ z-o@u#CdMH+JsKuS^-OZ?eO)HT31=RxS&m>CccgA%!dTBzwlD!|3Sw_I-(!Kz&bauy zJ{hj}3tM>2F^q*za;?&NW;yaXzYR)8(y-S(8YcSE`C=cFzq-Eh=S4~9ed1j2t1d5u zUgaAp`wWkX*-bWgJ`*3_(hJhLddN}Kn53k;as(m<E)K_`G?RJpu$?;5LQ2Qvh$lZG zE5>AtcfKOW$E3-=AaqQ6bo5o^#>F2mMcL$tV#+kPko4kYSqm9mJTP|BkCIeIzZl1M zo$vAI+fZp`w7!^liq*y342rR>WM<G4CJZ7n>W)1#%nOhz=wT~Xv)QoIZ4>aITcA1l zsmp}2a$ET#P-?(%a17FpTE8JZ#>P2Wj#De`ake!q&v1X9q1EgZ6P0CDbpv#d!e(KU zVG}k7)pGiKcfT~kZL1GjgzgTnxrhO&UkG%0LB!wf2wjUqu@G+N<FpL<cr>g}rSqPP z?kVTs_q9oy8kv^7I#$;Msf-SSmRuW~I(RN>xODNAMF{5ig2TuI+s7_K<rG{A^j>W$ zXj#G;T57%_IpYS2i&Dt+aRbENKa%C+77pC~6Xs5Li>tlwyq{RzJ%R7-1<CJ{Tzb6& zGQw=XLDoTuF(s@a?XvKi5KyZFsGJcXlMv7)y~$i_U+49o(VdY2Y|TKU%4sv&4#NN< zS9=g`v$0d8qi-RpthB2B?NqdcpIN<gTjb_)lI<`ufMaQ8=X>^0b%T}tQ3bXiWpwXA zTz5bvj0*;r(SIOP7g)gNf~Ps-!|DLrN59+lB8QC6ORcmHBEffEjw7}mARcA)f-g(p zbkxbQP1+ZTWdpS9SS)!x9jj~+3YlP=h8N1|L&coztt@4K*PeigRNs)IbS>nOl3dFo zI8-XEb+vNjVTDO{8Y9+a!(2GcZI}SM3B|Eqvqz!FR<Q_5PU1cGLrGfHcDU>Py)9{2 zgsm+kd3?Hfv4z|_J}rd#(=>Jsd3^jJING+1&rbkl@x$-dAgH#$#ypKa<!5Lmt|0J< zrG-6^wCL~NU{3<#rKN+!Pg}^-rTvmX_7t?kyA$jw=vXc+D-7iOzPiFtOsyp?WN&GI z6mhY1XrzucrQL;7;CrAz%}}7oB6dQi2(F?DDXdab)VUV&*o3&$56?j7w^u+54%7$j zMhg}sp)*>LL*26!U2Xu8?4F?M_dlg|Oh_HPd<<5Lb~HnBWa<k-?f;vsMo%0#>MCbv zBgXk7{x|%nPXIG6Te0jqj`%%J5w>Y;h8nJrw<pFujFXJT6FfKhURNK#+g{MQe-5|O z%U?h(uoXK%g2^aFE=uQYU7O!aXeXzzE%L!rVCFIGkVksAbgsbIC7|c8&|BXTOPR6T zCAN55X<jL*Et}??kC>JI6F!29n-?qM8khycyke2IC9IA=V~56F1MbC_;2pR}1Rsvz z{^A{QA%9+MUEprL&R*$(8yegHGHm}FA%_(e>W}YnPweQdxu+<!?U>6xOg~ZX=tX9h z8@n@|!h8s)Pm|Zn6UDjTlK0Ewod+?PF5_>9p~Gd4gu1}r4gz!QxX?&SS5q{T8n)H4 zhZef-q%Qgd8g6Q3qbrp&LKk{}@kphf1lC7oN7>8e9y8>rkL&3`nLVsr`lOt$XoBfD z05jyIeCeHw@etfgoO7xtn9Q2gCx=n4R^IR5{}srk?DtqkQq(kL%I&=Etd}Ny!+L2X zJRIsT^$F1_<c&%F<H4s-#F1H(_mS#o`Zs11dD;!P9pUs)3;AhMf1|JVj(+9zos&qU zH7&lqB;0>fu6TR6&EAl5x|t>Y=Scpmg%nrB$l}O@6^Y{H7V=z0YW6Mc#r)3VjX)uE z*H*HDUmSrm&QQZ`dkoHVqUa}g*%o|FzOE=0!$gvO-_WoTcN4B&CUfpn_73i%Tzd{V z(YDLv&HJJ<Hoc0~=?JtA&S6~bcN`<>nahaiQXCbu5tv)&wR)n<%WRuoCBNJkm-&)E zHI2T&Qlq`_#Xb>-gL?LI1bq_3CLx6tkZ_sk?jM%K&Sc@awug8Yvl$jehk(WBOaAC9 zNg@^fRbl@EQk_!amV18j9wyW8A0%FBA$9i;be7bONtI_byS@nzI2L!R$FYiQKi)40 zUDS|H>ZhP`Ph<(}o-*1E=7hTdPL9xu?a#m|U5Emj1U7YCV?!0JH-|yfZMWFNXa-MW zHTHg2s{V4j%-+{s8Mr+nyyiK9<V_x!o`o@kbIQ<a52a}ww-{O-VQ?4G#3`6SP{G(; zzdxBgKY6J5#u##RvN{?%%^T!D!wFQ_Bdf*I(h@rBe$s79if9=_hD=E(t14o(Ut>e( zALeI(vWl?{*WF=`$1q3JgLm2P2TDSdrUqX51IV1Dn7}56tVkLX)&ce6&+L^iFy;z! 
zH7TO^yIs_49yTm*e8I>4;X@erRd>3^eH48bjBMPe&_%#7?i2X9KYWN3DH2Mp{?viA zkfla@6EW@y?CwWT-^dRG*+23hvn^b+#2qwzRG19FixEY;b0!=U3v}n_<l!kg@k|BT zHst}<VYF8WhNmT|;#f;?KuE<@-hm{s@-g#uY}Q-~fqsP0KZ@w6#X-2%$8Lfwk{ht7 z4HxDm(c|E7N7L<HjP%}%<V<D1I3#;QaE!Ekj54t8m<&guwLaE|FA}Np0cV^qCgU&i z`fa^{`z~6~@sFsxI09$10qz2J`bhU;%jdZWr)n!sG85@pu=$TWE^rj51^l$@(LmTW zGT3wMgq03wC&J<G{GRF9+qx(8%=Cl@eAqTTu*lPBe{%UK^7T}=NLxttw7sFUg?pU3 zvrQBk{D4_}X#%N!V15j=mtd#M`-Ja*Km4e>1<~45I7+pDM%o@2Q}izP3rApO$e7al z5ygdxP@D<2!Vz}&>I^P0LRhmqFE_-n4T`JyI<6tM!vDqgR=wUK%csBn*D6?Nl!}$( z$+j7zVvML-bx<Yl<MpGDe$qJ@bipSiWM<mrW~MxAL52&~b^DGZj+zi*1DDdp%Ore1 zdZ_lxg>nN-3XEDT?dxZ|{c%^8n}B*h_B7%kyw1oD^;g#PC7WjUj6RD5mgQc~%SOt3 zpp>4SNlwg6>*c(g!E>CCoU4f%d>v>b>QqYm&BA=`Qvc|SC~EA=hkpGL(a%bq(gPHq zetP30wB@9?$h9~ETPVFFbY3sqeS{Rq>5ZX2Ypx0X>>J?}HtskIWW4WycYaTL4DWZU zwYRGKPQpq|8fGPTL#uHW;abls^5ZP)^sX6FMlWGL;uwyZ!fc1Jh(7Q(TFZ0~KLHG< zB|Ogb&or!$5<LIIg`0(;p-^Uyv8CncD*0fnvw6Zh%>C(5l+qp8IFL`36JnC#yI)!r zD*c2UtD@O`NmrFzsHmJm^i}=EOaCU7Rq=4%JykU^`HB5-beuYisZ_fL$^|nwgT9TZ zU%5;<OMa<JlN*p!@iq>v6CX@~sb_pJ4bSNhR%S-GS}>2pne7pNwC0!&(dv+6?SU2U z+7)=ma4h^e1#EIDwKi?;p6d@riGjVWf<Sg1jv<NF-^vn*?BPdc=gHiMpAEw~w7a$i zFIj0^1__uwPb?op9-sZSn0SRudL&DPdFy#(xVS8leE7&<aZMuG($KH_R>of{X;vog ziF>qKCjKy*JU8cY<SWfNA4qagl(Vrf`FZXX+3%$Iv0>tMEqVH}O!26e?09UP`04B9 zkH<>#3SVP3_sVx4gt0%%BK!mxQ)Lm>1L^_S05LX;Z_#1ut&Coth+$)fVFc}Sg4E56 zabCxC;g&DkD6+Z3x`bIcRvK`E%{`n*$9Yb6UR=SzT}^_Hs$)mpia)x{OUmZet;Rpi ztG5tglVBafy9#m5f6S{?>hbf<?c$?-fd0qCik4N8>GKEl>U)<fk-x}hkHH1hH+y!m z2Kr`?>v}|~XG#P#V2HZmS}??qoisaQ>G9)TVlwMEiC7XfeDht(2%rZgNpfCmL{-b* zWvJon1yOX-U7jSM*Iq!fgj>MHtu*vCQnh48&xT2CcHyRoBc8S%BNvuv6h+vo31#86 ztJuWzmHz6nuF^*xBbiGR`#yD-k_Z&Dc`62r9P2%i>|3hLoQyo!&5aIEa5<cVR*y-E zVJa=Hh5(gH7uYTDM*Hw;5w}CXB)y(V7UxVN+0P8<x#3j|pVtdtmq$YriS3!Jf{|F5 zu`a-8%TDakGeK;4XR&k`1DY}_LvwNO(^OHnP-aiT<#if~SMWjrL}GYZxt0F?8*=%X z=&|6Z9osO%Y3x<O)gO)-b{hb_3_G~uewf7)zZO8xb1qgMzukcQC%*mdwWGMG$9#@a zH(E%(E7P2Tb=SAA3Inl~$z{{j`zsr9s6t2jFWFt~Qbf2vas@MzZ&j>YpnP|^5}k)Y zsg~hrem6_P{Q-M?8QscoZ1{?hBgTfWSxn|3=-6NjNd0f|t~d$#7GS#L!pi%oD4zz* z)5<m~5jMGq=6?Zt*DA6a5mykz{qdv3TGMZkZ_#*|Q9-mK^vwbK>yiJKZL!cwUav_I z?;IhI9E=a04{o0{Z?ckKYU0If^(4G@T=G5~|HEeiPo3@T0_H7Rh!%A^&DmaNkFz#k z#=SO<Ao>f;K#V*)vd3L;=*MWbt}#J0n(VTXWponyw4%-)EL$F2)8P2AI&fq;{RUo6 zwk>s=nAD;DANX=8^{UJ0K)kSo$;Kom8?sWmhxtS++#!&SyfSzoHsFGvi`?^EpL`}B z8@Dj=VC4(0f&VM<U`5c!ITu6!Djv(<CNDi#AwDya+<5LgapMH?`Lbm3$qA%m+2{<& z85%eT>dEI|iNJbNR=O-95?6kusCM+q7ItUBt(l!d9&_i5S3e;K+^MPT!hxG#KEO_m z?U7;pbsdU_mii=7at{zYTFAiV31aytq;R=n=*mrKC9X%{(l{5&f(|dc1<!CM5Il>r zgVQ7^VLZs&%j3J39kj719*Cx5#rk@AiWcW^-dSbrs{vf0{{|B1=uqP$kSIBDbt`iY z`ocgSXLl%Z(HuT3PkJO?y?NfPVYdP*r=mbt+FT8p?-?PEoInnG?&*6Oi=jK()l;I! 
zfcXrAS^G)Eiqm~N_H`zG4a}G1+DHDqVvsl{g(TG7s~H2gj_U)sxp5yXnNA0nOCh)u zX#ub%AsPZhbAjSs-85(i9Bq3{5#NxFb=B+^*q>K}JDo<`>AmPNx*1P)v!?x8RUlK9 zEzGIHr(nKE``=T}x1#v-z)=HzmgT}<<1@F-v6B{q)FgzVO5V+`cJl1=(S2I-F@J+S z=U|i{yA|Y#Tnkn3=JQEd=0AFVNW{zVY-4r9=NY#hG4%8v(({G>LtloX@j1U@t6YmO z#ya|VnqC@jm9%uY&n*UhdYl`@T59-t&(s&X$%eEk(McyFdK|Z(#-$*zeh)(b;~*It z!)u<ugRTvzE4ack(S1(Vt}N-du=9=Zn$hSt-&-@3<d2n^;)XX##;SN@3CtZ<H{@^} z_Q1)4JEGUjbI<XmcwcTRxbpKm@51?bzo!vLQg!(zUd98-#I-5~jeK=gGM>$=ba={E zr-?;y7_Uw&ihvAZIJ?Z~wY$*|!MHYwUiv-A6I!<fV~0IVUA`=EiQ*1w{TwD>#kDQB z!Kd|n*vUkmUmcV6<Qve`aL=TRn6Fcj6L-(x9-$+ee=me9Rek91VDd@V#y7~t)zd`9 zZjwv-hohXkh4jCN%qBAl?(fAEAv52NfGrqK!RorsK3ZX}*QIOz*_}QM{RR_u%s8T5 zJ3ySYnpCb;_iP2#e}RB8zOr^f8Y4L(JSCx?@uB(>xg+SH47oLymb}vnyUg2zBP935 zlwg<x>?OtDMvBT7^3dN#_0@fd#{S2J(1+KM=D+FK)?VCD9rHAE^>x6LVQUIuS4Le; za&N4a?jodi!@Z^sNHfH{oQ{2wOX>|;00bV#in>`>pcs7LlP!9I8EJgoLKV;`W--LC zB4HcNPz+->CbR0y*_fv_!gKYX)tR#zIYS)(bUziBQ|)ip1?J&KbSb45$d!#L*)x4L zDW%bCJ1cO#s{-Sm|6f$#=NCxsrhMlxR8CqTrvrHyN|SgPLHqGAl7{mznuf4YO9dWP z&~{`gEl8!UJWQvQhncj6he~>phuQQT594Vw4|TMGhemo3q3eW2axSpYot&0WxACxu zZsg$@x|WA<Yx8gdt>xh)x|oNPse^~p=p#IwNoVoUMyK#_Hl4`BxwM#vc3Qy0h1A5u zMO1^($ttmb2_%LNWaRxd42|dLGKTiz=n94gb95y`Z`UziX3El5j;?3u_Z;28&@VW; znW1Mnx|N|PIJ%vohd6rJ$?#nqZ(wLWM^7+xJx7}vTE|g{pFp1h%IkcN#q2zR9W>B~ zIC_zxl^ngq&{B@JFtmuHR~TyIC<W@IDvn=g<bfRJ$JX&2ZDrJ69KFHNV2-vk^!5tY z104))<*0yZN5AJNJM*AlaFl(zNzVf1b!Iz2dW<JTFm(S4C(9+0;s501Xoha)XgovL za#X?4<s40AsFS1V47GEVT@j}baa76BN{(hTw3MSdh8A(u$WRkUEi&{!RdGC@kq2_L zh@tU7d8Nm&SWli{WoQ6LCor_#!>T`tq5tCOWQKmr(P<3502CVn*o<9fE1ZlKo?t_e z9A=R{#A&5_!Gn`xd}<NONwBbe6uBWN!cST0qbLnQIzQ!UPFav`&=@(TjLv41zm^nU z^CJcb{zXF&`<#{eP6~ZFMHqsj8N~@wwvQBP2#WVJrT8e(h9HHX65*r78-h~(lsn6L zwH5e6#80{Iqof*wGX0b<d=#A_$l|9o`zS_35VH{NA>*xdzmH@w1Qq$2w)!afhM+Ni z%4#2_$Pi@pQ<iYbf-wfo1WqZVk1`6{Pe#?x2>rOeb1_;_PY%`(7B|$BEA<!DSd4JD zt-Q?jL%8c|xZzIaR7cRYJ{Cc^*%W}Tazq}xCc3^2aQ!ZyH|W=GQtH~))#RtGeG>y$ zV>wE#6Np7viLeIIm63Q@05eyUlx+h=aNfHul?9J&i%)AsLft6g2Lzu2P6JK=4g>ZA zb^x}nCU0#kie#J~iRCN@L;}(Y>G|r+uoY`q!p+Z7o^cIvz8ceaB@z*12`e`PKLNN- z$gWqD#1(7E*;mt9@cXOEG{%9_EeMJLR=^~{G=L2-7vNe$#{Q!y(pSg^5VivL0-D#5 z*Z)yQ=C1dU+y5vEwPBg2H(&wNO}ogr?TW%5j1cUwSFl5|HLOK_qS#4#eJU*PE}vC> zI%4&i?4e|jY&`r^fLV`W#@{Vu-}cNXmgu`#!j6!av66~5k+$uF6UOhvGKTA$7vYSL zWZM=t-;T0ux1?tf(`y4}c{X<KqK5+Y9y-c$+rl2+zDHTvTiC<f{U|Ga3wywnLN4r7 zogFpf3bDGuwYUYRyXQgG7qmD+a9}xPk0j{}uCTds@i1wAO+Q5WJD7_vdBU=R1cb4u zzWCDO7*Fw~hfjri$|t+?d&#AOOWv<&_&>?e*N2J^y+bNr&otlMz_xMhlP|Qo-iR99 z?Sy*E-6%dH^__l~OB!|*&h8%IsJr>~WN~vY@xGqoEII($W<LDBbt7PJ&NIVv+Cf!* zs(sioNwn5wAM}p}&ja6&oA@1v3+(gIz%uDVeGT$PO_+bDbJtRh4>9Q`Y#DHI3YU9) zW69p<%-AyqRYy;%OlgE8j(w(ttAN|tC~f{5<l&cHP;R){$IP@2vMRBk{4mIk^xQ6T z=AZFS_5reVU@m5-FI)F>40kmJx}@}ZSK%J;TSTca%KR4;QMpnK3%)}tE605`8V>iC zYu0sKZq7$<VZ^9ov282*A6Hso-?Gni!fV({;xRR2L~ZMl1cwN_?{SN*tiD?wxB+5Y z#!~88F46@SxAS;WS9}tWkLirpPXLBZZG_F@%M$O@TS42x9-d<A8!y8#`$&Bl<F)63 z1NYilYCF5EP)kniP{y+Y3<cCa2&u>}MLOp$xDF&{%a{xju`^oC&me<$_809>lW{v^ zL#MygskMS%B95JfV#G`2$WDFeXX`uZPpl{6E|*xio;<f}fD`>WrEv@UNWdM&p16kF z1FU`z=&p6>y=KWTbKLpVgPjL$T!P`Af_pm-_N5?>h~zZ*X^^ldE-v>fL6a4-ujk=0 z_J4R7%{rUi`sw`I&QeBu?3Ttz%>SF<y3?}YgED&i4dg*rpwP~*>zJ>EJ_UjYn_D9W z$VxL1tH6D#Z|RJ`(EF?-3*Sf+hpr>9ypbTjt|3R?=&#Vd$5zi!=3>MpS}raJui%?W znuiNs%#@ey&dzuZ7PnhRUqIk*w+bjz86MoX^X)S4l-9*F_x!ZoYUjNyRUuI2-4{NA z&3r8Z+-><foP&k4%Ps8E&a07p%U4Dx_QlqSN!3dtvJ7a;4+&fnUb6xbdKJzsk7c&{ zl)ebRF`@V#pGdDcX7vO}*&&`^@R2!s4!;R;EWBnKxDCJ9dxh%i@)%c*6$E_E{|)K? 
z<|BbWM8n~`=FI_t4>RQGn~K1Z4EgrWVaylN=dFR_=@w#qtDFUE-<l@wRFRu+Wr#~u zBz4bMj2N)@A`8CS`yqQ_=e}Xl>~aQ%Cp2JRI?6L#c7EXHzcD~=?8{5`Cq{W{S)yxO zKAv!5>k#<4ChQ*&U$T;QDm##*6+qoeZ=SS|vC=6k$&33_!+LRsc*YR3k~Hm4n)Rif z-vWuSH8VYthV(8E*;m**xX|;*5&qBqV7lMW_O~uJY6kkB?qG3MGMRNCt=I6oY@_&_ z%pW{;7f_!E?ek=e9dp@HR-qEm{T1p%+hEY&t;iO%;!g)Yz3GLnWcnmf?-zeM=m|d< zU3f-;ocX1@khc6_obP9qQY)ijfl0-<LtXdq$vykH2$x{5zyo|mu*dKn3@#A6<#gKf z+~M~b^7mC&NG5O3PbG&AMu}^WkaGtkMduN6^<WA=^oS+>4;hl9(Z$pTQ`}v<0Zs$V z*9=;Ls9*d$RuJc*H2D`eg!!+7kJdxe<2FO1`42ukb%&9`!wucS_t(f9uqZ$Bc7O4$ zxAv@lJ6a4}3oi>h?hrQ|BBu^3#3v6C@8P5T;3I}K9+@cKmqcQZ{^j&z6D2#3J{ou# z5<`;Sy)16sLZo;3`G=~(fBuoueFutU2Os<#BuQMENa7w<?D@8#NESFpju(d>U%*d6 z29XbsFA`(+kTH$-i-+GLuQiSmm&A}8jVt<p_a-Fi$R%VX*#m1O=hv<yn-PI!C+%Yh zz28RaPt4H1Q^kZ4hEVS779nT5MR*AC9-!B279jyJ1F#lpr~mv%hP_wl?1m3X@f{aD z$c|prxU^GHJWZYp-qGCa-%)~3{4mtlG0&I_9zGD?Xk*fKv(lk>r^0*oe{ysE!R{qZ z#Px67uaC6J10b8Jc!Rm&rd)DQabgj~x4L(5knjoAHJdgE5&ERn)9Cr+8W-0FoKNBk z6hEK*r=M?S7avsv^?u}G-v`$jxyJ{Wg81o;Cp`S<n5;n8H!}N4D}5H<rgllP5|SKG z=ATRm8+tb<?Gm!_WMayTpzx0XkFz&?D%p3w$G>Ethh8RMoiyjMiigvU&dv(6dv>*V z>)M+>)0ITifO;?g#W6)Ty%;m9(<Xpz<Oppm47HSp*J`oU!|T5TJ^K#(^pxFF8d^p_ zVFZ+EPbs5Kz;RSn#?NEXsRwqfUY9iZMIQ&}Uj8TUr~iZdQE>MqQ`dDHT>PguhX3ge z&40X+wu7u*mn!}mNe-^lin}(G)^)=MbwDG+pz6Hc|Ne9)hTP0?HOsJ61WkfnfOWz{ zeqWy>4xLF<FU??1pmi^4#2*%s4-p_1^s+`wok5H*=aZ*iekBSQoGkWW*FyX(KrGBZ zIb}b+LMCU$lE(R}SgdA}t3WQg%u<CT+?6-j5mBAzX{1j}B;3xvcTn3WCg1x2=EmQi z<eCG1KCU@dAnMl~!=5Cs+2;p9L69UzQ~*;5IgVtp$9sG3bHs{bzYenC@#~nIZ)p0G zCl)3I2H!#jUS7B|@Sb)cL!Vd}&QC(wC)HN^+h?Hddt!QE-c2BJPp%eAULbEg$+f|^ zPjYRL_7p!2EqO`-EwE?)Q%}nRgWACS+oCFzGI{Yfarti~;OWU?=5OSIrwaq$`!^E* z`Lsf8xIxZ5{h(NWgJ_)!F`$f0a?YGOeS&|o|NXE<`1{+~cL9dIV-cPLJO?-fIDxcI zi@dBHGFjo9Kl4lYEaB6mbE<^nl+$JGDLFL!?&7GX{_*L|5DIa}2df+I_x7b<KH4ea z^t(sNvL;0prsDik5BoCS^9yF0Ukdr9jy4=9$?RH}Q7PG#EJuFFEaL-v#3;Lbl%r2+ z^$oAQwE8!1Kq-DSC(2ipD>N9Fti4fm;@zUAJW6^t$BrBh3eyV%x>C5Y_osOlOB4J& zViynH_6X+NXTH`lFEE+Y-0$9Wrhj6<gfGKAO|14eeR?7w8+KFI&IvcoD&-DMb$2=r z>ka#4kHusDdZ+TE=FDWgQkSn|mn<hkJF<&_U!d4V^_R#WZ}(b35>G`}gUjcEW{m#Z zZPB(OR+mz+vrovGqxn*G5BeSRKVU(XGsEryr~%MctzdOvzrA(QC0WAj3?O^5kFfgI zwuRRm!g`L6b3B2dkj<wO#iA9Y;Z!WU(0S=p|72FNwnB_Z9FB5W;jWVwugkOeqQ}vK zDu#QA;{7Q2)V1#`V&k4eN%%Fb7O3_I>Oy%DZW+IPI`@6{HTbUg2Xv2`jTWL>CCka> z_m78ULR9K!Ysf!7u*A)&?zG?g0K@Sv%<E|81(uZ%@pO9BlQ7z0=+acA`hUnErkdoP zP7MA1A-tKwej;S!L*${;#mu-ncsgD*wvh9ulf*aHk(;OW*^jZ_!3NG#fO~FF_kebY zrc*eLeP@MA<KqncYzV&<5(?FU->CV9%si9UlO4)>Q_HzJ`Qufx?#zJRjC8DBym^ti zl1^p>%}gO@&tybC?n1>|(Fu+Ys112_H@fg?((^;Jv%9TJf9!k^&A|99LC=@sS{CHP zaM?bq+Ehw^D#q(LRy5qPPc(dOFTw9J=pPsHdpB4)x3D4&K{h-c-KiFete#U&Iu`Pe zhH*{BuEwXt!)WjANTb;3(9C~8eKkB$PJ6IJZnSVLeda-O{zJtu?K9|$dUl+MSH5s7 z^}cV@`&0+gIG*Io+y1Uw>A@j5Njp1CR2P%7vjfx5Zo{hqjaKT#O6;%geb{KTz4jh| zS~;D)jBGnQXi&D#!V7m5-;n52Ff@Dvhuf25kc-~YZinXRpsUA_n`ejiV};Uh*aFY* zhSz9(tt7^CLqd4k#-(KLxx^TTU;7+~NoDMhM$G~ra{2aN@{e<gqHzpqI_D7gd`HY5 z#ft0j@#9A-g%z@k`7;Rter5@Cq#r#nlT{}a!$5b;Bs)G*i|fviZ$28D<ma6LUiFvP zo@ERp!RyimIeO5+Gs)18(~38ID@puZ3#)5*O+G5X7Mq>Fp;ahGhi2cr$bQ7F=q3C% zTmR$ki8t7Fl0o!?8LUutG)F%H>f7^?%^xR<S>KX|kB4BI{PgjgNe8eP`q!KLfa1at z<ScA57DP9G%WBFV!)HM<=aNP~sPgK*^ckQPZ9$tc>Y}|Deg_PDKMVXc^;3Cg0-N?Y zzefN!3*X~HS7Mp-7tubAi{un$wA|g#jzRlzj4#p(yy`b&N?fMhDB}*!m1hQv*kA5W zB!|x@WE%N6+rO=XLFPZj@n7Fu{yY}auIrmIv}qdo>XSI}^=ahRC;90+N1<ePDGC=d zw{X1cLgvdYC=~;R3z>y2r0UaQ;^1>+-KXjpXs$nB$c!pw`_K~l^=0zir-Q|}&XVxY zQZcXDccxIqQ}|C|cyTF%4bWJ)==?u+aIk=hFFRMfZBIfPu21|5!;OocgIx{|#ohkW zQo6K~f5F2x*PZq+F<!vWp50;|7IuPI&$^@$UE#B}@Dmbu;22{!DafZ6?g?1YV%d{? 
zu}S7^hs!@WNG3!B1^_I8@qjA8BEWjUUcmc+7C;A}H!kE10B8Y)fJ(qzz)#&}!X>~~ z;JX2b0iOb{1MUDK@GZswKrWyRU=M*8e;JV7fCj)hz$L(sfHpu+96}5LC;>*mSipS% z8(;xo31AIiE8uOwX~0FmkAQYS_ii#F1uz(p3n&Ck0?Y%f0Q|k1Qzje)@-g6BKs%r} zTA%<Z0cOBhz%+mzPy^TucnfeEa0|fxR-kqN`FAV<eyu!<@ZEZNXHQ}0^0=jorR6h= zFfS4DRsH)VcK+}uL{}(gEE2$rpMk{K6(#LC{-qe^wEhGqGsx}#vhZE~W`x}lpM)?1 zp@6U#g3Z@00t|d%5<={Q1OXx35yDC@?vo+3AcP-AIQJudX%68ugv<#Lfe=?zgmc#{ zLJ~qbLIuLj)FLF~i%MblRg16?gp~-NKxjw!B*IAuI}lnCvU)48?s@du+3tIuxS^J% zO?>o$>9c1k>~j<|=FNJ*K1(ro-kce;=Fe9+=0Ei4gNlJ^v*!;Khzy@DuWXAV548QT z$96MKRKUKn;toatvKwETUn=&Wo7B?yO~LDFdwZ-8{$)!r+4;MnyF!E3;Iduov#XN( zl89T;Vn!?(eJe?pN~-ZpBg=0kiRl*7<5ohzo<Y8FKn4%RpiI*Jc5LwEA#kV<A%kxx z1}s<kQl=P4HKN1ve9=Akkk@Y~hF>0G5un*w`-Fduu#h)y$MoD>aF@2DfZVwqD-Ivc z@)AQ!SRUaoj<+y={x^NRg@oUV5o0EBj@aoekh#c~jwF#RVC>@68Wa^9{I_Q<!mei- z+M@+N`YQnZ5$SeJz?pTvN{xPrXEJFU53*k6ug~2}-oKSJv5B*HRkqn*>CVdk((FrT zc@6=-BjB#wKFwsf1sw{F(D}C>wES<-!T+!8iK72odV(C4j;~!UyV$KjR%)Mz|Dz7P zB$GWH)V%?&d-m6$J<we)OOneX*A0%4-Er!^w+Pt)B_I=!4oC$k0MUR5fE>_q#Uiu< zDBu#{BH&ZNIlyT^GvEZE0dN>_5U>}p8?Xbg6|f$#60i)w_?KL<IE95k<^pB{CIhU1 ze1HXD1SkRVfCvD7xkTuc9y#Iyp#3{k3_t-bfQx`;z(K%vz#703z+Av&zy!b;Kt90u z-7K^oNG2c^5D$n1gaQHq9pB;{4R8@~8qfe>{Cg2@2doFw0PKKifH8o4fDw=m5CGS| zv8;Qyk1UEPFBaXyKJotZ-~a#ky1{*9DJphI-1)cu7YtXMMc8~3g(Clzu{g}||84jc zdJ}(3x|8IK=5<TD%K{HH^pPn<r?M4q0odCwgG&)am<I3q8w!kpfRf@f{?Fj_{{@0y z0sj99Sj}aEpyQbX>H4#Lf<ga3!+~{uW#5X<@iKv8MY{rtV1KO>WI{S}>x@U^HyhZ` zGj+xrDzU8Ni;K>9`b?y!-Ho4fBaW4>GkxL9;78t_@s>@9qnysTSoF$)TQRckR%Z*S z-Ic?Y_bH!L)+zTYk1EeAU(f2U>aUuiny*@-dR?_&byC%#>ZMLlYt<vv73u}*I`vj{ zllrv!Cv`x!BHNTbCi}kZ1=-H*wb>i8U(J3i`&jnL?2Fk~v%T4Qn(>;Y8n32J(@$&C zzNp=*{XpBH4ay14NzO6ll;$jV=IqKjniHVY>Xz!(=?3eI_0{?(^*8nVa^J~4lRMHd z&M?z3$FS0<H`z>&niiW5n~s|<nY^ZUQ=mE09A_SE)|y9{A2dH<e$KqX{Hpo1Su*#` z>z8N9tIk8q6oRk@L4opu`kCyu>_4)TGy^nmYEEf}=j_XwsLR(oPv}qUztUgR%X06} zeZ|nvXf%#8mKmoTA2r&I&l;aMzGQ4MesBEQILI{Aq%j#xyG{FfC7Mkinm$8SzBB!3 z`UTaIOd;lQb3b!`bEY}VoMRqt9&Mgze!yI1USNL8ycE^jXx?hxV@7S1P-6(@D*vuL z=2W&QM`TrHEz2TVr?NiJ3Q$F=l2z#{oocjdoaz<To2pgnzp1yYud4s0zNHq>*%jG~ zvR^<KU(Qx(LbZ|FOzlYRChdFLkF=L^!gMjZsk&LZIl4goaQ!5GbMEK4eGCf2V1v#u z+;FeKX;^MpW7ugpV0ho~lOfG`pV9e<@hjsk;|SARrjJa8=JDoP=Eu#e%`cn(Za!rG z*!+|Eruh$Za9)qR*u3HB(x>uX$on!+um}QFzA#uhPkB=5&N`4aOf^b%pNgm&Rqw0L zt1hXi>L=Ar)gP*0b#HaFTA?1GR;hLB0`)lcMD<klQuT}K!|D#Tn4Rg&R%Pq5$7WY$ zugDJ84AQ7I4{PRY4r`8UE^5Bgd=Jt2x29bqX@az2Xlsl%Svy#()f%)GZJ~Cw)~cPT zy<a;`J4aiq^=OH9y>_#9n|7!6E$ty~v-XVk6YV94o0t=u^LftKIW*_zoL;(Tb<gXz z=yvGd)HUkP=$s$xe$ut;dg^2JYQ0%ss4vq`)<2?uRli$*9&&M0e@8zk_k-NaxvjZ5 z5XK6F-LS~;jA5nWgyAE@Rme-Wafxx4@iXJMMr!PB(wWQ<)Safirc<T}bF8_*JO!iw ziutVh8*_M`Do>X;J@4VX$MbgO9ms3U>&O$FRv3#Ah9G4x<yd98a)xq|a-;H9<r(D{ z%CD9GQZ{FKv)Zy^R5sNcn1~Ikk5$)r$12nV)fV*x^?m9Yn2;~4zfccFFTS8Ts5zjG z(Dl=u*Gu{yh7`j=!!^UBrd0D#^S9<-%(grSn-$ZT{t|@I$_Yy64W%k;X4Z<V7S#gv zGWDqJ>)G8kO3g~mE1I`8XLaxCD-0V9&B)}832)35gslkDl!KH*lvzrRQm-^YcPvo4 zl=aFZ%Ab|}vi7N-S8rB_Lnqvyy(#;(>^<2hvd=;j4Ae~2tk!JM?8UH#Yjd={bE0z| z);;Oey`?*>`$H$!_ty8<57qyaE9Hh5!VJ9)(S{mG>#K$j4Y9@nMvXDoc)xKPrsQkJ z{l??Q_l+MLFB?Nl;ig#A33Gnl8+pg`zRCNQwQdQ%PDc=|j8;xkE>sq)=V1PvRsXF1 zT^*6F$)1!Qq>0uhXoqQwwGU{W4?;wS=M?9>#|0!pSE2j2?zrBVJ2kgG_dD}%=8(LY zyo|h(ycy`a=ki|4`)A&fytBTdR$?Q7SrDY`hyHjy>r&Q@tlOBta#g4*LZw$tRV`N4 zsFtZ#s8*`hsMf1Cs5Yy9RJE#ZsM?`Vr>h@9f4|^VuTig8Z%`jm|DcY6GEL1+&(6$N zW@lqo7_*<ntXP@7CVPE$qvn=I&<1JyXj$D;v?|p7LCF1n?K|4vw70afoZy_WoZdN# zoTWL-b5`a=>jvs_bY@+lZolpw-3i?%x;EWyouKchpQCr^pVAZke*H0hliqnje@Xu@ z{crl)`k>q%xly@sxoNpWbM?9A+>yB@xs!9(<-Q5|9|xT{+4z?6pz(z9wy}@t?<N_v z*$MOBysuc7Zx)1n1X|_Rtfy6D)lT(V^?CJg>eU$g>)BP1rrnyWnpVy0+Txstb5`a2 z21TXT-D|kV_=(fh*L;t822+c>1z`+=LgibK^tV+<RmZXLys!FDHBLQQ?NAep;Z60@ 
z?C-PXniS0l%`D9>%}vb~?GCLKE6C&e>A6qlZp!_K$;<L;5QJq21}Gm=u2mjTZdUcs zglqa}`f1`cNt*teff|EmtY)gFM&o=}^RebfO?T}SsE|$Cx3xcLBXg1<5BKNH%-Nc= zKj+^${dB3iF}kOrI8Nve>W}C@%)Orbq+yHUGsE|We;9Wf4?!G1Ge(*ereRngu9{5H zqD!$9ZH24|=eW8T1}cXt-OBySz^pS_|59bEbJZp4O7&v(+96mn&uPwUzSMjJO{dig zEl{nf`J}ALS<^5vZCUoLg;_hYc4zH1A2zT1_Ay!dOeD7o0?zP-8|HTNvb^ni&3P2V zAmjZfSEehAlrxn!<uc`VWwVkh<ynzg$}D5nB;>R{Yim|RR!f$kQm8Dd$*O6pMOfrF zL$kD~1So2YdYXE+T8LP;V4mz^AGtZy9D#K<+8l3Im{ZN^=FD{u%$N0voNKn57n&ED bmzZnJ%gigxE6r=n2Ug9O%?XB?Sn~e?VB|SA diff --git a/pipenv/vendor/distlib/t64.exe b/pipenv/vendor/distlib/t64.exe index 9da9b40de922fb203df6b9a1d0ad4139af536850..325b8057c08cf7113d4fd889991fa5638d443793 100644 GIT binary patch delta 32032 zcmeFad0Z4n*Dv1FFzmx1GRW?*DGDlz3kWEqgEk6^`-X}EagCx5ibgXe1BsK)JRz0| zYTUB#iHUg>%_6t~uEZq9#HeI%9FrJL6xaEEPj^G|yl>v$y?@-#=l*jiY0jxrr?yj7 zr%s)!ZYy?rRD9%7ouQGQ&Kz!iad6?vpJ({Tw?7ucv++M6eD~<f@keF&QvB00td9Q` z@YmRF@!tXdJVPJ9Pp0ow`8TSN$#DFFqWP33r~dM@Ij;CI4{lxedj;*Zt6WTH&X@P$ zxGNyNqmUa1vM^0|?*N|T+&QkSijC3?O6slQGAMedhI=U=uv)`K_$vI$U!Gg6;kaZs z(An>NFLw=B%Re0*7TDV|x7-4jUoYcN0G=z%hmZ1?22nDkNCmjc2J&6Kvk(eGD1pu5 z!~EskQ3~e=QU-er3@Do$kC4FL0mCRapqx@kZp-eod{52O$zB`WyL9#fi>Q4yMZ*ax zhXs3OT(TGI=^mA2<TxockmGV4=D1|RAruM@LyMn=lUzEE+lS0Km!bMMjyrMa#EA{= zk$g%D>*}7+{gFbXJKX6%)4fIMhKF{fi};N#aqrS|_b?PH-WJ5)Q_km#O}UO?clDn% z=ZH;GRcB-%HVRdBmPk=64rA}S_tki@Z{3a7?#K&ODIHHBFEu;mhg|#IRH+axH;E^z z>JaA#7X8M0srkOkC5Yfb6Zn)fQUKaSVQFcEg5(xmH90=H3$`kkNiW>ZPN@^Wlv=w| z;goD`oaPhs4Kn^RM?7g3k4jo=2*&{z#AfLyIs0=|$*!nM3l$SY5tCj=_C>Co#Cw9< zoz}-8st?hd?&5LjIlWRaN<~NBsY3Ox{#EcYwP3S&C`bHK5Kl=pU5Nh_Wm=BX&_FBN zH^vujM@z+X`e*8;EBDY!6fcOMnQCcBNM~P@%cq2i_fZ8Y55kcfB4%He1(H(dmVHG? zY(s1BMJcV#6_27&G);;{p(qIj>6C(~l*dqtNxwIH4p)+{4FtVRn<xJ0ETQZiHL?iL zpe3I}gwmV;Ez3`X+q3-8=kImzi(JPz3KCs;=@*uV>Gx`DeKf4vqf0`nR31%Zs30yY zM?9P(&PdJ?R{(uT$PvFc%{I+4%`_FvJamGc@`&)>fGH_BdW{^<u6snM_Q&k3s?%3i zLKFvRoI2O#I<!$#u47`{e8G_$H&wcWDheV6MFhoj(#1fQ>)E}}7fwoC7M6n1XpSp{ z3_kP5*nNQd0raCzt(>KtN~-o@FL?Il9ZvRzXHWkoBo)tr$<JkY+sSlZ@%|puG+b4a z#Vh5E%h1osvb;ijycJh*6aDqbl?rdFfwl&^LY{b4rSeR?m2xJf$z@1zvbA2Z{PoN1 zd9MiTHZRoShubdKBY9W3WRgpEi)mjq+O{9LAe-YZ!vd%bRCfbd#YSl_m{ZPBTT9K3 zVe_RIXJPW)aJgI!=D5=NNaHeWk`qj-6TrTahO)y@rX*g2#QXjPv&*o8m?k+UH4@iS zg{xHInnYZG$XsKTteS~wl)|(~VM+#*;Ltq};(qFjhU~XUkGTwqm)X<a8P=*+S>D+$ z!+BtWtvD_jt;j1i?IU$+7VA>ZnC-c72?G8Y_mN6;8CHWc7d)u&4NncGYC}qs6miSt z63x_~6XT{rAa<?K6b+colr8$W&}Dd9MW@=1p=IF|>Q22+!y+FwB7(zk7g?eiCdx0D z=!YbZW13Z+ji!mF$%5^QQ4lYK?^o~%;w_h<7y2Dq$n*&-^@+5emG(h*VHAXd=Y8UF zjP`OFu9AdA(iLSVgsSrv-yFA#f_Ss_G<BcDuo;Y|T2kJEcv>p*Zl5jBgI-mK7Lqz! 
z-$t@r6{6qkUxHE9$t0eYa#5J9jFGXMsviE0L`Wh{BYxXaUgh^A{z#>Ak|16zF{gF2 zgzESD5?!m%eFWPf9ucu+KdTW}umnKCd6k&$u@p&K$#_RrMX2|q)Ihm{P-LVgtUqFO zm)M5Jl=UdFrN=00!~rfefa-LVqT92wHSknF<g*(brdlXRx&3o76HunH#Ac0(v4sAW z(Fc^#Gjc}jkP)g#f{3REWGnTwhEncMJxKb!jZ(5g^`n~W_qvrpN<RwrnJ5K{MGzOq z34(Z35G`@}g6*&oTBTt2Awk44IMqwAmj(#-@{mdB_DQD6rYVBswNwlWOx>NHR8Q^L zp6ye&+JnZW^(T(A9*99BO43p-TF2P?PEY;b(}JzeUBCBJnj2d%rc-X19VK2g5WI68 zz2;+iamMC4=JU?ZCh>Es^Exb^XuW@q?FdgAm!n#L4<UJtA|cUwL>l4&lva|Kc|8Zi zwb_Ea21r0hIDLVA3rw(GhGZS3Solj!_WqNPYS*V}xO^!Vg2Te#Fzok``%Ya&lA#;~ zhmCR<#QQ8k*VPIxEKm-^Q(&aNy$+R-wSJHQ#h9OXCs#a#3h!&ZN7M8))JlS!)^3h7 zx(pxv2wiweUW^~1Vv5=*gZGg^iQTd`U%<+mUy=r~TQHi@S)WTwFtyQYZvlg%ByCWc zRARLajg>1}O66nxTacW3R+iFGg&#fJfS&b~dlus^T28S<O)>26DRlpkf~G#k!D+#6 ziOZ5MVO-1UOQ_CP8rLmU@4L`)x#HTmEb&yHERj`|KO(+I#E0^zb?DBc`SQa3xm2yC zwv?^KvXdzwCx@dDV%<;jkuqREs05ecH2Tmnp>v5TZDL$hS<f7(YolZOuhI+{YGM&} z(hwxVSjoef6lEDMLy;x+K|;|X!SM`bi_9u8tnwW0W|!e{WSHxCnFuL$IkqNOj(89D zlp~)nx%O_3=n`&zn(Huu+9ZeXvZwufS@)wf6!oE2E(+yj>7O}@X@2oKnr&YkmzoRd zB<DJGpMnG34nqSX%eAkKOUShs#~GzG9~v~Lr5q@n6VQ+e(nEKF{nM|cMBlX%GXx3J zb)R;F8PO?7sILzwxmoXWmGRdP3H40@ttUfSo_>ICO)M4+4yFSqdB(D*^znt&u#iZN zVwC066w*lpWgZBXa~^==>RlEi#dah^s}!9Ixk_(=TwR?~=j;m0GMfY{n^-etTd)mF zhBV!S1qJl=--{{yA(d(mk%q^x2?23@RSa7lkYpvzkPodOc;(u&LUQebal+&&%93y^ zrB0}7(mzv&S{bC?$U?38htV8oYjinB6T=yq!E#A(VC+jR=%#8CoExepp34<a!Fr+v z3|i^5beo7U*S<u}_E6o0k)UQ{Y(Zr~Tg{#|1Ex1tlP1fc2~>$(v0e~$1%ovl*&A<t zD-Xgh(u=47WactV2L(!KbTs=iAgb%>-ijzu-|#eZT$aTk6%r>Vp|3PKngw<0&QHr= znVq7B+=Go`2^K3FA(JZY0jRw)FDYX%nC))RgYWdfz)2hKWeFF=;Q``$nN7Ee*u16e zzHD=+g}S-M4uM?j$^rtH1^$K|66QY5K%;)|Iq6acV}Y}yMx-Lysyx(U4O&(fCe1~c ztD?LX#R57r>w|-6pf;5mrJsjk^@AnHN$awJxd=Oid?5w_Ok^2A65B2k<tmMrq%`f% znP`kZb@!i0jdFC(H>Kssq(R<1N%J~TEDg>;=`^^c@g2mJIu;XT&@YSBaEG#Fv5VRt zX0j1M(boEzu+r{g<0#m*&*aIIsVvTs??u)~<VzHJNr^01BRy-wP-ExHlqL*RH{>YT zgEWHeV6tGV_Yz#kp$$dtXfQi}qJiNs+=<X|*$%-iM{LY>7=FikkR#R!^=(?)w_S7C zPeJLK={X!%ky$uU!*Ld^baI3&i!-^%+IBdhbq*BV38mJ8_!aE3smM@B|7!UV!J+-8 zE1PBL+5gHY^vnK5A$l5PGoWQgqdLF@SNs>>V7u3z{A=5-pmm*X^#RiUQS2i_Xvm8c zi}C0%G=)>H$CeL3u6bSAFNUO^Gf*$IDjB&HfREDZ^!t~G=#fF|5Q=n+qR64&u;IbO z2V9Oq4k<?jdp1m~8+jt^m1|UE^m?##B>b=WISzR1QnXe|$zvY|_X&F;7aFjr1${kK zT272TA!1DAl5pnUC6n(S&W3l%=6B|@C%UBZFNd)sT@oX=j+O^PAjXRzo~5Dj3ii0- zIj5gAGmPm%;xzMEzYviZz1V?}99<zcvg(NZC4?D5hx2v0Y<lRU{121a#n5FbIg?=! zibr!{R?N>4&tl4jPoe^pEs3qoWm$w?OO#qy!g!{cUYW}t4IAk>7S)ipbYab5Wr3fe z7o;fcI@E&R3}#cpcj%TwEvXcjfw@KW8CEh8(nOX$ZEg20@j?-7@MA*#EiKlbhlXo7 z+jm`sMQ5hwxCJ$$yF=qJmO23m03wK33$&AmGfPD8%%g^mj3)rKopM#(DC<d;+l3;< zb9v$kRX4%7J}n(bsqMO{s0+IkVTujtjx}*_lYXze;Or)wq`6qmvS@xMo5gyUVdNKV zcx1BfPi#@uCOjI%Dk6`D#r#Ei7iK7wW$Se6(ZS<cVN@tTZx(wfDuz!qu^mx){Myd! z*Qjy)^D~*LYd`+MnXIg9PrgSMd%o*r-7;8vYS#BVF?VAMYZ%uzbQev!Dpy&EbOV-^ z8gB#2KAyTV`jO#^{GtUhyV;d@uH1cnh%-QNOlroaq8O2E;<RUV`(ZoeI!0(9x;``6 z&1pSiD?zcP#^|e_B&D+wA$v$`1>p;UgYWwSHhg+oW`e?3)?bPOX0{uhJ*(<g_kfk! 
z2R&96DS3k2z+t`MoW9a+7!zs@9|y|zO*gXNCj|M{(5A|EI28(?B^%?C)_3sH(z4Lj z@>|L;c2kJS)~12ZjUqedo*UMV<I?stR$)x>pO!~vT7x_qpP9z$jk%ep{X5Dk07@D4 z!p_9Uc1L5;+V1dW7o^JHU9M^vH8d$sOTVHLSbKrFUt^=9yIYTCqMHOUvIt!dCyu_- zhE`yAQ<fs6MGgVcQYPJ50Fqss9}V@lcRo#n)!x!i^9U)Mg0$T!Es7m2RNb(QpxwpW zh=dU$h`zM6rAXT-oZhf96qFKM#_G&6y)seVq~A)|_2_XiFQsF3OgRtBr0RyU0(X`w zF@xI|@c<nwiRrWOQVDfpp@!@oD9#hoJ5ET|?m-JfkkkVS)FmJas2Ji5%M*{}h@X?; z4y8gB4#qgLxaHcr@52s&lXxtF;vuI&uouJ`vs0QdLumIdonfw+XcqFKF<>jqkM>$5 z0B46MCh5sl^`YG12Vspgu&%Lv`xOqPMjsd3q%$NiblebV0F>-fWTv&9b~!r>;^+ht zijS0VnLQd?U|9GoI*7OjK$Vcpj!VD&%C5)825-S0*W#XX-kB;CX`2whkbLYION#4j z?E-5Fjft^k8lh%+tP{QZVD*fkCLKy@Uq`)rAh@`DJL{iVo`{tvRt#BA+SI{%F#*A* zrRW5y2+`<bpM6lhoD>Zfm*My^w8*aQ55q)ycL`^uz$RoTz4@TZ>_yCfCQ_ygRgyQ6 zz5_{CB~l@3gSFZd5;+NCuBaUniv`}U9SR_b1|xu|3&vk5^CuDo?L#mUDM2x1yb6Du zUBouqT5DVu>=)3dfpkJzioeyQ_zPfm*$yYqKBSnii_ji~6Ixed*kf6P1Hub?zxAjW zMENx850?S8Ow_KW)d~epl~?JBa2t_ZLHeSO+D(?K4ci^gViK@RO6Azs@nO~sq^CF- zZE!N=!gB$=ij){VHY9V#2x4WNT$WwyMcF&lpsfx&Q|&X9T-5$5=c1iWt2p?pscv>{ zA*c=7fgpOjb9D{cWMG)6lt4$$QxrRMJ_6oqpwwj;*aZEh-3dlT-2P}hZ8C#FM{8#v zr+(BRV9Q(sCa%m!;rnR`MT9hxBMN0<s<;ei8d-C<eFIm*0OKrCNKN*dv<aN(K>z(p zO-{NDlR*|oyPIl>k|TbR<91qllpW~a!{cfLI?T5FSGaols2;oTCOoGJSPhz~)=h54 zXOh*zUhUa;a+XZg%EaRm|0Ddkb!Ftk@X_!8Ezmgzwn#6Tg$5W+j!QpuumpgmlQR)4 zF*1v%%JR_<taswj0NV<xufFOlcqm=kKZDu2#ED(s>`RKqg`JNvJ#+kUEO?G9l&t6` z#2J&qt|lf7TAM5<tlTB%XK3w5eG_yIT5}BsX090dB`7(f`}G@HAvTL2T)T+6^b;GK z6g*e#N!4tG?-==Qr<8WTHk$C0$adPlQ%};Ke25Ati-M8+kes>SEy?FwIqOA~wJq)r z=Zr%JGnLpjO6xKlI7rhbQs~EyCH3Pg`mp;+p?qr?3+)x_mZ8<Kp}j&vbLOLmi{nbs z=eco~lryH<jcA4R{s30mD?Bt?&f#S^_7IPKGgK%BymX_Mz0@m}-&4yj_8Ktw<$4-9 zVjIj8V3d#GGE@LXl>@K}``PZeFpZB(#q+4G4%!0=DJP^!7g%2J0bM57VJLM`h7!#_ zn1sGa4F6j&d$+e|r%|Esfrww_kgmIi30Th>de3*8<ECN#`iz{n1NJm&b8i@)E3k8R zy2JSrK=~Y!x24v<#ZPIE50c|mTmCC)#@~2SI(T3~4nqX2?r?VvYw6RC@yXrz<$GAq z<k42Be@x1?aRH`@%4YB@DJWCc-XF`OUSC;5V%*e1i+EkKkbbVr9h$$h^q4{^B+4pz z9=Nn`bkcA)hlgNpa%Dxij`)6EFbs;?a%>rQxw1sr1uA)T!;G=J-=(&O2yE-z2i->} zKuS7TxsCNPZf7_R`=vh7YB7r~nC%6!Iy3{LxnGKBs+9SS%O$PF;Le7d*q&WW%XX5q z2n5ysXu}phJLSANb>?dpUqQ^?Y1@fu&fO#PEXs$*Qg#Jx(~f8B_a>OcYi2u!M7DU_ zB%aB&Puf+IUG<AqU-_Lk_43TxeN&MZPF*a$X2-O~99tcqYtOF0dcFPtSm5V6QesLx zRFYo5$^exScUqz8KgsoKv3;v|!nb)$C^{$<VXH)@td<9d{*xfS{<b>`UMwEN-nudz z^W4>>uZ%^OfY!=WD&Zj&mZ&gHg>4=(V^t5e4*QBx&GdTxQ(ma3zA_H_#@1%iR~|)g z*xH8cE8)%olC7`&Kt@LDE8mon3Hr+QNNQ`Fs;^vvw6?bCmM&IX+Z=u6vob@Gz7j*1 zYi$Nsj_8qNI|?5H`5hs`bU}o_5k^qI1HDL0`u#unt_aZ|;ILjyMIvr<;=xbb%wk(B zjU%)EKq?7)YO5U~m{1FJq+*Usb4tk(eI{3Ib3SOgc{th2iXr5d?fX5JA2WPDQMFKI zR=pWxNi96P@_T*dBPaodqw55{IWkht?0S{!;cn*CU`w=qk3WpzYH2Q31Z0ldx(TkM zCRYRgn$ultzs4>nORlyVPOMUW)k>&i(~ZMf`pRr8<~B9POK<Z7T7oH3C?B5y5n2-| zma3`N5@SFXOM9bb@swb&7NeS$LF;Abes;Ba{lvHhxkVK;L2wFCO{xfoI(f~~aF`Qv z6<7Nsk5UaR4QPN8dv>$nnAvQSOKzP288l?CC#jZXyE*qk3vnjfi7c5#{JqMhw~t5v znGRrNz+8vqTAz{obFTi$E=VgUNUwkR_`U^Tz)-1lLteJFCHl&G5cWY#GIcv(maVPS zGRxMsLSMNSM#R1{HSR1imMej(Lx^#h-gRc9589hf9kByvZ0z!puGaNnI1m7Ci1LcQ zmewf<#bT1JFw@60rLrr3SiREZ$brYaZk%J>nGzT)g(SKMoJFR65U|WKMrHQ>(V3GY z4(S*L($|Yr!Raf1#?~R*G2KWa*TT=}<YVoF6lr4cK7si_!ZPiH0^pSF$X&%gIJz#t z*JS(QrYZfNNk8@;28D%XHNcy}KK&rYdKw}kQ6?S|T!%1@v#Wm8SKft!sj90@Mot#5 zR5yL469jlYFt*mawO&M^tW$|Oy|v7wcHQ$(PNwu*rUNfh;rg++C`(hi>*4N?G|WT^ zLeXids4Jj7dl#mazOobv;Q11J-892{Yj(PsMN9(^ESw*E(?Y4v0+aredqU9-B&%~7 zVryC7#<X4)U~n0}+l$;V`Z2p)&2rDn6$-;T7K~gb$V?yaWi^e_)?*MN%>|4=nyH^d zkeCJ<i@p{HCdNKa6I*{!h`gn9t+AcxDF2qGgVP&T6lAg)*%ajBaH2dr@uoTPCbID> z3zT=$Vk7n>blj0K3^fzWYSle<Zd^biV&Kt)PBBU=5X;6k1zT0_`zG=S!^=0d40_r1 z@Ek$BV-jJsWLGOwC^xQHol@{Ox=u+a=gT7^w@9k0dpMpf({lY@VuJ5KOIn5Oa@_9G zT)Sc2FW1!BLk930paNNNvC`f@&q|g364Fzubpq*-h@@0?qdW%fa<@dtocl-|YGucR 
zOBE!!Q1VS5QY7lCbZR!nJ8j86`wV72dKDScdZTc)CL)#=6I)x3CEv!%k98RkDwFnK z@om)ugRk97UX03cz*2_;mi<@!T5o9NaUIb53lJ#G))sd7ShV#Mgi1`d)ReMdfO5W{ zoW>n#{?NBRkH}hDE|4RZiVYMbm7oM|6eL}s1T7S#>F*G}S19v9ahQxsfZQFxy!4g% zz(~0-hRm&T<+8ldo|EmGK{`FOLprVJp&3xIdp*l~n3VY}w@g-?G&e|@k>!uQBL!Rk zpbGdMAo)1f3e7U@JZL8IHkD~^JtbF|GE6dw9_-@rPQ3~+k?ijhlkJ52&2t#W1+6;~ z#EzxF;a4H&Wky~^x3P#5$=2!M^m0xlerFE;YMjGK%AU>)1bF8_)W&R|w4O>Vr9TDq zN5~exqxuj&)fq?~i$S-Tj10wUkXuHk1{oIms=hd`GN(?GO_RKxw&xpBrF(Rg=kRHU zk|K+8<Y{){MC2$k{9vZ;r?cafCde@vqX2Wed^gza+DD*fX$|h-GRy)c!(&AlX7(ab z<%=(-eORSre|qX}_DOO-e(g1OExCK>9Ulz`*Df3pF2mUOkrOPjUbf8VI<bCzXUB^1 zG|<lrj&%Vh$7l}->5$nz(=XRP)4k;Sn}Q>A)D8AQ-#L-9(IW!gx~VEfF>s-XIqtNc z;new?_gKGv`I;@v)-Qu^TFC1A_2OTc573#pr5O1K=d<{f7x*uZv!;{*eD7Nu`u9z{ za}%0$7}*iwS6#l)P!4VFb;>pu7kJVev3VbNk`$u$3>XFb<F#lRLWgg%h5biFl_Sm; zKi8AS%bojW@eSI^z!8G1^512r`bR}2br7O3BAlPw?%XZIg%|8%8?*tQ%x6Fv-}iTx zJ)kFl@&;Qrz}#o)JIY?@Cn^c;GK!~NhG8o4h)f(4LE8)s%8}C84+Hk`RxR5xFw(!! zNXz@g2k|Fz@t^G2KpfyUvZn^E=d~p)G&Rn8;5Xc$ae0?@62yN|OLEdrPz#&TLi=_i zTy?qKc#Oscudlida6|hc#I4a+{er+JI{bmN>CX__{SSAyYhT1VE)MsyYq!gYpIy6| zkT2mQb$`R<!nicrZfn;WgrZ~mz2?zg0XS8y6w0Tu!>LBiNcLlDbPpJZt_CO0^|cX@ z`&E}0RZbTGT&di{ja0cv*EQBBEjVcJuSEaGLgxe0ctlBt%WPWO0K+6Q6w8A4VfAuJ z-k9SJ+M%KB#k6UB|Lg32+E)JAYi#S_63uPqmfllXb(H#4OdtuaSV%o8h5)BsW3Vj^ z=WEiWzj6Es6$Op};?DdVz3)+0mL6l(DI~IT4^%)xLgH!Z))Bb?(o7V*tOS!Ohl>hM zHYx_&{NDjE&cbZ_=wOnHUN9FVQ6vWGJz{n|H1!j*tiaxj*waQ{wFC;HUiNC)JW*e* zWIw_pGeY>KN7$f@D1Ph_HYLNv7hh*vGLrcRuCfyuefimcFjq!D<L+0KBL_@zQP&N< zYrEyL=v;>Dpxbmof3R_xG5qUwtSqw+KlL(uC36^Wxy~+U4i4~o8QrfNjIN=r>g|_V z{~^(X^zv+Jb{Wnh7QF(s5R7YMSJ4G7!`DQ!;Vy^W&FLw{9m4&Z^E90Izs#N-GSPbY zAW3PoKn>AKdz8>rCG?sSny-YmD4|j%^rR9hS3(cVAyF2+oY8{{VW%RpFF!#Y<e5AP z^aJu3J}pJU?agKn4eMl+sM_jm_mCnEvL}Wn`Mv@<Tf!jl%&V+^Xe?h>&wd)3=r#N$ za{1)_&O)<{T@$G<M6~=GY%4T73Y$WVN<SKch8r)ksae^)SkHE3jpPGQGAS!fvy{b{ z(jxo5PU^u8iAaail@~oIvK#297g)I|p4UImc9;frA9#txb6lD%kI>`N2o-)UdjXDP z3vQgz`u03_&s1SuLZegs$`XQ&U=eU>`$)M`UUbcAJP~vgPzY}Syg)$L03bb#nnDLp zNBDX+!pEopN91!dOXgMpCl3AGsG7X187?9(A^p<wUj$pGClyCG_;mO6$h&WlQktH; zxlJY<ox2P_=~=*VQ=ehCNPkq4vTD%wrU1Idygoh_Xw`Xr)#ubpqOKU(xC~2nvWnrs zUPI(S{!X@i_>iOk^dk&X$xQc1?4B;1qwB8t6ZefBaty+s%i*7PFfVi88Mf!C6}n;o zX)jr`4nu`PT1KRoK&sBJ`oW@?jeIG<59?0V4`tmNw2^H}^^}O4zO>4pmJY$H26P$1 zwzG|9D?jZWX2{Ow!@gw&+1(-^g$auj1bt<h28O~IH)$CN5US3Wd8&o&$PTgocmw%- z6oeHPYH<@50YN-Tw>K^{V>&dEzbptzaj)f9VwCTyl-LKfqAqaoxeWf>K*imUpOI-# z<j@T{D0dFGA_H`d;V&<`Vlt+F1LrGUO8kr*<OWxrU~9`*Ig%&`K|vnp-(jOiL{2>c z#r97bw46r<V5`QeBRlg?N})1qnF&`JREnoU!6Y$6fX`GLV5BYeN$IB&RyU%{Ks#mf zU#n*MJ5j0a93rWA;zGAF-N-EdbUqt9GBnT~Ef>Tm<Fa@_7$<SHY|Y4|@L4Dr61Q~O z2a$5pSfw73w#>IKvigxRq2FTRQjeMG_*2w1gMeB39J@PmIG??hnS|~E#a^hamO~ed zC+)f+UaVZ`6CO`Q+d*7z8+B8OgyL9!MAZG^$qowX{Fk20B?JfUz>+RF)>C2(NVhNS z28So>kz)=VNfEXNuk_E>u6KHfk%K&$H77CP%obV9*+S887?(kwY=2H~{*TS<a!xFN zz=P>WrG{SEL`{{@Q7%g!%{M{T2T@yC;ix`*-e&gns1&cOxJu$Oys?>`7&Wxl5!fRs zzrg%~fe0;eN-Eq(+Tt7;RS7t8i4);2F5ZmxWp262Qb(7C#UjPZ7#he_n%S{Kg8a<s zk-j%s!{}+f;^tBn<n(>$$fjqSb{hsHQk-%brpPI_t0}5(u)N&DUW@*Fihzz3$7-mI z_7rV@vafUV3bnx+Zrxm==sK4BujuNGE#oR@$rnVwt2a#W>;_1qaq2=s@NhN2w2iaf z30m71%x@7hxD>Z+6!J#D)+of^rEVX{p4`W^?D4#50Uvdy*a-o2bk`^aE@oHrmU?G` zh|rqFZ2p++Kr3dP-cf<!lUwvXvLEZt4vY!*`E0LzGNkR}&Mu7^rgOrU86^gN2NisG zFY7&aIzRC{_QY7Dz8|f54!=ghu6G&YH?n<Wr;I-FEegQyFj1)b#S(6N$V+1}U^ci6 zMH{I)C#4ndp?_?#X!D@}!F8(ab7<NfNQ4EjjTSOrTe_yq!(~X>$Y$hs5139-ZcS)( zBmB94Y+xJmWBA!lwkJQH*L=$^<VW*M&TJUhW6<smvcBm;E5*`XuzvcTA%7&a8m`Sb zbvgEedz{llnusXu$#rqa*JbFoffbHR;`3YBzsB|E>z-zxj2qk~PKzS3O*cpjKkaa7 z-fU+2@egQx+4Awv>WZOp)Fa)h1<Wv^hp{&(@{#W2G#8Bs*+ZNUOI?6qtY<7>vnHet zK6_ebDoc@00HgEV9ysqxIO>UcOvkx<K<zlreO(R&Q+5MSv*rmq_+D?aWfNmJOv>R8 
zJjF&&N@uI9{n%rZ$^-Ay^c@`{6kWYh|7ckk>64QzU~(A$!bz4i*_71%T@twLI=1*r zp-M=08~b9qM8nyPsUjPaZ!c$?C->!_En+7o$Mwp9y1?BKx*{ShYMLsz`5kn6qA{O; z4SU$+Wwdm`cKj4>2=h|*G8Q={fe$a*J#tDPO?dmYCUhr+C>-w);upu*z3GGawWVy} zjAH)&O}2GL4FAMARySk2^){?;9FL?*p8{#%ywUL1BQW<;T(}yw{v}}P+WVMRdfO)8 zxRV24CGM4dzux7_sds5@KXn}~K6%Ty1QTS@jj2zV`+B{G)o3wzAGw3|-MX@iRD+yR zK&e~+Nw#%1qRryZSb*7uf+=B@S7`!WCEXD&-Go`K9_sovu`x5dM4w(vtA@@GMdpg< zMvIklak=(_<SZq?9-En>S;F?uG!8&Lp2$s3GYxLEjKi)fZ@o`Dzsc=u!F4d(QP^f) zZw5!jJxy6}HNAdTaBpgX9DfU(#4@<OVeq$ngA;Ykg}!uTV00O}+S!CzaXQW87*1~A zA(J-a5>`2D11iyFc2q|Cdbx9bT!wv*$yiC+x^FCVu%z{P%;i$pHQWtzvX@{9t5`SH z$I{ijJ{HAMHp^!B=(h!y5Ydt1vnm`4>z$rJ8-aok{<z%q2POTZevq#d;+IQ5F}pBi z9FFgQ(C_gbrvIc;Q;HA-+F81BgqaItQMLJn5nbd~9$Ihin`XQ1R@UhWH?`E~3XS~g z1?=O(ZvF06Qs=ZFt-)pRs*)AgBiIPoj=NfqX)qoRi8EVZn?6|Sa*a|7H_X|Vgq%rG z@V%WQNttS~Y{Fr8gjZ#@9XTOSf0rSml3kn=>{DrLmj$~&#~6Y1sFwE=*<UGDl7ne# z>jpoNG~^WQ&iy{f6r%7z+LE+M=PbcazjoTSM?q8XDMLVo`bUpo7vPLhvv}!&ehFKi zM>Fo+H|fpa;1a+zm!Xr57IQ6z-AL>Xl;JNG9Aft$2#uKoN^26#HJtl|Nht_n5c~bb zw)r4SD>8Mv2(v!rJPw=w4Md$lu=%-^^E82ALvZPt-~GmR78&^Oe`9-#dWNh5Blz=W zewq)?DX{Y{EMT{aqWed6<RYcWd=NWgM`J{w%3%`4BQf2*RI6KA&&Cud=nl#hS#gc6 zXSU+hVe3F?zscYY9O*OY_$N?E|7^KVz7={_^2$K@$8*Jl*fr6mS-R74L`c80!cWSq zXZMP`x>ucfI2Siqg4oqMug+Odv?@0_vYt5UIxeM6!#>8hKAx?8H;RAf5KEdjJT`I% zR)K=L_G7A@@NVFkYBUJyG1a*PY{R@BV<uT~9lYvgw4c_FLFg;kasj7c%cVD166kJJ zY;I8<CVmnaaedBYYk^k0rOjJZH#N_#vWaw#(+4#Ni?@||&+kE_`^K=;3QLl-@jyp$ z(Fa)0{8f=Hfw)@r-;Sj)RW|x?o!N!?mwon~R;uz|KlbT@Y<~G^7P4@VW*^I6*uXD1 z#kTC86DWJ&rN-W}Q<VJGZ!z5mLv|lq6s6(kf5#U6J0hHX)kjaz&_C)0H}ze`O<n6U z^nHj$KbV#9IO1G})-@>0F{uJgcQu<T?#?S~_fXH4<D1qnpFI)o=TQ>txo1^$HN6-h zX7B899^(SSQSu$blOuH>4=&S^FwG=(VNXAP&0N-LZ=(C_<3K&Iq^J8sge=(`t$Be} z@9n~W_bf|T+Ldow#d4M=@#042RU5-LFWtjGoyf+O2D|@zEZFbYc6WaQ$nIxxm0Q<; zPlq(?|IJ=s7O(3$6ZMCsR2CrJ{5LzlEUZ`fDvZ!JBH`VMf5CC;<WuS}+Pq=+xc-fa zcmiz|phnTj-?NzIWAZ9MwHY~SDxf-Xp2BFTYARomo+*CEaVr)H;-y^ix19QWxEwd2 z8YJ8IxEyzW>S)pZR~$5kc-g+ySh6uccCn9_N2gwcC8o{_PbG6@_C%q!82#bW?_i)J zoBm>)43xd|VFY`nF4(;>4o$AB>#5-%*u$>WkBmL}GzPQZ?)Jf4s|@Bl?{~;!{rhb4 zK@0!Hzu1X`vHaYB?f(5>uEuZgL1iet8xL9L93I9mILI7_qugId{_Neu3*G555sN%> zm=CXK-yd1;J}w#sFFjhJ;lKEt{oW84(dU3Ngk;a~e;ju_^Pqg(aSckwWq`Mh#?k+D z+;K!^>hmBScerPu4E9muMfbr`K(;r%z-QQZ4?DJ*=Qmfv?;54cd99;aXJ2JrC;PCW zE24D2z1l%t`zl+wB9?#XG4|AoMZVjyrH4lEF6F$+w3dR<4^ic^a<V?cq)V@KBwG9m zTVr{FHy&V7WpTmp&qalc+AuK6aV<^0*CpLJ$)=V?2R?)-&<E2Ege<6|z2li!HqiT! 
z4XW#i^Xv)s>!>h~WSpK_YT4zoAt7<HYqSj>;V?9lwk;naGOCtkt&9)(`2;46-IJ*0 zva0)nP9QpMV(V6hg&o<ebkC2E!lbar??DhZ#2U1>K4b5#ObIS{5qm;R<HC<&1u}(a z;>YavN+bX1To$paPuwFgu9W-9mQK0i5&2$vqx2l)innyoE|;PET()#oFa5buAkmIu z8ti<xPuZKRjMmpdlkeh?UtudTq#v?GVGvgnPWN#J70Hr?0bu}!Tdh1fuB5{`sHb=d z`I8<r(OhsCSTiN^JC=uOG3=;0)C+zMiyt)VbhLtQ2x1!2O{rvZ*x*$eI~!FVGRB7@ zF%iCdf%-|d`W{i{-rV#^1F~E(69-O$cwMN!kGU5g29oV(%stmsT!i@pb1zk%dxKEW z;8AQ>d06j9JF>!Y0mysM8nFSB&W;EwYfcA)^y~91;N2kRw>pfURlyQgkK>))*+U53 zoWp8X$MS<e+Wpb$V;XBFR5JM&&tf*CZx6t2pMoXod|5c%Y4`w<8JTbn;VRg1X;csd z;V5v^JmFSZSN5}Gq-MVt7=)rm$=X24X{5}p1qEXQB8gmv+$z%L4IunxNL`Vh=_u7$ zdJ9FrDtDg3v2ij}T4f-u_-i1oiDAaIArTSCg330p1CuS@4?%QycleqnpTbZIVT;!W z2Y<3n8BXtGm`Q)Gl{sJD%QmgG@^i+r&~@ke$}#N5I&si2sD5hgd8ikhI22%1x4%4u z5q4BN1``Z({x0cWF>(O71$*Wz|7NuhPvJj4#5!4%{hg4B$#p!X&TNl7{{b6o?aznK zWL4Hloz4`1oel>r<7hwl{Xyom_DrY&6>}K3MNUaYt7)D6Lms~Kh#Qyb5rM%xt$<}# zBzcFg1JRKgw2r+|(JjzR*>5-^udkKKSJr~uhks}rJ6{nwym%W{fR!|ragnKE2IYn^ z=rVi&)GVIMNjdK_1pE!(NG#w^ZI66t21|b=G3wW?(1SNW%e6;d0j9hJCJ*Ejws>6w zt9;~He)&`u@#wSsuw3@RqvjCHCNk7)zAkALuy*_BYz+&*+f_YxvAed3vGa$*k?KZk zRkRdP)6W0Xvose%AY<wLvus7>06u9dd!uqf@Rw6ivOT*+UhoDBMfASbbBEaUs_>+X zlQ75|r71`%1>%pG+WMS3)f2Ng5Xm6h(Mp1ea)PKDIRPfVKez=kuf=893KE9&>(YnM zFjf`sSD20r<U%eVWQVKf=sho?irI}+-%x4rGpzq(-FtisPX#jXQes>8d01J1P&BDg z^p(O#kUi_N92R}a7kWl|<Kphh$9y!M_YXpz==&4x5w8zo&p#f@Kib4TdVB~!p0T@+ z8~K0>ENXoOf9)HVvA%nk9|y{5^p)3Po3*EF9>~_LPvp~kuvgdb=8uhI1@<IEE>;3r zI0JA<2WtZo`Tc3O%^uDBq_R5uAU>y>-Ll8|_rYq~-XkAYv&1JJ3=P5{r|m9nbw&4s z6~$g&_eH(*Z7O^32`jz3nP9thA1@6BR<|oE=P+0Zz0-99wDXE?^7SIaMBsv0|Mz_} zGSh!z_VTi|WZ{*N&z@>eE)_n-mVDHee|H0W>Z7Q%y|jopdR1e%Ld|{kks`kV81~Xu zI$nbrnm~c4<i+N+P;^eHzoV5ZPO`L*BM0*ON?g7{(Wj-!GPlbjC(ZeL(v1I1+KZAV zB)X~m@GqT~ZeX>dqHqWBwA4yV7j(Q@)Ym82lOOly-yh6A`Z$Xh>e&5{Ct3HIs8l*P zfM?QuAtfk?E2Y;X%c1A9@KWR~T#BVzZtb(wx&sG9KBS*SUzZ_t3=q5uL{4?r=`!cv zRtQ|26?OJ%Xxci#)}#^AbaA_||KC`L9dP?=mTb!>)A%Ec*q@)2^Mi8P(gT@%{*&yT z13CQLquH$k@%)yB%vdMz`2<qg_$_-_vIw7pgH066`CU2el$gt}`H)3C8N%;c$Ob-{ z!gtPLi=K?<Eu+|mCr9u}quAF^X7CU1XCaOVe%Asv(9xgYlEW4|hV*{|%OJc3j?q^i zNxN`C)|g{Us29inMHm7Coh|2i2`ML~gkpBlF@(32u&94+;=4V;YX6nWKRJS3``5^b z8MtC4B(`AXfH1LizSrP(0-6<9{hpr3COy@Y*Y0PJJQY5;C7Wim#emNY6yhakdUc;v zqX{z(E6d+2Golz}V@b+`+xVoE`~b>MVtjQVzqXhKSI^)5SoK6V>)=A_v(W)kUj+$~ z5){NJbybiADOf?0C7q1;N?L`GCEcAZrxv8^3X(5fR*<REw+d1yol}tc(rE=rm5wS% zsr2z|tDL@E`amX$6{PfDQ*byhIBBbbV-#>wwSvP3%Sn|AzLW5B1%Hk3#R`sr!byb+ zj+K^^CMtLx;evuU5^l{<0?iaiR`7F#$0>LV;UNkx5$>npR|)47yp8ZbW>HH-I@ggT z1&14*lfF@KIw+Ew6+DFS!wPQ2X#pqIDuD#T-%{{o!gnfoD&ZRyJd5!43N8@7R>AWL zFIDhT!iyEWobag%UO{-Sg0Ck$YnD~6bu|V0D-oLrPf+mrghwd&YlH_V_%6cT6}*=4 zwwZE1b%bA0@J7O06ug=6&lP+p;f)G@PGd#jBPGy65xW#zBK#!<ze@Nf1#ct#Nd>3d zYf^=R)16DnqTuvB6=}YLhY((%;6}pp6+D4(GjJ>VjBa&EsY*mD;YkWk2QHFP!3DyD z6g;1BF9n}U_}v1z^@W6ARq*+Q|DfQdgr8IJa>7q2cm?n&Qk@c5PZ1v|cs1d#DflMB zw<`Ef!mAbhHNq<ud>7&63SLY2;sW+YP2{+2OdFgikC0<V<J3}@u2A|blmJlrDwG(7 zG8L403MD|Hz*DT7sZj3EU?H1~)+^W?BiAoxP@_j15&xY+Iiye$Ksl#S-cu;JLZUmR zP<AMkR8S5p6o*2|0%gBKc}SrMpzKm83lz$HP(HCLq$vui6r>LnO145N2jvZgk|I;Y z3d~Osw<*LZg-F^3#A=1;tq{>pBH9$<AJfr7I$g%AQ|haWIG|tyH|zat*8XKc#I9u% zHYf74*0PGt-|minZmY)X`wYoueKTc;B<`+Hot!kJ4?=XGTz*Lep7c*$_!$xhgKXC> zCaY48r58qYb17)ol_+4I0_FqA@9m1ZNknnQ-$<id479%D)nZFr$Q1dD11w|PP-`@- zXw}j6PC48jSuiUuJcm@G?kqs-lc*d>Un``~0GxR1JpP~({*c0prAkH(kv-yGw9{FR z`6r0gJE>vfvD_l<potupHqG7A8Ann9@*AZs?t&Xm3EAy!-9pStW|;u(mv;+sB%aDr zw@3JI+>GrAPT0Kg;)?JJdtm!`I-3_irO$PU+Hn(D{r0#npg2F19)!JyOY3pq_5}Ak zFR{DZ=cV@80&ze{GE_Q9-f1~Tg-DS*sN#*-*}4pNvN`SAHLu8E(JSn^9RrQ7jmkk# z3aBnaCRp&sK4cRvhkFfTzwhYArzNq_oiY56+gaw$FrjHX^2E}cA=JCv1V;qkBY<`N zDp{v^E9qtI59o}rMh+vMzTNSJ61hT-yhz(;Y3N3_V`rK*Yr>?-N^g;InU7uh{&HkQ 
zH@eJmf=QHe#cR1@t27k`Bd&v`MY2tZ#*6$_Uy$$}x)rc*T?QSsP3|ClC8N%97)BvN zuGzSDyLpY=Yp83(0GT6X9KA^;o<g;UAXj`chB)QMC=4z66sGuYa=cf2{Nqae&W?D0 zIbI?%rJn}`XCp|crRqXhF{sfrh23SSYv&tkCd{rqSA+fyK;usnf?F2aqd@SkNdo}h z+_OB8n|{dx7Xb1Ok`wFRrWo<6%iunMjej9WFv*;!E&W}Fcls-3_P{L-`3M(RGelj2 z%Fx+maHtG)+@<h{x?9hZ1XlEC-@g#UyT&rl7yJ5eUGdlSPFTT4yeRM^R<P<9NBiGe z{#Q)Za@P7{Ilp2#D|#t9W_}e~5G`s80qwf!6zVLcgI%43jmy|8FC|!mCtxlv#OwTJ zo>ISj;01#1cFU^as3>~^H$Bl+klT4&<Hk$<cn3X~Zmujyk=qP;mMu-w#y$-Nj1hBO z>WjS_WSl72?zF7@m?&LAkzQCvrTQVKc9#n-!={odo89Y>RE1c1US#x@cSyYu#>J#B zq{ZzKu}VbHGPdUB0et*2_TJ0Ee0?|8^m0%B>6h4_FAwlr2p<YrtD^R`6>Q)uy?Q)d z>T*S=N(%wh2H#wYy##bdy$AIuKKk)J4r|`n#-4m7FXU!t6n$L!{1Vk)UwI6R79z8^ zvEN_moOt&MdC23^UjdY8ojlyN;n*0ZO$!<&`nBYUetP}JL%2QJm-Tryluzo*M!lNM z@88PSzS__I;qT!`egD;N?(T$q^J=8~_um1z_i848yAR8FEj&`uf_``pUj2ekA?1kA zI^fGFeVFC7LA?Dr_V#Phyhk6_{Ms!3*;tnRdNjWxmQ8p)f#0`<t$RH_^jZ>HaIW<V zYK|RGZmUyi!b4lwhp#`zU*F8)-;D8D+KX&;ePtgP8~0`yv%j&0?<B|Yu*7rTJjF*s z!Mv3ic~u_Me)t9hu6k3o1bEWC%!x-`hO$I9=dI4Xu_s&k)_}qu*kP%T<sm@v-f-<L zT0}nWLA7x<QA31ioDIsE;7xM*?#%U`N;P)h#fAV^#qnzAOOSp9a%e3_dMY*_DkZUo z`Mw?2^W6nhNCf0A9R_MMa;=%*N~DXm*iJZ%VdyPv_x5J9v2UBk6e~PZhMY`x;bHw$ z-RDa&yZ(#&+yc`7<UZdRz%IUROg%flqnS>isDa`?=R2X9<UjvHp-ON6#W)x!{O7?e z_nk27k`8(+#trJCUylii6R>U1pg!y1&c_#&RF~olAgT3HU5bi3pAN$g>T$=Ds5i_Z zxXO?SQaVVoKH3kNV?x3?TGw5cKD0abZ3d~+{6y*+>GHhxR;b?p0J-*^rB6E|q*?Q1 z2jtGWFgC2D3;)z+HlyS;zh?p4`tI=9_-st(V(cJsQC+ZS?^K-r6F^X>Sj$|-@9HsT zX8hkzu!g>b9(5Q(EOdf(^gm9p`h)R*ae~z;o~_>X_Y<tGb32ObF_-PzwaT~)uW|l$ zf>n<zAayxKb%I;t2>qT)@8$@W|6ZHVt}+tSap|>lZ2bG#{DLyJ`TarsE<GE!(7=9v z|2@9Yf|IOy?gu`1xpr$mDAw?^*W=vE*yZEpbl}kNpJ!GH(ejzq!-#VkT8zm0AJ42N z%kgzaI<w->e9jW~tnT_at=eMt&VM|yifUtZV9njmF7D~eFY~1XD}LuqHe_!Szx*a! zyf>ELyp27*H<<q_gT21j$RCJg$M>f5%Ln7M>g%wTpF&gJkTo!_oR=zB?w>dzZTXYk zt?j`t{d0HseO?+}C`@U!$6lGiGWN&m0&wR@c2sqeF3eyL?(dp-7xN7(e=0fGuu?r3 zPJ>H6wE7OVsS`I~jw^>&*VnVX`}3^DpkiJ~8Gx$ee1kIUR;Fhzg>}1P$$uPLSwQ-y zSx1hZjnG}ybd;rx@<4p85VzY9L2n6Qsc?3YzJM*PDxS+gZ3#xyUp^7$s419~5z-lo zK$YJ3i+9j;_U4Dt`n7aABz~p5f%w}a?EHsA*z>CmhOgm4o0qPefm<^xW(!5P-~gNV zI2(I1ivOvAEj`(-`znY5+UZZ&3K5<-{|=Vkp4L&3gFs0&G`d*fyAdq89<Lpo^!4BU z7;;-66yYN!y$(FaZk~+i2NkfWQwheSq_{-$JyhM#jgHfUr8`qQvJ05TN>0TkTn2?& zaj+xCnTKljP1m(Sg{<fy6nzR+>HZjd|5S`&r^;O3!7OQ~ver|5`7Ko};&h~``(rRO zZEI8fxH4Q3%uNYU-^5)J!f`jxg`bJN?J}qM$?rr-cW{vb7OsA@==T$NLjzofD_z){ z(+S2vlq`q`>5Cllb(}Ngqr}9^4Zt0$__w;SgQpXtL`q(V*Vd1=?tvA7xi_Z@h2xyU zp?$NG`8MZ;coD(&5GoT5`?S3-le@6e=CHwMZBR5-zmyeI6kfd##yy|mP<nn!_z=QQ zyykvNW*T}FQGz`McOb6W*x}|O{QEZM{;AQJ{4j<L1~PnDb(R%E5nX~`Yr~YbyEogk z(`?NA={)}5kFvKvo!2#M(qG2ZiHTU74mpbjaZ(mu$!n1=rLdGU{Q`HahYq}q$OaCp z7`}ruk*zwD?DsY(j@dELvgs4q?la5ss}1sm>1<n<!0D?}uqcuf?RgkB+gygvU~bsj z*66FkW%><VJ%FHb^#BGIojAPsI12d{mYMu*1!THr0<(NpnB@+6ktJDVrmq9y6-2z! 
zgp&wZm!~C(#C_gkL^C7M-UPFV+Y3j<FX+2<%IzH9z=A%Xt^0O7RUXpgq$T57#pe-x zMl##}c~rorMRWv#m)<ru2<ZbhEMg}<PcRg&p(3%hP&Qo-!CN}Af_Z<@Z%8QE=_?iL zRR!&>g=n$Ml0$+vOYgo-a@V@gMKO!sqLHwAwY23;LM)}yg>h{47Xvc8t(KXdLP|=7 z%~OIP4ycxcH_D94g<p-rv_@sxt}rcH$edsF<I~?@UC$=-+J$V~*&cjgAGY?aIeg;+ zIw10*7Cu9Xk@Z8)<;eM=1?>FUaeV*PtoN6ZS>9kXr@=F9DWgwlY<S(}`UIK>y4=qL z@1<Q{rL-KWCsF)z#OJ}&3lFp-$V2iS&crWc@^@hQ#VGfO@{yh!+deF)Lyq*K@$16( z5s|nSfjoO}@lfjv_h|iW|G)-)l*}ki9)qnXu|3_3-TN|;mwK_D=Qf8puF=gd<@#y9 z*)b9ye#PKeb&Z`r7n&mYqmc*bRUpR{>>uzMFucDr0Ytj6iW>Emh9Fn<##jnakz@G} zhqA1%;%CHOfKI`2h@Y@YNW|UJ^ACqR&r7%RKuy(_`AVmOHE?t%U%;6qZbhS5h7dkL z1)_Yg+en|IdKDq(q}4NVrXq-6fN8_;tnRCj?8TtpJST+XBW;)9Mi(%X%i^Vyj$EaO zfXX(1AUMh04uhf6ACmz&!9+w(1Ab?bUq|t)*Rag5Pg+Oic8u9CN4s3LG<uyrqIUXH zB-fBbUiM=sZ?7+o3!$wny}xc2zom}|iC;^pFyh4t@}<OskK(wf4cRCQh;4L;vDW2+ z3nCS2>c+t=i%$Alj*X#GT2fp$*n{8nPnz8eb?&&hI8F%_();doadFy*vbIA{9G8B) z%1(aMk3ab>(_Dz_wW1u8{U0ta`r?kzf4R80_BS^ELJ~jbTUK!)&bp5VW2?tvj1A>G z9<F9(F#GJ2XD^4<{)RShCSD7&1S#L1*bl9fWpW6-<Qj>$bg>k2(&j~U%!&gex`7ai zkFj)!@^}ulsF9pX35S$Vt~EY@P1+eE2t8c!*S+PJQs^CAFXWJlO`%jkg91lSe$qbV z4$9dAcJ6$4-r>tM--N|k;sWSoC0Y5zl_gh7ei7eGRi>4off|uq-P!322IH;_SzuZL zXW=3AwNxYCGi#p~(g=aMUre@E$J33Xru`@>%M7h%bQzxUL8*4_uQR9~hXvJpCF(B7 z1iUokN7chkWy6Qi0CLk@@d;>%E54`vx4+V;j!*h!;foYkjPxZ`F)vGsN+LfDwx;+Z zZx(y=qK<b@V(+zd_CzdB6B5~P7Y)X|WT`pvZ+w&kT6}q*CkcF9871f@h-mJoqL;Ml zmH)y@g~=Bkq>wVEyA&FE&+<<kv|*OMvk(X0U}3W^Me(m%*u$4%c$<ZNd@0hmZ6$R; z&@st3k^OinnoqS*?fUBCCeu{yhRSrbq`i&Q#I)c3DovIG6I${^{#c<5Qye&AgHfj( zIQmHSY*$NU@*+4O{##y>z4#xfBqj^H*Am7@uVD9ELV1rBsCsB8Tngr-pWuIJ&u8zP zo|-_7byHt*F>u@~Dy&uEQ5CkR@R|xe`YHSoDoj=3L=~2(aIFeAsPG*X9#-Kw1zI^t zCA6t90N)R%UyKS*(AQt^+poemR9LOTwJP6w6>e1FTPi%P!m}!rRCrg0IB*E*uM{v) zg`-qBONHesd{Tv5RCrK@XH@u|3folZF+j;TNQFif_Eh0Of+(L8RKjEx7OQZH3LjSC zMiuT<;rl8)s=_lW{6U4+ROmiXsYrwh<5bumkjm$>RKj=_E>_`VD%`5V_f>d8h2N;~ znhO2YE(ud%PZg%BkiMiyzv(JmGDwpw6CPFx=Ts;Y@n@piS3^`-n4-Z)?&Bk?W4J8J z>fB@nRyQfIZ9Cx8(Vca5Hx)u@EFh9}@7DhLeNu&acy`8UxSfGYeOe}H<m@UY?jHQ( z5}ma$KnarxBmdMeG?)C_saN8m3gutE8ip|-|E~Hg@o-PbziKrcsD`uDFw9W-*P>JS zu?e7`l`B;Vp$dUBs$pzc<linoB>}cW@-Isblaq#iSACUuT>O%Muc_g#YPiKmi8rd@ zQZ*bchbQ66k(a`NwNw5Ts^QgYI7<z$QNu<xd{+&_J0eTerH0!)HM{Rz@8+Aq&EYI~ z7IQ{!F1H;2EyZKx7H~`OcQInhfE97`<=DZX#d9k-RnEI3U86Nv|CHY(FwWr~K%%)| zvmhfQ5-vxGiXd94oP81S75LAHtXIed%>&*Y881e-JEs&vILAHnq^6)IRio)tqtR%7 zj?DOPHJZ&8Ba7wS5UybFj~b1h?<Bha^8Tgb_2zB2ue;BF;L%4lHClJgiiruS^1qS5 zZHVN!f8*)jrqp^r@Kb*acM5ao!ukc58VZAwDUAkf^c~Xgsq54<8XunX>F1GLv(sH; z<PR^aIq0q#71+*Q+k@lC-xt<a6Q$JzhqTkndve^s#J@_esq@!#sVUNGIvLto=Jw{e zrFh0rK{ZX@n(*XKK^|OCu@@KA-;)dK7f{;HYUsytad>1_aBDnx&SN~EUz6*p39%9v zO{^X%99M^DzFPBBz`y@nSmWc)`Skao)|7VAyK{P!qVMP1QA(cy@ap1eFU4Et_vU=Y zdvx%)59GKP@z_uXL5&|@GuA_s*CCLYOpY5igyWuXt9i#m6BU1W8OOC~!H7>Vxx>Dq zpT?8tJT=eB;wF)6YI^u+qHDg>X@Wh7zuCR!vWF(D8{wr$+exm7r(E%Nkt`p_aZPwW zYOCq#sR^Tw8R4mkl7&Hep2Iw<gemZmDvVAfvVMF=HxHlAanta8*&hEl;j|0X2Ub-O zo@**k58(ND0&h_(P_NGJTxSTeb3eT-Aaue2ZWrdr1w8GEegocZ=3j|4f$m&je;pUN z!8c#-M^vgN!b=m^A;KMn9QXDdj_Y-+X1tdshMKn0OY=ZS{E~$n_Z=RH__CKKDyF@< zJr{G_5<KJ7>iv#A>f;DMa;v5%Mpw<ZKAND}iWu@c8N4`y1y8XjXTX>+^y}2zF4}oZ zIPPUUTh)9t;XYirC4dVr)^p+g{kiZBLCt}60r?ue2d6Lg#yIlg^y7U?NhCF|d28Zp ze)QJ#>8NhS!yNZM9_#IzcQ8e23U!)<j>xw~j=O{hk^l11^sIrh4oT?L$%E_E--qke zPgmz#>QmTWvDob#Hxkc&wPH1Y_-OKj+hZSp6)|{P5WD}?n(4lpuqb8Z_;5bU{~E$4 z--U|C!{4siqEklB9-XF}MpLuRR}(AGa9_>9?lkyleN<YhM=Sj~{rgHQ<w5JubN>B& zY7+f4p*6XF8n2Fu{Bo4ze#b*xzaOnx=%)$mXm3>`$8~PvxY4(3p7qm2^=_~H4bVP1 z#&HX7lN32FGzv1^;M?Ne>{+KRbuV<w=aVrGzVp+>SgB^la11}{!~VY1cP;wn{}ffz zmmGHok5$d@e-i0)=OBMP?II0L_T{?vFXp;xrgH&T{agH+ed@eQJqtbZIgZM0<hjKE z|AK$S2R!g71=rlyX?k`v{uK!?T;thvyJoPzCMuO0Pm1eEEB=8TJiFD(E(Wf1a@=QX 
[... base85 binary-delta lines omitted ...]
delta 28445
[... base85 binary-delta lines omitted ...]

diff --git a/pipenv/vendor/distlib/util.py b/pipenv/vendor/distlib/util.py
index e851146c0f..01324eae46 100644
--- a/pipenv/vendor/distlib/util.py
+++ b/pipenv/vendor/distlib/util.py
@@ -703,7 +703,7 @@ def __eq__(self, other):
 
 ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
                       \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
-                      \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
+                      \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                       ''', re.VERBOSE)
 
 def get_export_entry(specification):
@@ -1438,7 +1438,8 @@ def connect(self):
                                             ca_certs=self.ca_certs)
         else:  # pragma: no cover
             context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
-            context.options |= ssl.OP_NO_SSLv2
+            if hasattr(ssl, 'OP_NO_SSLv2'):
+                context.options |= ssl.OP_NO_SSLv2
             if self.cert_file:
                 context.load_cert_chain(self.cert_file, self.key_file)
             kwargs = {}
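The second hunk stops assuming ssl.OP_NO_SSLv2 exists: an ssl module built without SSLv2 support may not define the constant at all, and the unconditional |= would raise AttributeError there. A minimal sketch of the same defensive pattern, slightly generalized to cover OP_NO_SSLv3 as well; the helper name is hypothetical, only the ssl constants are real:

    import ssl

    def make_client_context():
        # PROTOCOL_SSLv23 negotiates the best protocol both ends support.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        # Disable legacy protocols only when this build exposes the flag;
        # a build may omit OP_NO_SSLv2 because SSLv2 was compiled out.
        for name in ('OP_NO_SSLv2', 'OP_NO_SSLv3'):
            if hasattr(ssl, name):
                context.options |= getattr(ssl, name)
        return context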
diff --git a/pipenv/vendor/distlib/w32.exe b/pipenv/vendor/distlib/w32.exe
index 732215a9d34ccb7b417d637a7646d9b843ecafa8..e6439e9e45897365d5ac6a85a46864c158a225fd 100644
GIT binary patch
delta 19564
[... base85 binary-delta lines omitted ...]
delta 18962
[... base85 binary-delta lines omitted ...]

diff --git a/pipenv/vendor/distlib/w64.exe b/pipenv/vendor/distlib/w64.exe
index c41bd0a011fd760ce20ba795d9e535e0d2c39876..46139dbf9400b7bc0b64e6756ce17b4eb5fd7436 100644
GIT binary patch
delta 26888
[... base85 binary-delta lines omitted ...]
zjemV3>k~hX-`v7B#T)fM*rR`~ZgoNh4pU3FvorByhxoK$*TcYx63TCx{4EQeG$w<* zt^{vK@-Xl({GODaFwB;t9)j&`$t9d1oByD&A@V?RwWMfD9BhU^wz0VhT|7RdAiEQS zh-_>To42tK6GFQ#loz{B(snl(H--jE8JM-oxncYjIyXEL>})V~<@~|^Oz?6)H-u7` z4W4>w7VDVUt%H7>+-;p`YP|RzcAxHSSfaNc6qHe25%RPdE_}jFiP5<mZ&LxT36|9B zFf>SiV~asYI9UZx>HBMxncH>JRpn)33W_-U6UdW+VgmKQxT2nB3?_iR?R+yQEgQpH z5)ID`BcTdyH8mi0BCRVGx_#uW(2*4fa6o~B06ARLeM}r%#LACZMz4`+zd%b=^}k|} z&1u=zh@K=N>be1%Rv}}sx+|<MfM%GBBD@v9-kx3V)tNtal-YXq?X|&9j<*JIi1sA9 z!oW7Ol_s2&WqG10ag48SdW?{q@>?Y?>DC-O>G2g-+<TJytt060^JEYgwZ9x;-}esV z4<2EB(&Da*d6XO#`44Pca<8D6vmszJbpD8KO^PtS@{zK<qUl84mRq!j+Dtl|VLkX3 zUF9uyD=Cz}Rn0t;d+-}?vHr>Z`Ko(td2%1~o)1t0-IrLiXmW1)keXfYZNVyN_SZp! zl>RlSjcJM(bs|J@tAz^6vZpZUy>T7ag08h0mJrVZLwSW=v!nF>W!zj`qa*rcS(dbD zwXfCvPh!vX@zh*q!}~;=A&g-xP<LykUW#v=;)_&#GZden;ww^oZi;V#;(Pinr6QK1 zu@%NGg|V3!R;~XbKuR4l_y+t#I&iTHufrutC41b**R!@6;m}@UNytIk_$}+467Bl? zUF?^Bh`#g=o0Jl6mXmBhVYL~RzPW&@@bi6HYs<s#xl3-5-`*!J<izjj2OO~kxkVK{ z!<tEXQCpjOin@;=Avf#mhyYd)u6!`R+-R>cya}G#tk5i8OAQO?DMzl8EgBINTA>XJ zqmi%Ghp}f;dk3FaW+T~rY=*4&Xe>5j)Etui$?vgssh#=Ek?gC~Q2s#%`!Ur|_&x*4 zAm>!!u+2`e24E?LU2OoOBGpvH%M#DxZb$YboNhxpD}kNlz#p3V_-j(|c{ZT$KxeTA z(X4&TYS_uXJ)H9u-$dEx!6e+{xlo6Qt~Nu?7UtY<7azBi9qgCFuiMTZ^vmSGyw1{| z>1=*Fikc0!tP;#C>rgxC78qDSbe6ueu5)DMnuvg<Hkw=dGE%y5Yq<0gHnD?Vb~J&t zm#}eQ;Uo+8`5PFGCujrrf*@A7l_V&e>kBIq%b(Cu=^LA&_#JRz|NgDKx?@Q<Bry$1 zDA%i%NP*?1d4(1H2~CMETstNpd0Vw$X-%C!h!{K=h|}p))*~&D7e=z7Y2Icx5(oG6 zcFF_itTWO&<tUgAbFIp;C>#tk!#K#LS9us|Svez3!yep(Sqdvl+rf*>;YR6;79{)M z%+3GKROH;P#8WT$7H?vIrlrM14Wn|>Kvym7*<!UUjVmrRfFQ04Ps7}&EjacRo17l) ze--6JUzj@WL(6i~DW$l9mcGLG?1S{)aSfXwUZI_817H+&wHdw!C)Ht|ABNo!aOnaz zHhrHxVv|W~!zSk5KTea%()&mHU3-fD&2_>7*JgO`Z8?RK7)#$jo^s66e{$F<PpbVj z!MfNZ-8#epx7kNCDkiyQS4^@myt57C?CK3>KVWL$>T6VF(qxK|4bJ9bmOWJvT1o2L zY}tTZ{;CJNG$4h4b~N+Mh~eW#0y?sh8Akre2v(Buo~D-hW+w7q|H^VRdnSv&VBYtq zW4eEf%@v1Yt5*1t(p47-trU2gmK*If2nrRi;4Fba?WkYa{>(7VDfUBV7ztP+GBMIP z%cG|y5w+T0pfUb1D|f!hf(8Z!yR|cxyb0yxlI79k5-&1$7;CjH4>>krU=lyIfvp?Z zji2;0J3KI>$GJCX7U;`<rToZvNK5P%m3d5N4h<k>jZDDOODti~K7Psr_Q#+=_swq9 z5u;wfKY@>bVtzu6<|w-^Oy?_mvw>M5{PY2AW>z>q?0dF4%P^)6wX4`f)oQWX-CJG< zg4dTn1l-dO`H3cAIrbXvYk3_#EA5kyngi@Bw3hprGXtC|v`b{dtwQ?}5tlIu?!M0+ zW)%i+Z=^(>aQh(YHiOPeIgM3$a4qE+xVn*<2YYuM^d0fPGtntX3PO<d4cj`nmtiqY z;1aKWv^Gl4SY~Ur&v~&sgP-MV8rjex8~M(S?B<Z^{K8+@$n2;=$K)*mmusAKU^w-V z2xG0hJ$PG^Ni7NaH;EWU0_GV`a{f2|lNxp`JIq`orzIftP$_CIM4gk4DZE@t6W%Ws zFHQoS^oc@`qadrc^#=g<6+g?qJa8!IUD;rXA_+)~N!Yq@+}942uvPmNi6;+gF?E2w zoi;NUOETU)X{FIM?1da(O$~bmDt-<7C?}o2+sJ;;iRJ(Lo&^l;$^Y>q8#Xiu9Xn@e z0{(q(sDQzEb7(Se<5`zs)4h#aqzt9oApMGigQ^D2V_Syh!-(pT`?{|2N1P`VTi>n` z?Bm?7#!tYJZGEqxaMDKjDtwxkNDKtFx)~$bU%5%iBNU;Mcu4?7hQ6+_j6@!H#KJ*# zzBq8(?R*ikN4E2Q&hm%9tJ%-ABf{9)5n256ud?eS2C&W}7j$^)ieWRvN62rvcX(Mz zC#m0cwsWK(A9|gg9+@5;Pdl-t8oPCa$z6(2e0lJdxWb~gxIo@4<k1sYz>-Jx<X!UF z>`~z{H(N0se+(85w`zswPmB}n+>SImq8{PjV`Qg08BLwyFNkOH+Jcu##<Q<RMe?hs z?fz|44~@V4vI)-x&>v{NI_SJ=^Ns$Fy*;ise{(APc3c7f**%u}Y$$*AD$9FzQ}WAD zb#X*Yl7@q*<;c*}bPT;5Z!15cU-3Su6zvfh1@x8;pkb%a7F%(h$JVpn<GVR`LL|g( zxyojZpXK*mBqp$8$@iCDyh`a|O5^3oRd#!PCubY1Nr-yzb>=?d27m4qc4tC3ziS=y zop>;ydAZykE;hrz%4MoBdGU9qshE>j*0Mh*X8D|iwXJmVI2Uj%YhgvxH#>q{u$GOO zRLRf3%zmEqBLDFSHfeI_ZeLi)q}qtR*kChM!vlMh2AY`Vl&$F`MsvzhaZ)1&aUf%# zPCn)~v`o&>-DbG7j4hes%_qFZUY}AF`V@QHeo_UH&zEapz)K$8VDfHuYvXNM!y@yB zMt0tVlpj4!*JpeOGvu>2!-=J|8EH%WrP@nvO)7nNiEYaB4gC?Erf94UFfaN^yWvCY zt@=mI_Y%93m)?0YIPuqTf=g@<YWf-_iN8jjf<1^!>HFbrmNwPEv)gR!)NY;bU!+kl z)dQ4HZypQf@f)^bYG}eHnJJs_cFSUD2m}tuL(3dRF(@2yU;IbR{vx|QHB#4K=E&3F z+yxdkEvf$#=#gz!(+bdtpGucO9fXvN#X9+Z_gg7423y6cZ1D(G@5gsB3t&b*CZs%? 
z=O%r1fxSJgzvrK3G%YMVoOM<mw21y8tYw;yN8?QlFfB*3>P&_1>`msA-^2f1Vp@*i z^~~V=N+6c0;$xz&>?WI*pAtP1;c`->jx0CQ;!$myAsTv6JnITd4L5*x-&7a^Z?Y5l z867S+Q-ZZlDHj*5Xm)^oqnQO4M0qSnkq|Re$o~VAt+JVoFX+y1Ucy!ugz+iOY)3(o z@6^SJiK`@1mUAc@LDoVqEO~8u5B|Yo)^mEiQz=ws94VHt+0*-t8ajcpYPFSkNbj9% z8wzC3AAwg7Fon@w5!6`6C8%*L$w6%`UWCTu<ExRpGsf*U#A`l{Jw||?-)gwo#GXzc z8W#<&#mGta?Ws1cMr)RzSTHZ8@4_aIoA<mRUe6Z)$U6K)i#Hv!o7j><Kg}ifMqx;= zO=sJRBJBo=bd>SXAEiZq`b%#sT&d(ALH;Z=eir0+ufI=Pme@e=AYS&sa-VMFn!Tl5 zD89QBc6Zb8OZKt6J%hr^w`1?Cf_9OKgF;1S72Q8XOCN!uE=UQd*~vX7e*QX^_C**! zeBJKpUu0|CcAup7Jtc`)M%Y(h_UEUcWSYG}_75=iS=ioL_O8(D*qXgHe5Vs^Y}E?; zcQHeE->53p*gtDP$aDKc1N;wQTeci`qGt!W>H4pq2>JI=gle0?)5Jd5pB(dpY+9|K zhpeol>4M5uzwbG;U13H_Q;8oo9&c>=yJVql^O);_5PP3+q#b+U2m6&dAUYh}!ry&w z_m>CX;Q5G+Z1|xd-QS1XE4J(~TXZOl>7VPW&%TY3FOKw}%bhyOn+<xd$ob4IOqn`~ z&t=umB@AdeigQr>wRBt{(A`B!pcx8_-dc*%War<2vNjA${$NXLBWId1SoCT{>sw5d z(Li(e$p<KHkMCLU8U2Hrp^6~<CxmQ3i0x=o5&l&Ua>I_W^)sgFt{rMG;*3MgVP+UV z?;RF3^LbYux;7TdE>hwl_U_DyzO`qt_tSY5SG%XaZjUtXYvw&`3t#Xx+dnIu|8^E@ zoR!$&omujf=|dK?G|YAXp{&dEz4!wg*_7ua_{Oi;^5+Hj$zP$RF_;hHVrk`9?7Qbv zdL8>zwzLeVSt#Tm$8m0e{2Xi#M0%1qD)RkX^nN5F6kgo))M|5@SkCPDK;ZyR3+SNt z4`N#p6`2j7o9;o=k}YmKXFKqp&0<xvGu^vj<-!{@ye_dBM$TfcFU06m2Z2TN_gOS= z@V9Kx3&xo`@Z`qAfmPy2Amykj2n*!hffOGT^!XWbBVlt&28DZF93~MSiSTUkI?@*o zKhUOIZr~Y&VaGQV$?as$^Vi1pKc3<1gl7#VH`Hk00apJ)_qcoem8p9csBX8|FKFsc zPYKLK<av|G%q89t4nM`zjp+3ai=E@gZyLz5=lFG>(;f?h28sHixMD5lY>Ap|PP>3~ zbst+hr#nC94OTN}c$bb}q2+Kh(q@!`H3sY4E({c$o1eqhq6D$cv%OgETnGO1>fPh! z9?^7q^JfeQt>juyqd9OkeAqd?USvK0)s+v*W)uI_-_rml$t=fe1eqOs*^YntYkp^^ z{uS;QvlkK2{nZGV3c56fGv!hTZ)eWMF};ez<dz-MHWcg{AW*Uw!fq51a@FgQs3qd@ zm&ik~biqYS_Zt`rKCG<R+q?T0%5ZoYq0+w;$rrw4JB!VH)?n6i-Zg%97IQa=z5RYv ziYQ_Y!jUZcXxr%;R}|vq<JzDagxd_;r%|SXXBq(&edp|9^`;8<-Mb-dI~8A@Q4#pw zZuVSB0)JvEd%I*bzicYITap|(XsTQZJU>4xU5BEOCEk_CTqp49kpcoR0-dL_(etCt zr#4YN<oBnC;<j9mc_Rw`lv*sQ_v>*)-Gkuf`81<o7ym7f5`b9>stL#pu^}t|n$2MR zH+m0=Ksu={@GUU1>54rYXVNO_UK^cn!y1IDcx3n5i|#@FkVn@2i`f-{sZ<yUr;#lf z*KU@Go~K##i@`B#4r4^z!XY)?avFVlR$9xbZ^<B35{c{AaJo>6Mpk0q!ozIAi_b-_ zscskOmaP4wn&ak&Qnf#rf`VJ$k?a5|ftRb9<AP^nqBAL9Ikg^in~NATl^C$%1j{it zsWFCEGwXu9n0fy*#&?J0JP_lx$&^Q1jG9Bt@1<wV;^$Z-tyAojr8N+1<l^Z<{vFKw zKTjcdOKOWe65U$vrDIw3kW${o#Kg+rV5^0ZG~DvYYf(=K*O0L2)}q{0I7}j+ZimAB zQyTxOe7W%s{J7k>!`Y5DuT~ab-fb7V`jUy~2eI6jGko$sBUR7hYLj|_YEz+(e9E@J z{5wDAQ?_B@sIb$?Fvr=E9uOiu#ulo`UjS9h`EVHFwcD9~Q6ir=fel<V(%Y4^i;B!f zS;Ocf<kRELy~o*(MgGw|t_iWv3_w)r={D>+NlkCrQ^~QI6^>V@oE)J@ju3PJ%9vP( z_3t611u>@)7a(9^-~L$oXdCOXIKr(a5edj~9FDWR#Z&czNcYdIqw@Mnowu?2#a+7Q zU<tqw=~QT0e95oGL&zUpC%Q`4(nuq=nQBB=`F39V{pxPx5*JMedZ0(E!|AqwOqOc) z<8Pl~<IMf|ejl?9W+R__h3z*7@NrjIgSm@l>1(B4crRlpjn??H{-t~Pr=wVFX|$nq z43eU<B;qkP6cSYb`;S=i(hz=59GkebH=p<+Temda-T$M1n*XB@S?$snd}A@#>BL2+ zEz$l+X@Qf|Q&BHv#<QGdWl0A<MvI_~F8i>Z;7T6ed)kN(U~MpYDR-&+p?svJTLoS+ zzlVJnBb}4JD#Ig2!)|YjVAp5PmiB%05eAt64NzW&v&F+0OFp|W)h-YNHvs-P;nNdA z9%(d<SjsjmzhJ&<Ma{@?lo2kZi&D1uSGL$BtwlXB!;=F?B0jzEEXUq~r9gLQCaq)! 
z?CZ*-Dt#H#P=Mhi0^||kITQrhXkMlO8WWd^V>N7$CC*|Qdb*Pmk)ZGhW)j$|2;{X3 z{3;97kpP9SSHg|$;b-OWt!?2iB0L)tzCwEp0o0tK$nOevQ!Fm{+MrEt7^PF8-2frH z@)5PGcXDy^+e7Vguy9BC3Ps0qIW0Tox(Pca@?I-P*1bR>VvEgCmc!m$k)_kflIKhb zHbeIu*0RE@i?oB{o-=i{8FUl}rJ(aS9yFq4qHZ68ZH7D9EUF@uU;PprR?*Wv`<;JA zOT#AiYK6ex+r%0whPZ#c>7S6qO|0k21$@{hwryod=muKCLqu&QutK+zd>y6cx7)|S zg16c2mC<~+l`KG9!21kluZ!6M-LO3iQH}JtIgM7t?X`}lPs=fHE@4l_cz$6Pi(eJN zUmDEDuNug&8qBt=O69vBWR0r=_?wH_(^U!l!z`9y?U(Q{3!}dUM(~!U$=`o3@66)M zNZmT@AI8gI(4+JmCn5fflvKhtt&V0FtR48BFSB2*8~EePS?Q}u{M~_U|Eq%nT%lCR zg`*o1s5852?bO1ZS;zuccjLb}z_M2R_Yt5Fp-~O^rYR5KRM59Wv<2(~8<(H|UZ7<Q zkU!pi<X~i<k<#WN|9*Vc0`|jdllkixk()TgL;6A?e5CCPVU*reh)C&8g@~2b$b_p@ zp)k^<QiTwt7ZoB`nxhcor0EKgCrwm{8PZ6FNRkFCM3I#KqFK&;fs`b(#8R5g(F(nS zXrn?`678kX8;EvN=*>hwo==I0TZwK_XpBZq`dOi?h;C5mYNAgobRE&=YQ<Aeo;?cP zK=f9HZX|l6LQ6zfDs&6cWeVL&^a6z@gRV4Nq3Lmjl&8?L6>*e8lg&{Q6xs~aJ}0Fr z9@#J%tI)F5FkGRNNXAE@(};FcXfi%ZoI;b)QMy|qw?q-ql0q*a`a6X#CAwarR}fuO zBDcR-NuDYtU<1+H6?z8In-zL1(Q6fYC($bux{ByU3SCWfkwVuIU7*nQM2}PG%|vG_ zbc4nWPnzOsq<{p4mWYm2=oX>_6uOmY4~6EiY)keE?L>5|Nv^#I(Ki*^hv-IyHWGbF zp(BZ|18v5@i6zfLB_N6DoeE9Y9@6^?EfBpyp>v5|rO@MuE>-9}qD=}tgXkFwT}1Rm zg<e2(u0ofBHcJ_bX9Wc$DRd>#(F(nRXrn@JCfZA(w-W87&^w8KJWsBD711q3L&=<g z8*j1~7s`*_=_!xsETrhZXSmJKN+%(O2ZuxOyj*utVO7bA4DkTxgu?ky;fw>vrf}9O zoIG&;RyfP%vC6eZbD83MRtYyE{AGnRNZ~|+Q>1Vb6%Nd`x<Z8$rf`zLnW%6)6;2vB zBNUFUn5u}2;S`;qaBeA_8Q_dDE39h@s|c)Yg>ypTEC8p!!r3cx<kbeuM1{FUVbZb# zW~9QjDok2`!1Py`3y7&UuX+)WMrNO_)4S{0KO5Csud!3>qWEuL+ugcu7tinBxqI7& zFbzL{A*+95K-khbP#5<=qgBi{n-#Oodtj($o7h)buQ$6!eLonVMU_JX!hU;7(&I>u zy*$Z^Q&P%tQzXpkrm*b%YFK*r3}!3e4B^)dW}m(pue)h&Pv|8pyY=Ra!PR)}Vfj(t zjE~Ar3i*fe;-oq&zgn=X8>s)X%(;<S-g?02u3}3z=J3v|*qM#J`J~Rw;qBu*C+<G` z_BoB_6}IV}5I$!yJN!;ZUg*WXeJ9PyeKBq0TrU>1*~jVEMdWEr+}&@pR>MCCVVUo~ z&L<YK@831@&kSI;cT@N+dAn1$bm4U~%i4=~&%%n{57#y5QA1KGJ*Aj3_U`+^QLEyx zrDudE5ffU(-?8$3M|Qe6#BRn}Qnk~~+XOqEH`9F3J8q9fIq<Inst5kp&~!NP&rj)V z2kl_qod2-Z-F+LjI&W6`fjvJ_&z64>k`&P%Q|hB6CMXuWIw~Mcb^%hTn#sduZJ9`~ zEMwO{=<Rk7$|vpJqBeXk3;i&r>)Q|sNs{KvlCnx`2NF7I(_sfvvG8%$4IJ@*-N+Vv zn4h+EBiWYh8`JM)I69T;%XZUbURxpSp19qch<7m6G!U%Xv5-uD)@!in)|e%_>Gf-m z+6-^SGXIZ!`3148*GIAZy|>tmkG}3Rum>Gw8%Y7hCm5&z9%L%-rjd&8Tf}k0yFm4e z$vjF7U*y;fuXbm7TYK}z-ehlV4dLUuvx8eF^S%67z{eqcM?dz=$C3QsZ?KskNASI( zS>?wDe(xLXqmP&KJ>FoR+d^I5il)xem-Vo*zT5m*$tQ2}IU6X1-xtLax1Hr@MltR7 zsJQ6$atWZC<54#i16E!)r;MoMHp9E!<o<TG!+YY4l&9vZW+yzx?8>HXPxMP%)ZX%g zLE^P})dL*84Ti34*Y*HDxEnjS-OwizK}`z~K&j_KTYv6#%FX_;+`efh*YwiGoS(id z4kFTlg_L9fnpdh{$bvrg=ieL6Qa(+OD?)(9h#gxNaE8FYnST$h`yb}to$JW_%lkO8 zlb;%U{RG`l8Hmz-kVr@wP0Jmjrjh;kM};fx`$ur#%dr0%-8Ys}X|Kp1$V!*l(9Z;8 zuXds4{utBUQ*=Amq5z$Re0u!3buIhyvtaZ6m)f%|1KDPVq9@xv0=^P6jv1Ri3U8~@ zFiZy&A`j)b6HE4HL42s+`=`FVTplYYr3oL>dJ|K&0E5*U5rfR=XqgBbi<E{ST7DwQ z6Hjt5&E+=;4u>CUrSK2gr#k|B@%l16;zcdaNe@3DaW(EO*`vm7gh!1_QO|#XsF6`0 zsq!!joRi*E<?)*FoMin^`6VB`wzE6$-jT)bOyh5EV8uH}_x_~=WlERxc(&j)g3_lq ziugQq0T%8wd=xqvf2BawjV@4ZaVIoFvd&`#d%ANbA2XKC+_iu|6~=Dt>dSAR&q6=X z;?u*}3!g{u{22Db=e^j;&pYr{qh*pTc-s#-ckhBNV%`)X{{dPn7&JZww)}>B#izJe zwBe+i@i$pP9Bm~jmcBnsWj79W4(=UJViZsjfOlcA?I%K;Q3uQ55*AsV=x$z(WM>Nb zc!V4C;%fGMbp+pIDtoOu(zrZKj{OA5y4leUm$zhrI-o}1U}9IR!y}8qp|Lx-J)}7Y z<ECeduI5!F+*Qawk7;r6Ro3}%s3B4n=GuiN>pV8;a8G{3tE}>HAisM)+i}>t>x2-F zyWik{A?%?oBi?PF3r(3<P;wlk^g~3qUtuBh^{^SjL)h)Zk^D(3^Enb=|FaS1{CZr@ zXCE;Hbt36%JX$~A^d*J|_O)B!;PEWhq<kwgAITX>nr5YCp&M5cg0qZ=`sB4AQb9!5 z4C@R{28MEEvz={#T7+pRb|#+H6_C8mU<c8A1MqIrP4PbpX2C~0@t>?>Nk{wfvsSSM zM~%ixvZG;-p%30n^MrhQmXNRt=A{bzdW$w<6{|Y>9G^aqh14W>zXv<Nx~060D%0Iy zIWVQ~Oc71F7~2Z^T(u*Q7ukxMUZE?4<>JDWT^fjL%BF0b=nT>uNDG&M#YL>Krjzd3 z_LQYx=Q8_a8N5d&8+^=<zq^_h9y9VMgV>5=Q@YKZO9hZOk$X@cn&|aKwiuFA{<FU9 
z2I&2L@eh6E@jmlB9_+DTF0K4e()DX=g_OkNxh(B?q@kKlzVtR${sIioFP)`7bJ*hJ zas3|5!FVda*QRP{ufUHU&7zjckcNCt4W)HMnvx^z*tpGqN$Rze2vd<XeGa>IJhAWc zgR;<EIi+e79xY;RNGwtkE-@+}?M+pL#;Zb`6rmw=*p%8h{_<yx)yDF_yugmtcGY~y z{;19LH@!gSV<*&2`kUe+$A!(sNO-~vY*^j!DQgv`zD&TagpiNVUBm0}F>IYvP6hsm zgtlomnjskz-&8_v)BQ7>?Q0&1LH}{USLTCBp+bWo)zK90j_@4%B=wwN^>UCVOk%dW z(8#&Ia!0yj?Uur3Q#W99l9#n-htR0A;q!k)&SqIB0$qNX1v@!;BUsUiXw4$__6ciY z{~V|+vxM>sC63Z$sC3N}1<S+67YASwy7*Gt-;XH2Cg?L7<S+HIVJb0n^`T7Uu9};y zy$m_UoPP@g1_h21ERP!Je?@xXHE^UOvsmRx|ELd;Ka4D})pWw3tCMoz$1BWlq*ZMJ z3zdL`S?t@BiGG=2D{lv|iuBFW?|`Wof7(LYRm&nyb>n-kV#81M_nG+%8DEwA*xU^3 zApGSOSgJ?<!nU3A?Xd@=GW`(!B?s$RSb6YKFP?8*29s`ZF=2a3vu3tsB2Age?wsmB zVE|s_(}v(CY!IT5<+UaL&DW#|$XSxKC9cw=85BcD?TqUeyxT)|(7&N!IwRq1@fh-< z=VMXuHIII25`FC;egmO7KeNiyKA8<L6y9$T{P8LEb-{K{fYMeHA-&d~s`L>^xt-=i z65nn@Kwst#H3BK&0U%QH{F&W49mIdLjOost@tgk~6$rWELoSe2L^ZpJ+Am*Zch8Ip zassc-)V#J37MG*V1%fy_4X@%GrQNLXY<!2`R-l(YN0>5xq>l>OwzIKrCE!@6gknLk zD`dZ&{TIK%i;X|$&tL4oUOX4nvH2Cm`f@r_4@HiU>OY-rJ(udWXgW2`gQq2KQWi!u zM$)Y5to2+MKG}<f)c1(piQ5rzr2Jy$$TC=sTM8&+QeZ4Ye2loQfF_fK<@er^G?Pxc zP{2y-)1z-pK%=xjr1?hi<+VMeiTPb#xY5O@q%%$IR(%}5PhuhG!(-lBqL_96`ytK6 zY5(IPO?V?KJ|E5Zk=W+*sU3~j&E?P2=$SDFyYyRs_Q!cc#HdknR$2i!Za#{}`p*kT zDRi2A!Nz_Z(UGNI@G)<PzKLMmBy(vQSkR3ygn6P(Dzty00NC>d?DrPj!il;cWd>eF z<6iy2Q`|rpuIpuy$fLH1Q5eFu2>KHTwun6Z3-hr2OT&j3%HIhQb8@AXXqHMk0Zux4 zS`J}(7ww}gUzH+z&g7~rbT6a1|3fI2{>%uBbyD^O_Vj{JO5A^^LIV4s7vd8iHEwKM z21Yq9+ULCJBu(AQhF|pM|H=n+#C?~p8%*#v!zw*nc`<<hlGt93G;yT2@>%*fzK(80 zFO<c3DVeoi%;NnODLI78XH&m1*yl>@7Ny8mUUG32aInLtDJP}-&$4$eh434vu_Kpa zeSd#h>1J5(vqbx9#d_}|*={n|Z+!WrX>!)GMi4j+SqJe?rXuZ5no9Q4H-pTj_<Wmw zud29N#m`i%Q}J6BB^4j5*a6Rr>DOJw!75Hvu~@|wD!#Aceg)0kS(WjFinmqd@M4;N z9x6tv*hj_FU6k;HDt@No+BQ_>wyIdAVvUORGQ!yPlghZOqC>cnu%C+b2^RfEsW?ML z{K+br|GJ8ws92?9y^4~G9ubPXQN<)e<j)BzV~mOgD$Y@HiHd7g+^phu6_2P`ui_6X z{;uL(6}iqz0XnD{3Z(qG?kXcq#i1(Zsra&rYgPP2#cCC=s`$H#oZ1C`Du${Ut758( z*<Cbgls`9FWz11=nTkKCC=21A*=iS!QE@?-27e7MBJgG(E{*CIo}-{NLBa1TfNMfJ z>h?4$jMFJVMCsz)_9yDFN^?1I_4sQW^$&OVxiZw*oc36eCIyFnjgJ&Rt<&@?RsA%n z=ohQ{u{g=U`iF`<Ry+Aur226Jl7B|k-%<5fKTzbMTF}qT<*E#*ILc4;`>Ec_RwaVJ z>Nl!>Ix5qz{=O0(q<Fa%sy|ruSKm{@jjBIa^@qs*ksOzIR}sJ&Q~sr?eyBV0yb z^e8{o|5)|5-cjP)RDZ+nD>uyg%$(`-=1qNWdjGi#M$LR-`rH!JSS5TQH<dHN&E||; zCRdFA=E51dncN)wn~jhXQ2E>pZaP=Id-R<Q%}DsCa?=oFIwVYxHzK?kK1zvrMRF?n zpy%N~BT|?rr}rG_E?fj+&I9&=PoZ%ZI(T;?oAI|N>-qO@fvFJqk8HUqTmcd&=K66H zcN^~MdC%3KzB#h|L~Q>rvy!XamSldWVTrBtLVr%e%_yqXAB{g9uBB0_<1EliWIsFE z8W&4q8iaHV;kb}cj&u1@5!3j(aK0uT=Ud>$`Hphsd=ua=HdM2K2S(l}pY?k%IJqru zX%xrN^NH?iTu;3{r%!O?^l`35Z8A@~bDU!jEHWg+G*1J1*lRd@?6;m?4xCqk6X%5p zUU44n5k7^9?;xBUf#MWB@G-Yl>To>AjZ5ITX=;vzpcnp|U*lrWxg<DnE^!V;|GU!E zCUV?)xVFTdby`lB(23J&yf}v<j%zFRj$T-Ad*j6d@&U3#4~O?^3;XLCj_Z)lac`+* z@a^u*1s2qBfeA;sK+V^jd$n7UYc9w6<YVS)*|bOf+oPu7BJL%)FV(2d)E52!$@T%l zp1^5P?<WJ<)kl7E&B?n);J9F1EM8Khdj4BY+ghh^Fvk_a{iVv3gWe9;;U^@3pJzvV zt|OYeW1L=YZXP_hY*-E9+TsiWy$nu`!{$BqZ7&18zsnuUaWQIo|6TmrVI21mE*Ig1 zzA>(xf5C9he^f5#pD>K`*9_*o>N`|>prxsG(og{39?o2l=?gBXU=J6Ru$v3QwXuiV zdPQ0^g#(|-ImGd~N`)Qeu5IgzWn(z*3S6;TAm3mo&LhEt^C)(!cd2$Na?EqcrHnAv z<#80(7Ev0{acl8>qb;JR15cT>&x(;#IBq7K^(Xe`lWy$0CtX?K(;gk#qSw#G9783x z{=_Cf?d8+PK5ybUyAonkYjfQC4H`{=0~Mg&o_+GvuYGR*z6_Jb!f}UH!CmkVg8$E- z*so8!jZ)?y=3W8j-Y93xK_||4grVBg(}DBEeDsXdRl63s<hAANSix~oaNBMu`D(l{ ztq|7>alH`NYlKG;+x#HFL!)=#^aZY%q%NF(gliGYvxWE3__%OBkoJMJ52StKls@v5 zg`FYnBn!9Y=Dn8VR>HmYBm2-6)D0cPIgDuQ9~aK0*r9zc?}ADC5}cb{F!qzJGnSBb zE*cr)G`+|-OB3wi!c(`ddqtBew^kj`mOS-tZ|$sVOhmY?KdpDrXhLNXqd$psZ!43v z4&PkBwY9u&h!fXgl%DI5;Ldd@mS<$1PyYI1jixheeBibIphgqfp8oA~c=cJ&aiedn zzo*dz4W>kSv?TrIp#-=hwORL&%msYWt@{7Jy5k09x?H60SU;K9tT)T;h*c5m60M9H 
zHwVrQ9pE-X8KAy#_MA~u!TA>X#L`%a#X}*5=d5YxK^6Gl^iw0vC0~5P>}N{Q_$9<^ z{4}wewza1S>VhWNjhho}^5lXGJh)&qMsTrDi+7{k!MVP@bzEoDW3F?-Bd&A8L#StW zxqm{HDO!oMhelncs_<8G+kQeA=oW0$sv1uG72s|C3x5>%|0(tVPpSXEsnlbG#_#{X zQosIX2Tg-Vfo9;`;uofxRHbq3`dmlN;~pZHJJmE}#Pk>D&YwPT=IrSsi>Bv~<uWxR zO~v?M78J@pvc!P7FHD;`XZqgJPMU@GU%G2HJLcrRt0<-aM-YdIse6ZdYksmf7x5bT z^ns7QWt^d6fr@!5PE>K6in%HZDyFFztD;dwFBP3sv{#W+FqC_&Q8I2-@ve$3D&Abb z#$SU!0i$}WRotWEb`{&Ay|4N=s<>9g6)KjhSfpZ}isMuqrJ|r>tb#meT)#9x(<#|U z<$I{;q#~zcE2mVaMMX))hV_RLz2|1tyF$eUD(0y;PQ_dmh4l{tG{ej-Hl_C)RIF36 zO2s5qe5>l;pyCP@i&V^0QBW~f#c&k^RCH3Y@u{4z16N1HdgBaD0BiYu*!ponnuZw8 zUa4xOlY$;D3dZ(To8TD*?Hzy^a!1!2jhZx<wrIS=-dRRXXN~g$H$`s7`Zq!}bIm{F z@8O;HfC7XQXg%l+KpWgn&|C+OOZVcqXzV5#z!+FstHF;2z6F;AdL!}RiXcOHyA$j% zpj&{CaVTwt4B-M?p$Om;E`uZa&4D<;9)c&WBO(Le#-S$z^k(24xKhww!8mop5uFPx zROwRSQI)O%-iKQO8TxzTJ&jOg3>q#0ctfS>3Ts{{bV<l;2VQ_H?P$hdj|bryhQo?K zG6Rl=qs+zuAE@+WV223&>3HzHfZO4y287c(E3H)ud_I!nzJttc;9fX;eBoLR{0Ytj z^v^(l9K(D-2LO+n@lPx~HNYR?Hj)r<aI{jOY@ln5l9?Ou2+nAYzK{n7^x(KWN(k61 z77CD$hD!pL_vE-T@K*pogrhXK0>|U1RuG`!CIY{Odmnsr0|<tr8uj;D;Ohx6n<8KX z&@NGF347oYIBL>T;ID8#pe5i^9KBLOmjPdgqgL4f{0W*_0r;e${n!Wg7|^i2a4@UL ztxI?(1^rJE?!q%RRc&@)g-W*qzj+4zipUMX9_c7G=p>+}KN1672K)k!(y0P=82~<7 zC=xgjj%r^C+zCfAg!L+2Kfug!-Wf{9vA|JqEl4yExB#vdbSZEt++)y{z%_83H(Cqm zlZieCP52HRHE$Jg6HbOc;BN;03`Z?T=r{-i9DFBW1YD6BB^n1}CC-Zl2zVcO8IA_v z&p>B7Cql*zco=RiWNLuI5G7H<*>EIJ_zup4b(AqM1ZP0v!_>t+r_wWk#W)u}hKvbl z7V!^dunJfMM<peEtkQ&W!xVlsP?L-GpcB>rpmhXpazIxCFTznemw??yDs(jPI2@%} z2OKp@X(7T4oCS+I;RPWPPvJHoA#N;GcDRXXMnWyleh0yK0{%Jy|L+W-VX)&oCMp@E zO~U6$la;}H5ZG~wQe!V*1)LLR#46y_JdTS5T>u<BRhcQ-z_ruN9JdOCW$iSM`y7r2 z%N}5-d?g`L@)O~xW=X(xa2qgF>VZK8I6fm$Bk<Gdn699!fu9sY9`ttL)8~}J!5qbf z&Bg5k_~F23ijXhpG&6`oILcrK@DDheiY>q+^H6&T)d0tv6#hhDN(ov5{8ZpPIO<~) z@EDv4i&!0S+I*}leyB0<>!lc^sAM%Td6|+<DsTmynIaQmvY?3|R0`Z$#&I)16Q-6c zg&PM9T+VTez&8R%zoN9<IN<kiH1ZpP8&@d(O*o|j(-rc0!1ZvH*9PDfII1vgu4eA8 z>LIKVm4pcUu2K?80~T48IZgP?YGnmX1NN$<O$J>9j9#NG&xF_E+)#T7Soj*IBIp^w zW|h7P{1+oW)_)U-$8gkKgdeWO^a8yVn7a{~gC-1p8-o*cIB*Ocjg>NB4II^`5$N?U zE`uRMm;*=rBB0L}?2_OU8sA48(1ahD@eei2R^W>tDD(ng6&yt-T(%W8Muak;-^Uo$ z=;Hw3%by@&(2IZ%;nF}q2LApT3J&@vaNG`MA0eE!Q{fi?>C*zr*W3uAau+rdMA!^` z?sN1#=u%+fZWID^5^xC|MJNTHhogaZ2{?0)(sHwbDTgqqz)uC9hogCa3Anc!J0|#5 zK%Y7cUeJX1>(Kv?;c10u$O$EbY~VIHs{MANPhntUaM8y_$!D-|fX)C`z|lIf3V2ba zF9DaHRmM;mu=_bI<B*93z7I!f5@yw-px~S8%^1-a(A@A4j=YF9AM_~TLpWL+9s~DY zM!#WNRsrK16wOBf`d?9|Qvh&_O1}?Gy@oLac|w=(P$1B5z!W$tL@JQ`0ZX?T0YrGg z(SRa+0d5iaML^qi?ByiWh>sKCXz^J9+zq!C^d8{1aFl2ZkZi-1SS1O`)=M-YSy5MD z3=xt=miUC`#cBZITWSCy**!^!@RCXs{-x4{x}Ox84nQ(Al04x6l_ujNneB*AXjN%K zGXH>|Clh2`BO$^jmHrb*#wg+wj)tQe5R#dP_=KWLlc|TSYQ!h3QRuyg-86}skDR(| oMtC%Ck+!sKY2D)Sp3i$L-m83XeL$%u(oXucb?<;u%~y8+3#e=V=>Px# delta 26060 zcmeIad3;UR_cwmdNp57ixe2+MWVo4#Ig%hmNJuy#A?7G5#E=@=G;Y+K=uKMbafxnI zi&pU|ZB-Q|REY)&f>1*gmA2H0w3^Zq<9*)ioD=$ezTfBf`{(z1{r-9Kvc1<{d+mAc zz1LpnoLjKep<szaNl(6EMSN2JRB`1>hgKC$p_zE7Vin@c*I6sRQE+L+Z3Rm!HUKv) z`lez9aHT`*ib{pQM3X<L;kOD7eJv}S%H;mHzw~5|oAizY_x1D(uTYX7{nx;Sa4v!~ z$DK!H2XQ!VK@@AwceHcg%d@_Gmo{qyc&;b$OL%TeI<SD}4kE{kzXqP0#B-d9eZofz zJB1a&eyo9a>{@sZ67C38wEYWdg)nhkX2;w~6U-Ah?od~-8W3U;F0qb6>o#sE?q7ci z99PycN0nA;MhiGs2!yajLTkPw`xO5d>^>v3<=?jtXok8(N%u2ruy2>%ISmo3{?b#9 zvl%06DWwL9AEoh}SX7qRUM#w59w1tc#~>g|r))-hk}1|L)C61<=DRmIu0Ax?s@uyx zw(k%p7LN^c6pPJa?xRG@?<PrC5ryKU$D;IC^wm`9M5?vVQ^TR^6zPOKx&_CjN|mU~ z93bgFj$%LPI`i&K=U_^0K~-9fOZy<NH7znZ`ethJ<Ve}yh2s*Xs-iNaH9p}u!^$J_ zoaZ*1C_%=W6edKUmm~bCwP}%Zi<T<E-gNM4#e<uwNG%BC6Q#?ErS=%76zQ~l)muSP zs+MK;m4i=y=tAVnvZb_pCfYrzdmBUfE#}HzIwco*s3j&^4`EiG6rtXRM1%M=Il4@` zEK8`5sz{Q`Q>4qHbVi<uqJdQOJhT!mN4tBX-RLsL+OX<~y!shhZ%wNer5_SYJyDnZ 
z`j!Ps{gkoYb2LHj=EiaD7B*0)+AaKj@t@T4m(?1zYMXj=nL72imoiYO?!n`KR~Lcm z23g0pv!%sd&-F9x)7@vhqi<@YJSzZ=w~h@<NRf`FNMmACq*);6iz(8tiQ^MrO&pgv zc3gQ?l`^{{M$*(qMx*<GSx0pgix-4t${#tQ9&3l)eb`jT;BFhQaa>WEp|B7=I!p>% zzYrSo3bn;LJS<zZriNw6S!lT^QB*=ys*weD*fGL4QOTR<7Y$uW3d=+v49F*s^m^aP zQ>b)hUQV3_{u)bhYU`E^QK<&H>S{CkUSo@$!UyE0@LbUevt#smoAJ}D99K%Ui<YAi z<<xd}YOK!)(K;inKwSJgtrci|zm`z5=%ZqBa*ilHiasBG!e;#NDtq7*Dono0S~~kX z+-O7#eqr&>f%#W|L>{a00yt8o{LN#VE&4q5Cnw3;CtE%?kmL40u-R-CNnttJsM=<9 zMk_??2+Vk;{2?i;)woa1JO`Qg{{vy0@s?8Rh)NR5Q-yY^Lc>VtZ$+pKLY+}mHA$tb zQeUc49U+CaycNs?)Z>ce?Htf%{PIWU;?gtUc13A*Hl$Of3S>Y2$Yx8D#?s&q56i$x zPAU%A)t^U>zeyA`l4&!hP^)NiEGN;%Q(8g>&0)ID_=8$zPqhVkC=jCHPY1tB(-pCJ z1`RqZc-PfOl<uJK9U%_Iu^F#pT(GbczhfrXfc%ScU31czJYUG%dq}Mv=x8%;q`||s zgoG;Ek6cphu8Y$1y0bK^R^t^2CYI86N9*mfx;=nj2Yyjms&t%oh=YwZHj8`=`wUZ{ z8Xk$#MR@@#+fM_9g6*&{^O5NpG9im>C3#DQpvk+Ee45%gS(I)}N$TF(>}%NPLi{c& z-&wSj3rGmfyR9YoD+y-DDM`hllqgDP<*`qiHi47WL8XDB)JXe4zAq}BV(Ar<*Jg?( zE=1ibE<y?&pf){zh~%cCEXM_E-%G)#B)CcuJO@EEfjSya4P1pi3mdF2Wv(}XW7t<I z7pP2EYM5c4-ISuTki1)>WDZLgi^pLou@8vSj4)A@DzJ1+wED-Fvz*0#{#Y}1G}yXv z-rAZe49fw>W=y@zy151A(+PuFJ>*Dpg28o4wRW)k2OB_0sx@1vZ!TS;2Asp$hgP|z zSdIyahJDE#nvi!h)!OAk51#vuHn`{$Dbl4R>6p#f<}$}k=^lRoDN=Qw3xvJ~DO&Cb zqIGFlrhwGs;;@m&b%WdTTtvEj2_vA6pQAd;+E)7>mZfO5sQdm?_8g5RG3?@2<FyCM z0LrP5Nm|lc#Kx1}?n$atE?wGR_l)C`q_a@u2(dUR%w#hzyY%uHI**)UmNJJ1WX3R0 zT97VU7lfrl9VUgPn@R1YOY(7p(qkwJCJq{uCr`2D_YPGXdWforrk=rOQ17M=(P}jV z=7==084b{HQ!KrM)f@-49P9s;W77YSgO)1h@@#7Hq_70p;c3&*?4f3-(lI2H`i(>} zH2(?8h&;fE9Hk17(i17sWhs^uHXPY0(lcwiFy-D?Sc2libE(!u;wtfH?3sIq(RXwx z)@uBiN~gjb6Ai15qD=WTcCb_oVQeZ)gUD2?o*IDh#kq&pk-dPyoe^e|hhq)XT1mim zf=#3fCryOh;u$-#!q}#&YRVKt5S=NIiPOL7$#Z!g;*lfnQyw0%+46+@<>HYO?sY$V zvF|*(=r@I68|1toD|ZTEHjnVk(m*P^hGsFHbW<<Cs>t|2rv5NyrDI-*Jn%&}bVS`* zQqis|uuDp!%j$z*vLs7KMfHd;YDq;lV*ixgTC&3Co!xE(!(gCVi+$wj!K}P_nDAvV zt83mhApR1qldV4>2dzjH-x3_`<u>ChKd_!H!ZW`(uk4)rY4Ox`45NrjhlCQHFpNwp zIjyyl3q6q?6!Mh(q0+Nc^4(UHbV~k7!(5bsI4$Bab;)7uNQ;8}K0~p*q$_40oEP0d z%hTE`s_953UMpg(YKL4_<9inf^kV?>A{;;%u~!i9ja2zgTS^P)q=>{v0_xpF$&NZE z*s8IwIEryMa{mx5@mGhSI&%;*o(CmA^F($?8DKO;pGbr{*^CE<unV4X!)Syp@ebiU z#~TWH<f_o!3qu5hk+dZIHprr@h9XpjF6m=Yn9cY;>}VKaG^3HI+fcY!O>M5En$NL` z#<mIJkm=qmPpJP*jC-CtUX%<rV~moqMs81u#m#KcLVZ~}73rs@AihnBJD+1!#?E8B zY1*v%M$C}BPchnzo+?WRkhg$Ul3aAtY)~dl&M{#dEV`N3szUd3sM-fLVH6S)q2J}o zQao!j?mf#gyk2$09AMsZ+Ok7ltwIYr^Bfs44LIg7>fZ9lFwZdG*m}Uh>D@noHS-P% z?4WQ-m0NaC3QI5><*_7);dPOR2CyXW2tJI>^bYKGE{NyMUQ$6NhNno!VDIEP$~m!+ zEw;mE{<a@B)b4#9%`HW#ue-EF5!25gF(-L$Cw9Vnntp=+i{*5;B}?$h_2`BjVxNI_ z7?WXNja<=<mHUhjoYo1s-bzCo=7~io^8DlpSR&dwyz9pjd|CdAj;Ic4Ci$y2IEJCS zoSY0z-ftsn3|2lye+GnLs?ad}Tt)qCpnkURuFFS)8OjEG9Ak-k^z~oJulrFYv|Qz^ z7o;d1uWv4ohm<_%1v9#gU8-_t6a5mp<hsxqb_{kOj;sf15>EGfqI89t<|SYBeYuV& zfUbK3iYT}1!7lrC^82tmcCV~jG^n@yHVI-m(1M@s%S<ifg>JrVe9L5EZFlx*%kIKA zKJ4d~?E+R}xY63?=#(g3qV{`%Bh}Qq$}@df8~?BQp6sfBiay8t#dtmQVx0r}3Z>oI z?0~m<kv$5?ZSf_ps>#yvRP5H-I3l48`D{^OpdiPwj{^HQ>yI+!&yDQ&z&!s`m?62N z@kMdpdSaP&qM8@#*}@=`a9__p4w@-s`!F|CkZ|}o>tu@5cY%~PU*ET2Gfc;QC$@OO z`4eW1rP5aS3Jt~?HZa(aKg*^BhYFjcm=v5Q9Bj^h3?9NevGyU+{6&@-(oXoKGqZ+_ z?EbaKi$Z#VL?QKlGKafz2{ou03|Dm<Edq;1Xlz(4ZCkKAwuOiB!`bQZ0JqPv%A{v@ zxC)(;KTl?j;n8k$6X<lVPzItniFI$4>befbh1P)xwxX4{?=|eFCAgN+sGpVZQdPL< z!X9HsT18|#;F1aT3RrH0Hb{RN3N4sR*qV9KV)uY)V4Cyfb^~as=(Zpx-xg^e=Q_z= z3XwMod52M6n5P59q6g-FWQ$(!Ple)OaDh6Rqvg~6*^t&gLQsD;z4efg85qWr=&LyY ziXIqNl|heM$muw~n+}d^ZtSPlorW1uNUAlHKOmq<DL6(K{e`tr>_98QQ~n3{LQUuI zfRudd{nDgkDbfYH9?a=Tt<3X+ePgyuEpENP8H$iky0gp(FWpCaRL@Qg4rIDEom$rP zp{Aaa8s*oin;6@Z@yaf<E`<$lV-zMeuqka;hczC?&h>5DD-f~9KRH9TPWOQOK(l@H z)%wn2mM-KhGqsJ$H)E$j)ghMn)6KE<<>}-=aP}i`@*XS}b(x>6x7lPP4oX#YlO}o} 
zyTyL;jL5|=L_a3cOIQ{qvO8La!FS#dG2F=R(%mTn4a4LV_rm1t8O-&gNHU6(cMj5o zG5?&r7sB#}ff@_fRZiXvmU0V_gMX*<&D;zlw~XW-4Pd_Qf}&&UNG-|ggFaAV5G6+8 zzj{BZ(Q+_sCyK#diWCnptK`;q+4OdS!k>eg)Xu+E2eiUw%s)h<s`F_99YIkUIQeMP z%v5ex@|7>x>2{rj8V~loop1iTUy!LRN`+xdP%x<+iV&?@)KUi5imo%UB;9)i#kwc% z6e|oXQRSa{@HjWmgXv_?l~w4fK+y?`3?DqD5|il4r(31bIX2^a-(l<0^+dk?JT!rm z-yclox6L>W2{`h~F}yb82t+LLUXbGQoYgU#205E?#zBrdKy@jGOsnr$P5b>_55z&v z9EjXsgOWX!X*RnZP+M{u*K4q)f%b{Y^*lwom|}NUUdAdqv~fs6dGd1hpu=B=_{SVq zt{Od(F1-hP&@ni_18jeJEqShrbvz7yS_)y-bVsOdaezA|&Q}v}wowzCm167X>Ssrv zfJt-q5oPq`Ma%b*qUDIAXgg(g!f@<^aQ$z}#270elWZ03Ql!dMt8qhX9xJa*JkqGM z{2Y|RR(Fbv{}W1B5FewbKI-Jxpk+9(dQpAwGt^yy&S<6@9VS+N7*!f~CAXJo)opvq z+#=gXkHscuI56GEpjc+*xa*=hu+c<Yc-?-OdTOUd=|{^W&xI{4N8IJMY<i@h&pt|o zQEWA?YNSF>=5|4$zE9bP$o6edLCw*rNEDI-Im)Ux9LV)CAb;HsO0*83#9{l{{m4+^ zy_UOuI<Mp1Pe3-Y6n8lE;s^QH;jBEW^T-l~32PqAQ?diPgmI=@uCw95pUvwBp#=*S z5n5U#@~iwp#2q3Y_3a>%tBBCfLn37|8xY;w{q`_gf`+0i*vxG@y@7oYJv?Zu18s;l zcxOy;@dLW>oWc4#Wy^Yq+3^c!t}zjzbI=^J@o3G7lJRF4grekm4;wk%Yf`0vvtevh zOk2ZlW$Uek4-a`)hq00vuY^y(p}cv4n5bDwKG#RG+btzm7UC(EuqRx7801H&sf{#{ zHsg3?L%|7Q?6;VxfCctcG43qN{n(8AvE^vw_;Pr~^mFyBOPA2-J@+9&o}{8$$R<jS z(mm;l<&h0)vcDguWKviTW+64q9DP2qbS3&Bf5@aR{=P+fsGk2gLZg%$8}@oAqx{<^ ztgK6{@aZS)ahEPVZtbRZDK*0L0*Z<eZN@T?=$LzvEzi~R#D*R0lzaguf+$(331W1W z+~FtoTGuY#t9N0cdaDbS6cw5<5SiJV@3vs+1@_*eREN>|5jIhxbR~t3u6s6P{Vw)v z*KFZ~e^^>KaY`trk`|m3l<tcMSXK6-rOcfQD_8DmcK=FW-jwdCq-)FgihKhz-BiZ( z7c#J&jw1mJZrB6%q+2L!6&oT<`<M-goha=5hwX?p8SXk^zOAm6VlnLZxnHyEv7-h? zz_TDhMcRo)f0$cZ<~#CcqbOm2@&1~I;Iury76+y!6zihgU9_FayMRl}lfNl#U?UyO zE|xex&%tKAy_4m4Z|%O9lI%{15~{I<uieS^b`NPIDVv_RydCDHbx?qugvG0#VXs^x zQ}u~xXM>kx+~17v;px^|IZYLhl|jyBp*`9*Zw8IUs9}Up$#31j0n(L?@8M+t1=~ko zfI4l)lV7pI9_`b&-=_v#6)in);BG7b16u_-9j&5=eElb?%=HGTpR!wwM-wM~1$nw6 zk5P1UNj0qv^vm9MuAY;BOnpDj*t<W;6zi&K{Aj8a-<<vgMT_+(RXC+W6+lUs^!tfp z3x9hD8y+_-;Sad3G@EK3$ohm-Yj`3_NctXtx}~TWtX?Xs8=!sx;;qzpkQ2Kd*Gl-d zk{No&cijdxN4`%0if@?w$2Mc`m$U@r6w5O{uUT|i9S$fit>}+D=R|9oo&59~d%Nd2 zx9b&{?{metkW0FY73`0mp~ChG);xY;8*4MP9R2w(*tCsaL3IltU^518XQlDsru^;d zK8lf(^t<lSW@$6)ZN}x`Tl5?6v1joi!u4Y;xK}4(`#m<aS0ADB30vK(NB+Uj(E|PP zzwkd6;%Xe%<WayJiB^$6Pzgg#K7r#wWtyWU{W^%^2>?2%$ex0g;f2Q(4VYP*aW?TR zaC8>i)i;y3{0LvmRSB2Ek&3Lc)2LIe9fL9(+}nfykxl5`J|Du+Vv%NBCwEn28EPy- zjb*DbKQ)%4#$46d95wd*3Y8)iXlkRA|4<nliDA{b90kNxqJoRyABNe;VY^-R7@Esm z6MQ``H&EwNp^p%9kk?;fT@%{7Jbr|O(GS^IG_siqVfjjtFYNgyH^#+_SP3`btiy<8 zlBJ-G$c@j)8oBZ(=AqGM7L^fI&5c`eR2Pl4xwE9-2MJ|ZmmvdKf$+j(dAZV|VtgMw zZCGJoF!#hz|6(O~rE-Z5pyXm*#|T;hx}XR)II&yM1$8yv#(8fuCT=5TtcBW~RpOnu zvHv8t5>}<K(}^L%kT~{NqMz7>ibT~t#UnO5(dv&K6Rw;%WJRf|$d@9Wu^HPbaio(w zJ5Wt*tt9?bFGOFJgD$XPefl}oS0bA={$?fnu}>$bay52PiMen715Z(~>7c_dHe>Qu z=G*scq0cw0s&AZ7^fhxzN*0daWa&w*@&^s1afACL4=0IrkewV429#58iJy0qBfrX6 zBrLYk;xZJH@r7qLD_|fd9`u9-fM+yEaY|t0Oq4FjZ{m^?eS(f^INFfb;X@Ks9-K`n z?y^dt4qmhw=WW4B2Z!hFqB4)XwkTpKijb7YP?62qtI1xWGjMCEqQ_Fx+oEt=nP_S3 zIlDhGUI7C+n(bn}lLLgcDI}inN#fuh#2&8PW=_kuM5P;v@EB^GA5;#s%qT)GvDDo} zyUJ;K7tYzb|Db=#(I?2qgr_;=UNp(yHK5r4ow@DbOjXWp74fu3E=8Le-!GxVe4MDM zIB+9h_)}pLQmHnhi0+Hh(y#<9eL8I;&#>3~wQt!rMVSe+_kOah<d%qH9s(@!8NaY^ z`gMz{-VE_#-JAAkHFT&A96{jpbeQD_r5gh-J^YGH+;5NE<O3<$%mVvI@kwk*{|LW& zJD!8h3r_+}lualIMR%~o+w6G8`o9_)hlxv$zA9Q5x+hu(I>5nRo>V-}HMMx0{gnG( zK+EpP%u}2Y@Nq3QnXHu@RD-*(lwwaE#50_jP3(O!T^QiOZi;cjX9HPqN(bTX6hI4> zm0}XoQdm*SX8t5=GoXubrk-UE=-hqX9W47kbPaCVU~|FM)2hq(jmp*b2b~;nmUbIm zE(FEmcT3SV#3tQgCkKS`XV~2Vp(J1lA3--3%aiAM;T5`mpb^8G-1af+I505C{{>_A z$FM*ySe`u1^Asgq(JEjc@@)FRSmDj<>^}qB3N!22xq(TYDn6pIHx!~RP<ME%Xovk! 
zV;)nOgZ#-LBcG#O$C6U_3vWDUw$uPOckFnn*5NbpUx2I3+NK@lkF!71CJS;0HgZs~ z@J)9%cTkux{ulP~AmhkW=w0zg)U5`a-Gg_qhB?7dgi8$fKsV+#Ex#hHWbTl%jh>PB z<62uH^|de7F|0?auVb-pnL@bYGE2l|ECTz_ne*T&K?iS9p^k8OOZuIlQ{t+yDG$J7 zIk7n4<6CU`V6PUVekT4kGu?&cKqSf6S?S=eMyoISmFKyiwno_ryKIH-EnjArK3ceN zi@ln@Ug&y@Jx!l1NOxG)kaqnml@kGbBPY9Hk>Pak0Zqq+m!&%yCBgraSlC~Yz$KHD zo&LptR>3X~3C*ui$`X-!xBxvD+ntq<sl0S56VbhDG?EJVT%|K8$*Oa_3Ba-9XW5?# z4%N)}1xu1hKwe0~*7+HSU^bHY>0bfe`!|@I!@*0fb+&YO;NW~1QNb1t_2nzrnxR4X zf8WqV!TvY)cxa>`{L0!7>nzxQW8;SfVqzB!i^2c9hlx<er^C7n)9qNd;gh{epjU9C z<Qe6=zr3)5=CfVHv-r1|*ND}6`Da`tto?W#VtfkQKcbCk7dV#K5JS<sXq>zSv0|V4 zMe;fj5Goa!Ffz9HE2>alw44qqsn}HC2Cj{W<IS*zKhF!0dw^SkcTk|^E^-GYf_pS4 zAAn5@_Zc%;yjjV}LjF5u92Lq;YfbJwk<#Qss)TvTDkIt(IASWLe2>Q$6|CE+0m8%g znPpU8R^nmLj*pttd>Xlr2KtCu4G)gI4R`aUpICZ^pK#zOHX|dk{V5#Ou=8=o*O=Yp z@6=c(VtFobbd7^U2-}aMbk9b!D;b@II~mM%bXbR_&#;Pr4x+2GIQ>M1Xy^KUeKU0b z=+9UXiH;_-cl3Gb3?7>c@`!XcZghn3M#k=UM|a{`tI&?VHBeL<bsxDatZdEpEWe6Q z%?zC#{26Y1WFQ2KQgXE|t!9q>5}$f^dNv+?LSBkQW*@d%{}{H8HZhyWvaMllLXU!@ zObi*U@?OM>ed-E`{v0$KH)<@qAJ#oSTb0X;kzWNxbJG@c(*}?CI;oS>AM6*C(?f|U zlT!g>hrJeVVppQ|?7%C|`oF=@TEF-TJNHUAA#((C9y3Y!rIF=~2@%|`vz22$a{06d zwx6vyR$f@c;>Y?L?>@u{ZCMNDhUYs~YQ!V6iVxWIv2C6DLK1TRHSD9Y(|Tn1V~0|G z<1OXQm#HpS>Q1RbI)bdLh5kxuV)S_oH80P?Nrb{0*0Mh1eiiDAS?gEBgj;J^+N%fs zn-wc_>1;D@fZ}3Sr*vO<%{&3ykbVsdAD`mW3(LJ=VTLm}`&PsAXQ*!ma{Frb&iE2x z(*@>}`IZpSkFCgT)%NEia?IAE5TnibJ0kFrb%2p&Ic2Lmi3J>2E{&_jx*f8L{hWC$ zY}z|Yg>E(@2U#*YR-^5x-cV=-fm*UxvXc{XLUNOEjdfO8cNn~o-AB}zz3N??c$XL( zJ8@9ffIX<{$@4@*(lx2PezeW_kA)6oU0zH1{y7>YClnz&o+lFH!__5Xd$csp%(Itn z)PIAv>XB5}9!xA6KlujoV9-lnW=%fF^jV4V?|~D271!4ZFOuru@8hDc7M(CG*#Igo z{yeN?gJlKiROxoA^f2Yfb6u)6zM_FiS#7;Tsc0YhX?2sH)Q^IVeVSEeg~Xgxm`YP` zfGqaG=pivK6T?}KW>GrwZhk2iS<ONwMd)8wIEqF-Im<>(igjxPPSf7-%~`f#l6Sy* z6SkI9snYkjkCfb_<@UgQOpJRn%T=yB%g#;e(_Q~I20H@{w@#~sZFR_#42uV_yUF5k zqz3`h1!!oo&Xde>9p{2}oi5ftzROZ4cM>k%WeX>J3p?+!)sy4ezk63JOGHTmN+P=_ zTQZITU$icvym%64cd^)*a+f`uoYdTo5-b&taX&0rQtu!I-2a0Oo6^pG(_+#ObF!HA zCv7Hwuw_#^3I_|=p(&xl8-K80r^NbN^N|yeb7+T1qiRIOG{0NGVy1Qyj0J4+)M&@; zzmruluYhfw+DqSW4As?W%X62{R<Zi2ejP#!(3eJH!EXO(5k^XgE<;gH(2dJgxB^$< zm?dwJ)oA*iMP(0)$^h45;^au6s$X@>F?2;I=FEzV-&)Ucv!>(qkyPo=lq1hD|6MlL zv)2#$@m1_}c5v5|Fdj6eya5vBs8iho4Z~EoluxK!`IDD|9Z#{^gFdV*FVax`7}F;W zbEiikm9iVN<>fdH0S}QyW7(Y9F`;Wu!`g_xnrJy|!;4U+xPWdjF$`Ap5>K*zg&o%H zcXoERmvd0LauL$qu4jMF?i08f?H(X`{sNOiM2fAvE(iasH?oJP*x<J&>ZKD+nk)O9 zV29oc6<Tg*zrHoyWf9sAJ!vi9s$`?*jP=dGhEtp_o_H!aAI5^VEjm`RGjlcx{-;^) z+%Vytxoqv+F3kteRc5&dxw#UcpKD%WH|BN~vOZ(p^TLI-Rjk)M(aoz0vxYf2h(|De zs@TeTaf1Fgc5a?uNZl#Y06Zexh7+JT{6|Fb5Trt<YlInwF5@YndVJcmi23ebjYnbf z!?YR;IXPO*lYWW~nr{*Y&1E_BlUoE~Es4@kcx+@d-k(EsIotVKfS>Y~qnPywtFm+$ z*ral!F`AceGTVGpdNFv)VBz8^FCc-qa&sVrbBLz<V#~LbLBgI}Hycx3LzhCNZy8Fv z#SPRa9(klowET)E=*Fox*&7SIg$GC3k_8>}KSiTS-xdKieZ$fqeVZH?@DI$PS>wn# zCEXX5{U-b_SeDyC;&fX^ie&${;ZSO<qvdDb+zsR8hvt+J_d!xDoM<j!Rp0=Y8tdKU zz!&91caX3evQkC8cS|{3Yr!*UhNislm9vBjPnNmRPk8<xwqW59;oIZvEMh<8vHFFf z!Ye0tyD$2l_a1s5x}cMHE53n%FvM^cY%gMU`E7&`Gni*VpAM(*p*N7}sLD(|j%mXl zbv3UgZ9IFB*fSn)8{k>tDZHC!DG2ksbqvKoAIhK)z7Q{n<IL%lyoDVp=)g~7e-`-q zA0;91vNvJLSi<)pifL<&rzXGs#ljYM^!n+Lsti#`k$qLE@MA1{alUYDB&%C|74`}{ z{<hTZJT^UQVDu$29pZ1EqkAZ>Zn!n$^^oJbOXrb}H>2L93ImF0i!6?BRnC@JiruDv z4`JJ>=rX)_H|Bd5R2U<?mcvFAju1j}*!IHi5x3t^^a3y0o{``E$c7hf{#KN4ANcfO z8wpUw&)#7AqV|I2OBPoY(09g{*rsOFl7a8~T}2SSLYvVHGD)gQiM|Sl@#`<?;Uog- z*rvp|H`uzOb|DY8<0kR&ht%SLzd@<~ZakiVtJ@O(^N;9MIWx`ki8M*^DR4MO|ADJx zqNNHO&>6YsM%swTOOqFYM~`9TjIBbIQhZ=Fi+pElM0dKgr~+-f>$aZdxY;4plVoa_ z^<&u%5ErogEIa(p=nmV4Q1wb_onQ=_kt0~m0nf_enUv!rEr-)tmi%sJhd=*&j+JMW zI*=pkzvNhOhMj-6cYgFiY<Sl3cIvKp5qHytlf|t2Si_&8tMJ4&C`#no@F3C0Vs@A7 
zKPN-MW_GYuz;hX9dE&XCGlUnAu;kjHJ~;SvI>q!;Rpwu^uugf_FbZ)zYq-zZ%IlY_ zo2ano0DFIlSy(iTITk1RxR;WlVR5m^zwLW@VCU{*lZx*Mtq!vE_lAdx{oo_BqoXNU z-oCd<{%uedoDXJ4erqqQc(04VO=HjB8|L-JYiLVxa;<W{^$@e@6}mSrvK>;(_PeHH zW-8_T-{5Ar1OG{}b?X#Y05&EEWWy{DR5L^<83Ly(8L*dq4rx(JtF;*mz`~;ZOdj<O zyDf#gCiFpdN+pM25H6iyICUFMOs=H%`pTE~uq8`d3+Jw~ua|nZ7?nmwnazAua#4IW z^6R&E*DQ7BI}Qp*Ay~;(O$qVDfT*Ex1?@{NF!d|dr&e<AkIa2pFX8SkmcGnnx`3xT zikuO0NlRvC)Gfo1G!?V=7AslS+G}o@QoNz?zEYT4_at_DS$n<(d%SFqup^7@T;ASj znyl8E*9FTsOBpNc*X*z5!NQ$h%(JAMaO*3URublRYUj(j8ihv*B{O|x=qcTP=;kBY ze_t@k(eb%tkdO3cx)p`qS-UZO=%4d`#YUTi=mXvL-E89uqg$shalm33aPs68Hrwt~ zD~8$iXtn*tak={oWW^4s`0`VwBT%y%JfemFQ+=pYK3?%{?-x))ov@5$tU90n;d=Cg zJUvNaVxlCcO7~KwIyvcUvR(0(LyIyb<3-e>i^v0`is68>8NVAxTzc)sP{^>W;Q*SZ zoP1D@6Jrslgn5)e=U6^TTaB@b#2J``htMdzz)OPpBrskT;9dxPst6R20Hya<)5~xk zVk1ET%aruYrt~1Br(!QC))gUv1}_Bl%~i#1#@HsgheQ<XG9ZK-p`;spn2S>0PSf=R z3s3X<Q}(zD^TgD+8)h7lsJo|<S>J(Dqz0R@{aE(OniTyJSn^`rS#yldcxepVxyG~g zHjUH5X8eXsMzn%%zIf#gEtB+fk!&-r8^h|?gb2OgVTQGx-RwVmsT(ER*r2tduwxrr zyLO=41Q;4kCix#**<Wkt2v@hV=^q4#WY9()jECO9V*LP$wU9S&eW3%9TiK2e+6#Bq zvTGmA5vnuT$PZKf2ardkT`j$Qnn0W7_KIdzFp!XQy~GZE7%fC+u=^i|3kyawzjghD zn9*$Fx}L(I<80$Pe_{Q5?9jRxVN(YCdtI-XO&QSj2KcNS7I(jXT{-09-bLxw!7mwY z!H|w)4{#KtPs{RR_UcFN+57983z_e-PuH&%-dV*GHpB|sN3eMt`un$osh~8D9$vvk z_;rPy*0_aB*!2x<g^S0S(?>0Pa3g5fF&ptEMP?G5toSa1PKOC#<B{aQ8x&Q9`f>QD zL9<WG4f&|Qmylh=KKjU<|9T;6lLorWlU2e;9;*^2d6-H>$OBX&QchF|7rDF2NRT5{ zLX=ypM7kWP5*e~lB{F4KmB^OuR3cV>Y*FgWk^i#fE7TlWR#;L2R+W5BrI!$0t<oh# zA64nKM3<`cMxwW?^fsb5s`O5xSE_U=(MwdijOYa_T}gC)jvB$?%*m5fx`yZsm98Z^ zRi$O36I8l^=opo5BsxN+$z+!ORT@w2IN4pL6}O+gN|TdNZhV`1F6Emj^1B*QJc+d` zt+@FvsB|pJRH}3W(FawUyoK^kl_oEtyhWvRh+eDGbBJE5(gj2psPqz|&2KB?FO^Uv zTTNI?^jMY7COTcEw-KGB(mRQcRq0Zq+pBaL(I%CyB-&G@tBH10>5W7`U99xJhR;W& zL5<W>!cCQyiLO!U2BND}x{>HImFBQ*%X?JXk?3tI?N0Q1mG&XJM5Rqc7pimw(Q`oO zV-6xIGDA&>B|1~3={bcwT%|>#MU_q`x~EEK5FM$~nM8-FbT-jGDxE{Lt4hxy8jqD| z?h8QY%YPRrktLKMt8@v`*Hn5f(bX!wk?5l;y^ZKnmEKA8c9kwAdLz;FmJEG}1PAXF z<#lL!$3}9(>);zUDX-(qgB0Gt8;BQ>^-N`DD}@Yn2d7x&j8r)p;2c&tNh$|!P5n12 zC#ry@Z#Ly8sWB%t-GuZWD(9iP$|JyuR5`a)4jiTWRx0NQl@kk&zsfnHauUF4u5!Lo zIU+cYDrbYr$p*(OUu8)uD+erBl`~i6%mGJGIa3vmvfF_9&nL=o3{{!5?SLt(%y^Ya z`wy5`Rb~V+wc*tsoMVvL>st(N73*IfRKwOYX-hj{+4|k3TfP>A9}euEzBQB=y1vIs zw)G9|JRg?ijI}mfGVGD!<c)M)YcG!hL%ZzDYuJNrZQ5-Zjn7ny{zDDGRcsxdT==AK z9qvSN$ITINv6@3uv&!J7G>>KjJ_{CxjAk=Gi`H*h^P-^6YuJ~cy%n_4m4}&SsK-Y< zC&jEIctNBrC96!dtL$gkTj*5A`hNaMa9_uIeUT=d_>ft@=q4PDV#mKYE|h(+d)fB0 zJina1x+7T7FJ+5&v=ADS*y<e#j%Oq~#<wN0pLX~-eyT*4B<+5_L&pmXJ2KtQ)xxb@ zwqd79unl2{cg6`rXYPKwv$dcPUiPASJFF~ncbI;xKYB=(uZMig$|md%Y8R7;BRwfB zmYA?2zU-}>cjQ&NV}(}<H%HB@6#XUpVs~293~((bY*Ik&mj6D64Y&N+aq-{cdN%9z z|Ke5JvmIU~FP5;!UZ`!!`t1phy}q<5r+g123KLb|QYtkL&Lw~ON0l${dMVm_Df@U& zH&-thqjYXdx|<8xFMB$)83B>tSh=GjsTi-{N+=XJ@*@spUg4{mUvZWH^a~dIO;$qU z7vw&&uT6ZA)XcHKP`F#bLQaC)mL5+I4;w2=OUg(ItU7N<b|39IKys~3kz5UimE|^L z-`?!%H@<?YH+%3+q_FpM7P0qGkGt`7fvqLua}ZA*z(;2y-mF5mthy_o5yus8?%}I7 zsv(mK!}l6CW7l|Q+Sg4e`iza<7cBhPlP%cys&JqcyN1}BR?N09LilDIYf~C7-0i{A zOO3)?+t}37cZ5H;vUB@GocqSnL>US@+1RuFeyq#>4+XuFB24JP{@H&<@aVyg9cUNz zJCqtNfLV@@7pWQWJ-Rw3wL6Z_>AEX(?P7;l$CBcn=a<$y+Km3OEaclRet*E4(Of9P zJLJ@@D}7|cs|=^Ru{Xc<7b;@e@^6hjY9UoO2MJU<J~Ps{R%)|9qKt2X*(I@HA?If( zjDm<fODPg_3`3!a{q${1Vc10W<lDq3T+9`f;$GDGFGuBDTmGA)GG_}pDuwke*n0;} zT|b1uqqX!akWeR8o1Ed&RXmj+s9gEYmx9kN@Kl=I#xAF^xw4-=eU@)9x9>#LU04rl zY4v@e(;egV*_+XTR$>;tubI7>&HgSZ|Em{5a!-)aWy;-0`5M(n`D|$9sKnuT4JrX@ zYL|yP)T>V9gFZfBJvQw7%TQFL=*mg?_Ac6MIwZXdWwnO?u}Qsvc7ZoVu0j%RJrNW+ zOmgtPWz~odN1o{9ox9k~Ljhd{Lm|EQNfPfU;x@CAckaLQ&Ul%ZvW}yy5$&Qid3X!X z%B?hcymNR~_WiH&&bwIIp^n1)L9F3Wg5bKDMI9c|?X@7Pl^&(zC4<sesc~b&a_BR? 
zIT(!ue4+O$-b9r2##y)vVt`KYVt0M@YIf-GYr^#`)~0NZV2NUz%i@Je1?-oy6yZ^4 z*5OFFuxldw=tx)g-jU`)wo3MV&ELKhFL~`A`&wIG`1B37WLp2w4VloWnuSdpd#Spy zSAd~y?86tbd(+^?7|61xhYF(y?zT=(<z259P^(YL_wit#-I~!yuokd3GXw1>pnNuP z<}~|R&{Vd6=24+{KFfc7iM>-JDt39JfVV#ZzYJTR6XM?&{<9RzaYy=eEIrY1=$V-H z0B1{?%^2L4ozCg*=l}tmv9=9!do#fCN)AQ7ZNsA947M+Tvz%qT`IG%Ue1FDz<ZcpN zGj{)!`+*?Dr?a_vff%h99oUr1w&aC|Dc`G_!+cc7uJj|6a%!A@4EEev<KgFp>Wq1x z*^UGXev??x_pMrwX+=Fu`UiJTf4sL=?<K#M+oaO+B9OE|@U(X4Yawje_bzU`SD>WV z#4NlS++p_$w)OjPVR{xj^L>Qr^Khl4XV{)y?dY+Sm%Q)I7e!UQ$-K(LB0dENCCZCl zq|~ROiJjy0jX2I{wGp$<VOwY{VH3+kjAJ$7m>0tGksS6xd1ql+39Bj(NW6jWmsl1= zyK;GWlO{FV{lG>84PSmJ$33iRc|P>9Eh*Y{KRs@fccU-3RT>6LuD?On*B$S^gtE3r zBTNpcS(LuF{GvVUJx?AF9L1^bc;_<QF_h&VZ5P;=vX|i{`r~zbv4EfxzEn9px>nuC z%h|U_(|pPxQev5p#suT0*VLAYA<T5lug5a<AsO!jPNF26v2_R@BKL)>+EtAQAzqIc za?dD2y^kYFERM!w!{3%M^RZsSr_0!$V<wX)9t)Be1b5If^DHroUa`o5gD9ogzS^Q2 zy^Q^RY^pFbhmAYly<^+gn+Csb0|tPe=M@3t;x||^(hG6wig@=_^{ubAvZKel_S+Fm zJA59T`FpTjRcAgfyGHr+Oxn}sPXX#~r|y}7c)o}|vxRJZks$lcga<t-VVjkfc#W8) zys(c~B#X)x!RHG{!&!ML(on1keeg$TB9bIL#XBxtq^~#W6>6LDu8AG37_Z+jgX&XG z>9#XiWTih}&C)7^-1lI`BuUQ_4J#|exGp>1WD6=IjN}edK3O4`5505hBnJ#)`zxb* zb%8j2siGNdIwRsxg%ldZB>C)rXiRk?n6k8mW_j*h`M_o(%sKK0(^;DnUE-bLA%oBt zSe=v$uWDkK?Xq2oKFBkvAIIJLN)cME34Npry_>_<orn^~Y+`3mL<+$<j6d0iKhN5o zOm4aJ4RX0Vl6uai3Q+R=`dk#e<_#vD93uGaWIvq@NC<~mQg`f><~;g}X4fY++aUp& zU@bb}+uSodkQLqq<Q_rk?;|~pKJh#&T0I@)*2OIDRLI~l0m|IEVdmsfuhF=+r6MpL zMFUASZTL7HndcyqR@_l4uiJ#lefho_ef?^Zn`h(M0*MV_+fTLUx3MdytW#c3gW)$# zEIOapOnz?w=&_>ZaqU}uv6~wfH2s7@(N)oqRHJ+mmr7r1hAYOKGF5MM*R_D0`l!Pf zT8oaf6D?0_XMaa~Z!>V@2UA&9Rm*l)rczh&Az`I2^s`c4j5yvsy(pJ8C48kOWKU(y zPj~T~3%2@30rsQ#6vJ2W#pBl%<PP7m38&i%r;C~ObRXeX4Lf(*bHLde6b`%A94|`O z#dZ+AYFKszgJd(xKA5P(bnaV6!C{6kXz}XW6ENv9Ewrt({Lz%Anq)SGbvn~$%;76^ z9NVzM*Q$jrYFU3(9t}B5tS-+*?xf_<(<I=*6y8|G;|HeH7LS5crDLdvUY#3^SpA5( z<KX2LrHc@{bD340@oDc0{=*uvrA>TzL$sY0VOivb%f}~EJE&CoDoACpKKEwsXT1gM zW!C0wpx{x&QqG?C`+O1!Dr>77$Wo%}oh9A9xh%4JcwkTPnw)!;S;{h>BuXO^=+{1A z`)sO?Zf=30C;x^tRh8tcS?qRor0ZwkSjUH8yXcn1LeIUKeh@`tN?TYKMsS9rXqbiM zLHh*rw!vm>W+V${o}s9v!heAGpP-6*MNyPzM=<i_I}_0u?2yVgiWAw*b3Hvv;kFko zkDlkb$|+b2(6vvevY7L&g_&M#-1$z;OIM)H!<2V|zJD8z`!E;>=lunTiR|F{#P;5! zX{wvv335_nnN9BmeRiF6P<<yT<vMHgLzK|v8XNaRScl66Xzl;-PLN^pe|slr;}2}d z5AB7$SJ|~6dWu_cg*>3HS&@g2R&LRkCP!Jc9YyO%Uip~3Qa0h9j;eL|>4JJ6^;+<9 z4ay_Ov&;*TGb%eOuZJGMfFvYg1I31Uy#>Yq?*yYAFsDSAQkLJbL+Jw)2Hp<Ghxm`4 z<DrkSMRP?YqTCif2c#{0p7MVo{>Pj3RS6@I(?q{@A*H3u-)FED7hANme20Exg}SKw z+~*T;(9vBRzp0|gN2>Cfa$yGRb<rnoTE_n%u)i6KkjH3wBb)N7bzw}-Nse;eI@ISY zgiN4naa`!9jilwb$Ag``=r8=13ASf5MY&I9vc;Euo4Hb%Ul6~AAiu_9FQo_rRVgJU zGLwCB*=V0Gula*S*vU)IE=%%BT|7_9okp=6mx6_=@l3cJ>DzXpI<<J<fd|QD>H`mF z+3pWE@UpLvJzl9=u_V&Qqv}AR!+4bK&97wpFZX8+LPGu$6AT0Vt=4dxhI=)v*6=3{ z8#J^JR`dC3*j2-!8fI&_NW%{`+^OMF74x|Z8snyh4>WWLQ7Z`4FjB+ytyKO}4JT>* zIT|j}aHWPDHQc4)Q4KFCh+(~>F&yyCIQ@lb7^`8bhEp`0r{NL}H)>d_VXcM@8ajrl z<@#vYO2ZgJ)X(+O7=tvNq+yPR`1K^EVef0WQNtY?9?<ZphSeJWq9J|IPk+xfbjR2J z^cM)E`ngDrF+jsf8qU+OM8nTDJfLB<hIcf)ry-|JjjM+K8g|sMmxe=I@ex!%H%VhG z(r|}{N+SMC4dL0(LU_Q<E?fe&-BVPtW~7Q?^MNaZTj=Nfq%w|n1tLq3SJSVH(KeQb zV8R!irpDc`Tpi?;Z+faqlPyDkWlz+&lN#kRwKy#x`g7OfI8l|qosU&{4=tXq#jz`? 
ze_9;dobtEvkt&ZfkpA+yNR5HxU-_$TR0aIBc#altsl_=hPW}b@+xbwH4^*REwiXZ4 z;u{~R=_V~6sl|hp_%Q6mf2#s>Gy#(qpR2_=Ej~|+*WOp<pK9?MEpF4|rMn;9$~WvT z`zMhPeSN~j8Iw)sH%wW%lP8!bn{smB$eKKBmML%6Yp+i=wGN*#t2G<`a4c(h=*x~g zyuG`!(N*yHY}P-cx^`qKyCzQCHKt9;EFHV`cvi@@XuQ>pDx%Qw5ePefQtNvK^y^BT z&3h7MA~8}AJM`bf#BnDzG2U6=oTD5f*|jGo!SPl0{K){HCfS@eIG_>kYO-r;dEP(Y z!(h)DVw!P=D3_cj$v@h|bU|>up;pd&I&z+q5QaD7JYyU<&nWjC4^JTRkeA|3c|CCW znGjy&H8^mFNhlxrs1Dvi;2eeu=^nn$obOQ=*6pcBKINrOhj-$*g9ss7y(dB6_*eXY zYU|-hjyr+SRG*Vx$LV9dIX&;mIplC$Q@gfy#^Q?NxOkLJ$h!)hEAPSaCRg^xv*hMY za(8-goO4f(8*_sle-;?x_$tTI8?NdQIAaK$&}DRcsCy1?&vW(@*_~&7lpfH0v`gf; z*AQlFJ#ZqCJ}+BQhqONsc*qiZcsq06liWD(7(M4bv_*qQt)W_9_Hrf`_r+UucqMhE zR-T8mJ?D&(#2hOlPNOz5nH$JuCUe{_O(+ladW7>$nOfL$Eig?jq6{xGJ;h5NZTi2= z69+mM;rtEu;CW!zrqP%uV&$Zut^bGq>>Gen1;M?RkPWZ}lGaVL1&TCqIT$###m!!{ zn0lT&1apQEg<SOK8|28j$GCIuxvtgDWsW(`G9A(>e=O2i<Wrv>P0gvnbJ{|f98DP& zWr~6_ky1qokxg=aU*WjX2x+yeLio08mh!yMs#LxWD|+g)Dv$5<VzSrI<hU~k*|n?o z@?D2EW#5p?ac2>-YpHeU1L>;SmGhnC!uiI49%^i8QSDJi^2%a0W5y8{*DB)Nqgfoc z9PzJf*G%O3w&@-o4x9(_c|_6NI%hW3GZ>rAEQIMltND0kt+-&VICF-fSSw1sden=$ z@zX_0-MPlHHLe057UH2uJ3-n}k!~vW)gnwI!k8OtrU`stpgIH2oO7<KGc*H_tQ_YL z$3#`_nq30l3X`_xM-2r#zAMFh*zrMYCfM<Pd(c!=qM!V~l+VNH$Qff?IAc_k%D(JY z@z)&p9YS{Pn)7yi$N<W3!p8ppl;`w*7?KE0L+%^w$Tc5s;F`y{am{nx%3O0?GSR4B z_I#@_-$)lODCScxh+ogSH@MZlR2pmpsH+<97_FIO&qusimb*@K+z$vwZJ^Z&b;H17 zaAPpIxyIUmPw2Tb9M^~tsmad)?OV-p=}r0n4->i-GW7`BgsutF@$auWqT^dSQoACl zU6BIslkXeejPo|fVA)4=-h3qIlCHKq2Xn&*xpF~f4=!kuJ9=s0f^vNtylOqGo0qw# z`$p+Glld*qG-)<x8lK0QV$7V0&*6M)y{nC7S{Zn^N7<RM?bP50M4SG4k@45SadS+( zp^3+3fVuG=emC_0TF3v_I{yFDI&Q6~<o~;Mye8L)uURwJnSbn*%nvi?;$O<XA{TzX z{l1;<d?dfuo!{6jt>k^xc>DhZVO&qxH^_(o#ePD%z$eg(O4U%*FiFD%4SQ-Bt05M> zk}pC-lZNgZavJ`<=3D^ZDgOemidSn`rD3IpM>Q<d@SukH#89bbr-mCfT&W?>K_$JZ zeEbZW5?`R9S;anFw#LZRFkQn04P!No(J(?ocMZ8UivxM@HjSKGeuIXxhP4{jXjr}G z9K?gSY0<SB7HF8QVa6KYAbxOuqfMRM1`TBmYc;IauvEis8m`sQM=NfL7N4VGwub2% zCTJL;p}UH$ID3up^tsv)S;I07w-K%}P2>IHcNn}T!^GEgsI^mvBF9l15+@ZS<F$Bi z6>sW*(75t7rVu{CxhdN(x_#3^_*T4AhN~(kuK6I8e<R->8+?^J9?5y&JjZv`F~C+W zprGJK06p;3^lyKj^91H2EC5{q+=5U78N$`@|3!jc3oOKi%?C8$WE|PqptFIw2qa$u z{EBGYEqAuWz0Qg|$x1{@fWIPCgSLk&J05}P1mH-G&H?V$=sm#O2sMzAfx&?scfpV6 z!hk<$^aWs+iQ{fUW<BtG1S<0=kPpV)xJ5oH1CfkC2_mpgqkjhqc+Mo^^Hh7_dIahL z;m|O3v~qy-fXyGjoirA>6(I@qPT+ZjbkG-oF1R&ifOZA$M#urZ2Y4<Y|Bc0ub6o)T zXiF`^4|W5eAW&sbfxB>#+DI9It{rf}p@M*i5Nbh}0ef`BG*MpQLj;=2M&M&yequmB z1<sCCM`#Z4Cj>M2`L!VC;x@Aukp;kI2+BMGe~iXGAAGV4uXKS42O4e#4juz_7=iT& zB>y{ba5t5o4qU9!4ZzbqFa^k44Q!2@5r+;(0_VqL{v!}s5YKU6B2Ym)fkC}6?BJV# zy%1=GmH@XQkPKm^MiaXARx36ElMu2YpH7I713CxzCW0As0dO(G9MFZm^I`HNK&ufU zT#i6Px(&E25tV^o0=z&0%0>7P;UM^pKqE{fYLEz=mxNmy_$z@G2&7LJfR7MJ<|**& zWK5wKrV<$24`t?~P$I@6PzJ)~xJ4yEs08SN>l5+e#o<P2bOvx@3VH*XNx=CCRKWt^ z9t3JA;qMwv7&1WR?*#sZumyQd1F@tRreQu2DFBuuP(eq5euGrnAGjBR3M~aD4OT~p zFlGpRv&c($7s1h+=Nf<oLy;Fh&Pe#raE@yaek1VGNK7kexT4S-rF{5xGC1zD(W=I7 z2im=&_Shac7oh+d766Bg!72bf9N1$l$885quSFM*LkB<;ZbBek*#d0#s#*@&&EW{t zu?Qg3_$z^by^8t2h8-<(JjX3Us6{~qK>Y;t473U8JQ1z+!IA^E%2F$i0On1?Pve6> z2Y7O_+JP$IpEE#%-vD%Z9iv2ffl&z5h#26v2(&~G=7U)FCcJtODgn02RTCnB&1RwZ z;5z~rBT$AyU^NBk1mQt58U&e2U_&0}95i9!Y_+nDz{zi+1K7l}fg9$iqqPwjJ{R*( z+8zPoCIZbd;nsP$k0Zl2;0*+-tQHtNUo8|aM6Rbs6S^)`b%L-6fy!9|EM257W5Ra} z)J<#&aM5CDKgusyjP-ARTis#_TU$6T6A6*P9SAc(?*v8{s&ou+zeXPfCKaiN4dGpc zLdZ7&*IF?upb4ifLytkv0Dgo(`cVaRT~3|B{%-;?{C&(fgb1w&G*3H$84SY;K4Io6 zY_XsTZz0gA$iRK8Rk{?|h(LJ>zyAQEh><u7eC0#*81z`+fpzFG=!3u>`RlPmA<`4r zb|dB+bbH`h1Ud@{H+`b=w*YmU)XGeR2umPe3*5RH6@xAV=50ZK&~t!?5U8PLK#%|6 z5Ch*6_!$Bn)A`#$%-;z&Arck<J-$}w&lC6%p%(l`V8(t78P+smw*xruLB|3m1gc;u zupWVW|2xqKG1QQ;{|?Le5Vl*;g}@UCv^P`%TO7vxQ^fNy#|=cFf>MDS5U8aafj!F9 
zmi7d$K82n^el0M$N>w@$cn{$q_<sYlPpgd})SrQa0{mEDLN&)d1x@IE4hoEA?{h96 zLxc!zAZ5V$7qJwPumJcFfvg?ZOIQU6wESv;Q!cA?Hjul*aZ4dXcm`n`=xQMSoIQ;M z_an-_iqQq1um*t?kg)SLoUZvuhyhW49VZ$I0lWXCZc06YZz22+KAiR3hX{&d0?A!F z2aP5qcP!C_<iu=`h7yt!l=y_?v?M;^7>!SuPku%cA|xLn(S+m*B%1KFMw2^`{CLDC zw5wHVcVLu86Q*f2VYWsSF4kzm&s4e}TwLTtQVSw9{;al?FcN_(BOIvF=|J+15TCG+ z=zZrp^4)kC%RXBjBl)48m7A(J)oiNW)VPV;oV__`^OEgrx3BSen~&I+^frIc?tcNP CAY45F diff --git a/pipenv/vendor/distlib/wheel.py b/pipenv/vendor/distlib/wheel.py index 0c8efad9ae..bd179383ac 100644 --- a/pipenv/vendor/distlib/wheel.py +++ b/pipenv/vendor/distlib/wheel.py @@ -684,7 +684,7 @@ def _get_dylib_cache(self): if cache is None: # Use native string to avoid issues on 2.x: see Python #20140. base = os.path.join(get_cache_base(), str('dylib-cache'), - sys.version[:3]) + '%s.%s' % sys.version_info[:2]) cache = Cache(base) return cache diff --git a/pipenv/vendor/dotenv/cli.py b/pipenv/vendor/dotenv/cli.py index 829b14adc3..d2a021a52b 100644 --- a/pipenv/vendor/dotenv/cli.py +++ b/pipenv/vendor/dotenv/cli.py @@ -1,5 +1,6 @@ import os import sys +from subprocess import Popen try: import click @@ -9,11 +10,11 @@ sys.exit(1) from .compat import IS_TYPE_CHECKING -from .main import dotenv_values, get_key, set_key, unset_key, run_command +from .main import dotenv_values, get_key, set_key, unset_key from .version import __version__ if IS_TYPE_CHECKING: - from typing import Any, List + from typing import Any, List, Dict @click.group() @@ -104,5 +105,40 @@ def run(ctx, commandline): exit(ret) +def run_command(command, env): + # type: (List[str], Dict[str, str]) -> int + """Run command in sub process. + + Runs the command in a sub process with the variables from `env` + added in the current environment variables. + + Parameters + ---------- + command: List[str] + The command and it's parameters + env: Dict + The additional environment variables + + Returns + ------- + int + The return code of the command + + """ + # copy the current environment variables and add the vales from + # `env` + cmd_env = os.environ.copy() + cmd_env.update(env) + + p = Popen(command, + universal_newlines=True, + bufsize=0, + shell=False, + env=cmd_env) + _, _ = p.communicate() + + return p.returncode + + if __name__ == "__main__": cli() diff --git a/pipenv/vendor/dotenv/compat.py b/pipenv/vendor/dotenv/compat.py index 7a8694fc6e..61f555df93 100644 --- a/pipenv/vendor/dotenv/compat.py +++ b/pipenv/vendor/dotenv/compat.py @@ -1,21 +1,49 @@ -import os import sys -if sys.version_info >= (3, 0): - from io import StringIO # noqa -else: - from StringIO import StringIO # noqa - PY2 = sys.version_info[0] == 2 # type: bool +if PY2: + from StringIO import StringIO # noqa +else: + from io import StringIO # noqa + def is_type_checking(): # type: () -> bool try: from typing import TYPE_CHECKING - except ImportError: + except ImportError: # pragma: no cover return False return TYPE_CHECKING -IS_TYPE_CHECKING = os.environ.get("MYPY_RUNNING", is_type_checking()) +IS_TYPE_CHECKING = is_type_checking() + + +if IS_TYPE_CHECKING: + from typing import Text + + +def to_env(text): + # type: (Text) -> str + """ + Encode a string the same way whether it comes from the environment or a `.env` file. + """ + if PY2: + return text.encode(sys.getfilesystemencoding() or "utf-8") + else: + return text + + +def to_text(string): + # type: (str) -> Text + """ + Make a string Unicode if it isn't already. + + This is useful for defining raw unicode strings because `ur"foo"` isn't valid in + Python 3. 
diff --git a/pipenv/vendor/dotenv/cli.py b/pipenv/vendor/dotenv/cli.py
index 829b14adc3..d2a021a52b 100644
--- a/pipenv/vendor/dotenv/cli.py
+++ b/pipenv/vendor/dotenv/cli.py
@@ -1,5 +1,6 @@
 import os
 import sys
+from subprocess import Popen
 
 try:
     import click
@@ -9,11 +10,11 @@
     sys.exit(1)
 
 from .compat import IS_TYPE_CHECKING
-from .main import dotenv_values, get_key, set_key, unset_key, run_command
+from .main import dotenv_values, get_key, set_key, unset_key
 from .version import __version__
 
 if IS_TYPE_CHECKING:
-    from typing import Any, List
+    from typing import Any, List, Dict
 
 
 @click.group()
@@ -104,5 +105,40 @@ def run(ctx, commandline):
     exit(ret)
 
 
+def run_command(command, env):
+    # type: (List[str], Dict[str, str]) -> int
+    """Run command in sub process.
+
+    Runs the command in a sub process with the variables from `env`
+    added to the current environment variables.
+
+    Parameters
+    ----------
+    command: List[str]
+        The command and its parameters
+    env: Dict
+        The additional environment variables
+
+    Returns
+    -------
+    int
+        The return code of the command
+
+    """
+    # Copy the current environment variables and add the values from
+    # `env`.
+    cmd_env = os.environ.copy()
+    cmd_env.update(env)
+
+    p = Popen(command,
+              universal_newlines=True,
+              bufsize=0,
+              shell=False,
+              env=cmd_env)
+    _, _ = p.communicate()
+
+    return p.returncode
+
+
 if __name__ == "__main__":
     cli()
diff --git a/pipenv/vendor/dotenv/compat.py b/pipenv/vendor/dotenv/compat.py
index 7a8694fc6e..61f555df93 100644
--- a/pipenv/vendor/dotenv/compat.py
+++ b/pipenv/vendor/dotenv/compat.py
@@ -1,21 +1,49 @@
-import os
 import sys
 
-if sys.version_info >= (3, 0):
-    from io import StringIO  # noqa
-else:
-    from StringIO import StringIO  # noqa
-
 PY2 = sys.version_info[0] == 2  # type: bool
 
+if PY2:
+    from StringIO import StringIO  # noqa
+else:
+    from io import StringIO  # noqa
+
 
 def is_type_checking():
     # type: () -> bool
     try:
         from typing import TYPE_CHECKING
-    except ImportError:
+    except ImportError:  # pragma: no cover
         return False
     return TYPE_CHECKING
 
 
-IS_TYPE_CHECKING = os.environ.get("MYPY_RUNNING", is_type_checking())
+IS_TYPE_CHECKING = is_type_checking()
+
+
+if IS_TYPE_CHECKING:
+    from typing import Text
+
+
+def to_env(text):
+    # type: (Text) -> str
+    """
+    Encode a string the same way whether it comes from the environment or a `.env` file.
+    """
+    if PY2:
+        return text.encode(sys.getfilesystemencoding() or "utf-8")
+    else:
+        return text
+
+
+def to_text(string):
+    # type: (str) -> Text
+    """
+    Make a string Unicode if it isn't already.
+
+    This is useful for defining raw unicode strings because `ur"foo"` isn't valid in
+    Python 3.
+    """
+    if PY2:
+        return string.decode("utf-8")
+    else:
+        return string
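The two helpers now centralized in compat.py normalize text across Python majors. A minimal round-trip sketch (illustrative; assumes the package is importable as `dotenv`, as it is upstream):

    # -*- coding: utf-8 -*-
    from dotenv.compat import to_env, to_text

    # to_text() yields unicode on both majors: a no-op on Python 3,
    # a UTF-8 decode on Python 2.
    name = to_text("PIPENV_CACHE_DIR")

    # to_env() yields an os.environ-compatible str on both majors:
    # a no-op on Python 3, a filesystem-encoding encode on Python 2.
    assert to_env(name) == "PIPENV_CACHE_DIR"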
diff --git a/pipenv/vendor/dotenv/main.py b/pipenv/vendor/dotenv/main.py
index 64d4269630..06a210e197 100644
--- a/pipenv/vendor/dotenv/main.py
+++ b/pipenv/vendor/dotenv/main.py
@@ -1,24 +1,22 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import, print_function, unicode_literals
 
-import codecs
 import io
 import os
 import re
 import shutil
 import sys
-from subprocess import Popen
 import tempfile
 import warnings
 from collections import OrderedDict
 from contextlib import contextmanager
 
-from .compat import StringIO, PY2, IS_TYPE_CHECKING
+from .compat import StringIO, PY2, to_env, IS_TYPE_CHECKING
+from .parser import parse_stream
 
-if IS_TYPE_CHECKING:  # pragma: no cover
+if IS_TYPE_CHECKING:
     from typing import (
-        Dict, Iterator, List, Match, Optional, Pattern, Union,
-        Text, IO, Tuple
+        Dict, Iterator, Match, Optional, Pattern, Union, Text, IO, Tuple
     )
     if sys.version_info >= (3, 6):
         _PathLike = os.PathLike
@@ -32,99 +30,6 @@
 
 __posix_variable = re.compile(r'\$\{[^\}]*\}')  # type: Pattern[Text]
 
-_binding = re.compile(
-    r"""
-        (
-            \s*                     # leading whitespace
-            (?:export{0}+)?         # export
-
-            ( '[^']+'               # single-quoted key
-            | [^=\#\s]+             # or unquoted key
-            )?
-
-            (?:
-                (?:{0}*={0}*)       # equal sign
-
-                ( '(?:\\'|[^'])*'   # single-quoted value
-                | "(?:\\"|[^"])*"   # or double-quoted value
-                | [^\#\r\n]*        # or unquoted value
-                )
-            )?
-
-            \s*                     # trailing whitespace
-            (?:\#[^\r\n]*)?         # comment
-            (?:\r|\n|\r\n)?         # newline
-        )
-    """.format(r'[^\S\r\n]'),
-    re.MULTILINE | re.VERBOSE,
-)  # type: Pattern[Text]
-
-_escape_sequence = re.compile(r"\\[\\'\"abfnrtv]")  # type: Pattern[Text]
-
-try:
-    from typing import NamedTuple, Optional, Text
-    Binding = NamedTuple("Binding", [("key", Optional[Text]),
-                                     ("value", Optional[Text]),
-                                     ("original", Text)])
-except ImportError:
-    from collections import namedtuple
-    Binding = namedtuple("Binding", ["key", "value", "original"])
-
-
-def decode_escapes(string):
-    # type: (Text) -> Text
-    def decode_match(match):
-        # type: (Match[Text]) -> Text
-        return codecs.decode(match.group(0), 'unicode-escape')  # type: ignore
-
-    return _escape_sequence.sub(decode_match, string)
-
-
-def is_surrounded_by(string, char):
-    # type: (Text, Text) -> bool
-    return (
-        len(string) > 1
-        and string[0] == string[-1] == char
-    )
-
-
-def parse_binding(string, position):
-    # type: (Text, int) -> Tuple[Binding, int]
-    match = _binding.match(string, position)
-    assert match is not None
-    (matched, key, value) = match.groups()
-    if key is None or value is None:
-        key = None
-        value = None
-    else:
-        value_quoted = is_surrounded_by(value, "'") or is_surrounded_by(value, '"')
-        if value_quoted:
-            value = decode_escapes(value[1:-1])
-        else:
-            value = value.strip()
-    return (Binding(key=key, value=value, original=matched), match.end())
-
-
-def parse_stream(stream):
-    # type:(IO[Text]) -> Iterator[Binding]
-    string = stream.read()
-    position = 0
-    length = len(string)
-    while position < length:
-        (binding, position) = parse_binding(string, position)
-        yield binding
-
-
-def to_env(text):
-    # type: (Text) -> str
-    """
-    Encode a string the same way whether it comes from the environment or a `.env` file.
-    """
-    if PY2:
-        return text.encode(sys.getfilesystemencoding() or "utf-8")
-    else:
-        return text
 
 
 class DotEnv():
@@ -331,8 +236,14 @@ def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False):
 
     Returns path to the file if found, or an empty string otherwise
     """
-    if usecwd or '__file__' not in globals():
-        # should work without __file__, e.g. in REPL or IPython notebook
+
+    def _is_interactive():
+        """ Decide whether this is running in a REPL or IPython notebook """
+        main = __import__('__main__', None, None, fromlist=['__file__'])
+        return not hasattr(main, '__file__')
+
+    if usecwd or _is_interactive():
+        # Should work without __file__, e.g. in REPL or IPython notebook.
         path = os.getcwd()
     else:
         # will work for .py files
@@ -363,6 +274,14 @@ def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False):
 
 def load_dotenv(dotenv_path=None, stream=None, verbose=False, override=False, **kwargs):
     # type: (Union[Text, _PathLike, None], Optional[_StringIO], bool, bool, Union[None, Text]) -> bool
+    """Parse a .env file and then load all the variables found as environment variables.
+
+    - *dotenv_path*: absolute or relative path to the .env file.
+    - *stream*: `StringIO` object with .env content.
+    - *verbose*: whether to output warnings related to a missing .env file etc. Defaults to `False`.
+    - *override*: whether to override the system environment variables with the variables in the `.env` file.
+      Defaults to `False`.
+    """
     f = dotenv_path or stream or find_dotenv()
     return DotEnv(f, verbose=verbose, **kwargs).set_as_environment_variables(override=override)
@@ -371,38 +290,3 @@ def dotenv_values(dotenv_path=None, stream=None, verbose=False, **kwargs):
     # type: (Union[Text, _PathLike, None], Optional[_StringIO], bool, Union[None, Text]) -> Dict[Text, Text]
     f = dotenv_path or stream or find_dotenv()
     return DotEnv(f, verbose=verbose, **kwargs).dict()
-
-
-def run_command(command, env):
-    # type: (List[str], Dict[str, str]) -> int
-    """Run command in sub process.
-
-    Runs the command in a sub process with the variables from `env`
-    added in the current environment variables.
-
-    Parameters
-    ----------
-    command: List[str]
-        The command and it's parameters
-    env: Dict
-        The additional environment variables
-
-    Returns
-    -------
-    int
-        The return code of the command
-
-    """
-    # copy the current environment variables and add the vales from
-    # `env`
-    cmd_env = os.environ.copy()
-    cmd_env.update(env)
-
-    p = Popen(command,
-              universal_newlines=True,
-              bufsize=0,
-              shell=False,
-              env=cmd_env)
-    _, _ = p.communicate()
-
-    return p.returncode
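The docstring added to `load_dotenv` above summarizes the public entry point. A minimal usage sketch under those semantics (illustrative; the variable name is only an example):

    import os
    from dotenv import load_dotenv, find_dotenv

    # find_dotenv() walks upward from the calling file (or from the
    # current directory in a REPL, per the _is_interactive() check
    # introduced above) until it finds a .env file.
    load_dotenv(find_dotenv(), verbose=True, override=False)

    # Values from .env are now visible through os.environ; with
    # override=False, variables already set in the environment win.
    print(os.environ.get("EXAMPLE_SETTING"))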
diff --git a/pipenv/vendor/dotenv/parser.py b/pipenv/vendor/dotenv/parser.py
new file mode 100644
index 0000000000..034ebfded8
--- /dev/null
+++ b/pipenv/vendor/dotenv/parser.py
@@ -0,0 +1,163 @@
+import codecs
+import re
+
+from .compat import to_text, IS_TYPE_CHECKING
+
+
+if IS_TYPE_CHECKING:
+    from typing import (  # noqa:F401
+        IO, Iterator, Match, NamedTuple, Optional, Pattern, Sequence, Text,
+        Tuple
+    )
+
+
+def make_regex(string, extra_flags=0):
+    # type: (str, int) -> Pattern[Text]
+    return re.compile(to_text(string), re.UNICODE | extra_flags)
+
+
+_whitespace = make_regex(r"\s*", extra_flags=re.MULTILINE)
+_export = make_regex(r"(?:export[^\S\r\n]+)?")
+_single_quoted_key = make_regex(r"'([^']+)'")
+_unquoted_key = make_regex(r"([^=\#\s]+)")
+_equal_sign = make_regex(r"[^\S\r\n]*=[^\S\r\n]*")
+_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'")
+_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"')
+_unquoted_value_part = make_regex(r"([^ \r\n]*)")
+_comment = make_regex(r"(?:\s*#[^\r\n]*)?")
+_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r)?")
+_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?")
+_double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]")
+_single_quote_escapes = make_regex(r"\\[\\']")
+
+
+try:
+    # this is necessary because we only import these from typing
+    # when we are type checking, and the linter is upset if we
+    # re-import
+    import typing
+    Binding = typing.NamedTuple("Binding", [("key", typing.Optional[typing.Text]),
+                                            ("value", typing.Optional[typing.Text]),
+                                            ("original", typing.Text)])
+except ImportError:  # pragma: no cover
+    from collections import namedtuple
+    Binding = namedtuple("Binding", ["key",  # type: ignore
+                                     "value",
+                                     "original"])  # type: Tuple[Optional[Text], Optional[Text], Text]
+
+
+class Error(Exception):
+    pass
+
+
+class Reader:
+    def __init__(self, stream):
+        # type: (IO[Text]) -> None
+        self.string = stream.read()
+        self.position = 0
+        self.mark = 0
+
+    def has_next(self):
+        # type: () -> bool
+        return self.position < len(self.string)
+
+    def set_mark(self):
+        # type: () -> None
+        self.mark = self.position
+
+    def get_marked(self):
+        # type: () -> Text
+        return self.string[self.mark:self.position]
+
+    def peek(self, count):
+        # type: (int) -> Text
+        return self.string[self.position:self.position + count]
+
+    def read(self, count):
+        # type: (int) -> Text
+        result = self.string[self.position:self.position + count]
+        if len(result) < count:
+            raise Error("read: End of string")
+        self.position += count
+        return result
+
+    def read_regex(self, regex):
+        # type: (Pattern[Text]) -> Sequence[Text]
+        match = regex.match(self.string, self.position)
+        if match is None:
+            raise Error("read_regex: Pattern not found")
+        self.position = match.end()
+        return match.groups()
+
+
+def decode_escapes(regex, string):
+    # type: (Pattern[Text], Text) -> Text
+    def decode_match(match):
+        # type: (Match[Text]) -> Text
+        return codecs.decode(match.group(0), 'unicode-escape')  # type: ignore
+
+    return regex.sub(decode_match, string)
+
+
+def parse_key(reader):
+    # type: (Reader) -> Text
+    char = reader.peek(1)
+    if char == "'":
+        (key,) = reader.read_regex(_single_quoted_key)
+    else:
+        (key,) = reader.read_regex(_unquoted_key)
+    return key
+
+
+def parse_unquoted_value(reader):
+    # type: (Reader) -> Text
+    value = u""
+    while True:
+        (part,) = reader.read_regex(_unquoted_value_part)
+        value += part
+        after = reader.peek(2)
+        if len(after) < 2 or after[0] in u"\r\n" or after[1] in u" #\r\n":
+            return value
+        value += reader.read(2)
+
+
+def parse_value(reader):
+    # type: (Reader) -> Text
+    char = reader.peek(1)
+    if char == u"'":
+        (value,) = reader.read_regex(_single_quoted_value)
+        return decode_escapes(_single_quote_escapes, value)
+    elif char == u'"':
+        (value,) = reader.read_regex(_double_quoted_value)
+        return decode_escapes(_double_quote_escapes, value)
+    elif char in (u"", u"\n", u"\r"):
+        return u""
+    else:
+        return parse_unquoted_value(reader)
+
+
+def parse_binding(reader):
+    # type: (Reader) -> Binding
+    reader.set_mark()
+    try:
+        reader.read_regex(_whitespace)
+        reader.read_regex(_export)
+        key = parse_key(reader)
+        reader.read_regex(_equal_sign)
+        value = parse_value(reader)
+        reader.read_regex(_comment)
+        reader.read_regex(_end_of_line)
+        return Binding(key=key, value=value, original=reader.get_marked())
+    except Error:
+        reader.read_regex(_rest_of_line)
+        return Binding(key=None, value=None, original=reader.get_marked())
+
+
+def parse_stream(stream):
+    # type:(IO[Text]) -> Iterator[Binding]
+    reader = Reader(stream)
+    while reader.has_next():
+        try:
+            yield parse_binding(reader)
+        except Error:
+            return
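To make the new reader-based parser concrete, a short sketch (illustrative, not part of the patch) feeding `parse_stream` an in-memory `.env` snippet:

    import io
    from dotenv.parser import parse_stream

    stream = io.StringIO(u"export FOO='bar'\nBAZ=qux  # trailing comment\n")

    for binding in parse_stream(stream):
        # Each Binding carries the parsed key and value plus the exact
        # source text it came from; lines the reader cannot parse come
        # back as Binding(key=None, value=None, original=<raw line>).
        print(binding.key, binding.value)

    # Prints:
    #   FOO bar
    #   BAZ qux

Unlike the single monolithic regex it replaces, each step (export prefix, key, equals sign, value, comment) is matched by its own small pattern, so a malformed line fails fast and is skipped as a None binding instead of derailing the whole file.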
Text + char = reader.peek(1) + if char == "'": + (key,) = reader.read_regex(_single_quoted_key) + else: + (key,) = reader.read_regex(_unquoted_key) + return key + + +def parse_unquoted_value(reader): + # type: (Reader) -> Text + value = u"" + while True: + (part,) = reader.read_regex(_unquoted_value_part) + value += part + after = reader.peek(2) + if len(after) < 2 or after[0] in u"\r\n" or after[1] in u" #\r\n": + return value + value += reader.read(2) + + +def parse_value(reader): + # type: (Reader) -> Text + char = reader.peek(1) + if char == u"'": + (value,) = reader.read_regex(_single_quoted_value) + return decode_escapes(_single_quote_escapes, value) + elif char == u'"': + (value,) = reader.read_regex(_double_quoted_value) + return decode_escapes(_double_quote_escapes, value) + elif char in (u"", u"\n", u"\r"): + return u"" + else: + return parse_unquoted_value(reader) + + +def parse_binding(reader): + # type: (Reader) -> Binding + reader.set_mark() + try: + reader.read_regex(_whitespace) + reader.read_regex(_export) + key = parse_key(reader) + reader.read_regex(_equal_sign) + value = parse_value(reader) + reader.read_regex(_comment) + reader.read_regex(_end_of_line) + return Binding(key=key, value=value, original=reader.get_marked()) + except Error: + reader.read_regex(_rest_of_line) + return Binding(key=None, value=None, original=reader.get_marked()) + + +def parse_stream(stream): + # type:(IO[Text]) -> Iterator[Binding] + reader = Reader(stream) + while reader.has_next(): + try: + yield parse_binding(reader) + except Error: + return diff --git a/pipenv/vendor/dotenv/version.py b/pipenv/vendor/dotenv/version.py index 17c1a6260b..b2385cb400 100644 --- a/pipenv/vendor/dotenv/version.py +++ b/pipenv/vendor/dotenv/version.py @@ -1 +1 @@ -__version__ = "0.10.2" +__version__ = "0.10.3" diff --git a/pipenv/vendor/jinja2/LICENSE b/pipenv/vendor/jinja2/LICENSE deleted file mode 100644 index 31bf900e58..0000000000 --- a/pipenv/vendor/jinja2/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details. - -Some rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * The names of the contributors may not be used to endorse or - promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
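As a quick illustration of the new ``dotenv.parser`` module added earlier in this
patch: ``parse_stream()`` consumes a text stream and yields ``Binding`` named
tuples, leaving ``key`` and ``value`` as ``None`` for comments and other
unparsable lines. A minimal sketch (the input is illustrative; the module is
importable as ``pipenv.vendor.dotenv.parser``):

    import io

    from pipenv.vendor.dotenv.parser import parse_stream

    stream = io.StringIO(u"export FOO='bar'\n# a comment\nBAZ=qux\n")
    for binding in parse_stream(stream):
        # Comment/unparsable lines yield key=None; binding.original always
        # preserves the raw text of the line, including its line ending.
        if binding.key is not None:
            print("%s=%s" % (binding.key, binding.value))
    # -> FOO=bar
    # -> BAZ=qux

Note that ``parse_binding`` swallows per-line failures and reports them as
``Binding(key=None, value=None, original=...)``, so callers can still
reconstruct the exact source text when a line does not parse.
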
diff --git a/pipenv/vendor/jinja2/LICENSE.rst b/pipenv/vendor/jinja2/LICENSE.rst new file mode 100644 index 0000000000..c37cae49ec --- /dev/null +++ b/pipenv/vendor/jinja2/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2007 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pipenv/vendor/jinja2/__init__.py b/pipenv/vendor/jinja2/__init__.py index 42aa763d57..0eaf721499 100644 --- a/pipenv/vendor/jinja2/__init__.py +++ b/pipenv/vendor/jinja2/__init__.py @@ -27,7 +27,7 @@ :license: BSD, see LICENSE for more details. """ __docformat__ = 'restructuredtext en' -__version__ = '2.10' +__version__ = "2.10.3" # high level interface from jinja2.environment import Environment, Template diff --git a/pipenv/vendor/jinja2/_compat.py b/pipenv/vendor/jinja2/_compat.py index 61d85301a4..4dbf6ea039 100644 --- a/pipenv/vendor/jinja2/_compat.py +++ b/pipenv/vendor/jinja2/_compat.py @@ -97,3 +97,9 @@ def __new__(cls, name, this_bases, d): from urllib.parse import quote_from_bytes as url_quote except ImportError: from urllib import quote as url_quote + + +try: + from collections import abc +except ImportError: + import collections as abc diff --git a/pipenv/vendor/jinja2/bccache.py b/pipenv/vendor/jinja2/bccache.py index 080e527cab..507a9b3dee 100644 --- a/pipenv/vendor/jinja2/bccache.py +++ b/pipenv/vendor/jinja2/bccache.py @@ -296,9 +296,8 @@ class MemcachedBytecodeCache(BytecodeCache): Libraries compatible with this class: - - `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache - - `python-memcached <https://www.tummy.com/Community/software/python-memcached/>`_ - - `cmemcache <http://gijsbert.org/cmemcache/>`_ + - `cachelib <https://github.com/pallets/cachelib>`_ + - `python-memcached <https://pypi.org/project/python-memcached/>`_ (Unfortunately the django cache interface is not compatible because it does not support storing binary data, only unicode. You can however pass diff --git a/pipenv/vendor/jinja2/debug.py b/pipenv/vendor/jinja2/debug.py index b61139f0cd..d3c1a3a875 100644 --- a/pipenv/vendor/jinja2/debug.py +++ b/pipenv/vendor/jinja2/debug.py @@ -365,8 +365,14 @@ def tb_set_next(tb, next): # proxies. 
tb_set_next = None if tproxy is None: - try: - tb_set_next = _init_ugly_crap() - except: - pass - del _init_ugly_crap + # traceback.tb_next can be modified since CPython 3.7 + if sys.version_info >= (3, 7): + def tb_set_next(tb, next): + tb.tb_next = next + else: + # On Python 3.6 and older, use ctypes + try: + tb_set_next = _init_ugly_crap() + except Exception: + pass +del _init_ugly_crap diff --git a/pipenv/vendor/jinja2/runtime.py b/pipenv/vendor/jinja2/runtime.py index f9d7a6806c..5e313369ed 100644 --- a/pipenv/vendor/jinja2/runtime.py +++ b/pipenv/vendor/jinja2/runtime.py @@ -20,7 +20,7 @@ TemplateNotFound from jinja2._compat import imap, text_type, iteritems, \ implements_iterator, implements_to_string, string_types, PY2, \ - with_metaclass + with_metaclass, abc # these variables are exported to the template runtime @@ -313,12 +313,7 @@ def __repr__(self): ) -# register the context as mapping if possible -try: - from collections import Mapping - Mapping.register(Context) -except ImportError: - pass +abc.Mapping.register(Context) class BlockReference(object): diff --git a/pipenv/vendor/jinja2/sandbox.py b/pipenv/vendor/jinja2/sandbox.py index 93fb9d45f3..08c22f4f13 100644 --- a/pipenv/vendor/jinja2/sandbox.py +++ b/pipenv/vendor/jinja2/sandbox.py @@ -14,10 +14,9 @@ """ import types import operator -from collections import Mapping from jinja2.environment import Environment from jinja2.exceptions import SecurityError -from jinja2._compat import string_types, PY2 +from jinja2._compat import string_types, PY2, abc, range_type from jinja2.utils import Markup from markupsafe import EscapeFormatter @@ -79,10 +78,9 @@ pass #: register Python 2.6 abstract base classes -from collections import MutableSet, MutableMapping, MutableSequence -_mutable_set_types += (MutableSet,) -_mutable_mapping_types += (MutableMapping,) -_mutable_sequence_types += (MutableSequence,) +_mutable_set_types += (abc.MutableSet,) +_mutable_mapping_types += (abc.MutableMapping,) +_mutable_sequence_types += (abc.MutableSequence,) _mutable_spec = ( @@ -103,7 +101,7 @@ ) -class _MagicFormatMapping(Mapping): +class _MagicFormatMapping(abc.Mapping): """This class implements a dummy wrapper to fix a bug in the Python standard library for string formatting. @@ -137,7 +135,7 @@ def __len__(self): def inspect_format_method(callable): if not isinstance(callable, (types.MethodType, types.BuiltinMethodType)) or \ - callable.__name__ != 'format': + callable.__name__ not in ('format', 'format_map'): return None obj = callable.__self__ if isinstance(obj, string_types): @@ -148,10 +146,14 @@ def safe_range(*args): """A range that can't generate ranges with a length of more than MAX_RANGE items. """ - rng = range(*args) + rng = range_type(*args) + if len(rng) > MAX_RANGE: - raise OverflowError('range too big, maximum size for range is %d' % - MAX_RANGE) + raise OverflowError( + "Range too big. The sandbox blocks ranges larger than" + " MAX_RANGE (%d)." % MAX_RANGE + ) + return rng @@ -402,7 +404,7 @@ def unsafe_undefined(self, obj, attribute): obj.__class__.__name__ ), name=attribute, obj=obj, exc=SecurityError) - def format_string(self, s, args, kwargs): + def format_string(self, s, args, kwargs, format_func=None): """If a format call is detected, then this is routed through this method so that our safety sandbox can be used for it. 
""" @@ -410,6 +412,17 @@ def format_string(self, s, args, kwargs): formatter = SandboxedEscapeFormatter(self, s.escape) else: formatter = SandboxedFormatter(self) + + if format_func is not None and format_func.__name__ == 'format_map': + if len(args) != 1 or kwargs: + raise TypeError( + 'format_map() takes exactly one argument %d given' + % (len(args) + (kwargs is not None)) + ) + + kwargs = args[0] + args = None + kwargs = _MagicFormatMapping(args, kwargs) rv = formatter.vformat(s, args, kwargs) return type(s)(rv) @@ -418,7 +431,7 @@ def call(__self, __context, __obj, *args, **kwargs): """Call an object from sandboxed code.""" fmt = inspect_format_method(__obj) if fmt is not None: - return __self.format_string(fmt, args, kwargs) + return __self.format_string(fmt, args, kwargs, __obj) # the double prefixes are to avoid double keyword argument # errors when proxying the call. diff --git a/pipenv/vendor/jinja2/tests.py b/pipenv/vendor/jinja2/tests.py index 0adc3d4dbc..bc99d66c83 100644 --- a/pipenv/vendor/jinja2/tests.py +++ b/pipenv/vendor/jinja2/tests.py @@ -10,9 +10,8 @@ """ import operator import re -from collections import Mapping from jinja2.runtime import Undefined -from jinja2._compat import text_type, string_types, integer_types +from jinja2._compat import text_type, string_types, integer_types, abc import decimal number_re = re.compile(r'^-?\d+(\.\d+)?$') @@ -84,7 +83,7 @@ def test_mapping(value): .. versionadded:: 2.6 """ - return isinstance(value, Mapping) + return isinstance(value, abc.Mapping) def test_number(value): diff --git a/pipenv/vendor/jinja2/utils.py b/pipenv/vendor/jinja2/utils.py index 502a311c08..db9c5d062d 100644 --- a/pipenv/vendor/jinja2/utils.py +++ b/pipenv/vendor/jinja2/utils.py @@ -14,7 +14,7 @@ from collections import deque from threading import Lock from jinja2._compat import text_type, string_types, implements_iterator, \ - url_quote + url_quote, abc _word_split_re = re.compile(r'(\s+)') @@ -480,12 +480,7 @@ def __reversed__(self): __copy__ = copy -# register the LRU cache as mutable mapping if possible -try: - from collections import MutableMapping - MutableMapping.register(LRUCache) -except ImportError: - pass +abc.MutableMapping.register(LRUCache) def select_autoescape(enabled_extensions=('html', 'htm', 'xml'), diff --git a/pipenv/vendor/orderedmultidict/LICENSE.md b/pipenv/vendor/orderedmultidict/LICENSE.md index 210e8658b9..fd832f40a9 100644 --- a/pipenv/vendor/orderedmultidict/LICENSE.md +++ b/pipenv/vendor/orderedmultidict/LICENSE.md @@ -28,4 +28,4 @@ OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\ SOFTWARE. -For more information, please refer to <http://unlicense.org/> +For more information, please refer to <https://unlicense.org/> diff --git a/pipenv/vendor/orderedmultidict/__init__.py b/pipenv/vendor/orderedmultidict/__init__.py index 4f0ce2f7cd..d122e35cb4 100755 --- a/pipenv/vendor/orderedmultidict/__init__.py +++ b/pipenv/vendor/orderedmultidict/__init__.py @@ -8,14 +8,14 @@ # grunseid@gmail.com # # License: Build Amazing Things (Unlicense) +# -from __future__ import absolute_import +from os.path import dirname, join as pjoin from .orderedmultidict import * # noqa -__title__ = 'orderedmultidict' -__version__ = '1.0' -__author__ = 'Ansgar Grunseid' -__contact__ = 'grunseid@gmail.com' -__license__ = 'Unlicense' -__url__ = 'https://github.com/gruns/orderedmultidict' +# Import all variables in __version__.py without explicit imports. 
+meta = {} +with open(pjoin(dirname(__file__), '__version__.py')) as f: + exec(f.read(), meta) +globals().update(dict((k, v) for k, v in meta.items() if k not in globals())) diff --git a/pipenv/vendor/orderedmultidict/__version__.py b/pipenv/vendor/orderedmultidict/__version__.py new file mode 100755 index 0000000000..29e47e48be --- /dev/null +++ b/pipenv/vendor/orderedmultidict/__version__.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- + +# +# omdict - Ordered Multivalue Dictionary. +# +# Ansgar Grunseid +# grunseid.com +# grunseid@gmail.com +# +# License: Build Amazing Things (Unlicense) +# + +__title__ = 'orderedmultidict' +__version__ = '1.0.1' +__license__ = 'Unlicense' +__author__ = 'Ansgar Grunseid' +__contact__ = 'grunseid@gmail.com' +__description__ = 'Ordered Multivalue Dictionary' +__url__ = 'https://github.com/gruns/orderedmultidict' diff --git a/pipenv/vendor/orderedmultidict/itemlist.py b/pipenv/vendor/orderedmultidict/itemlist.py index e9e96c725e..9524a5609c 100755 --- a/pipenv/vendor/orderedmultidict/itemlist.py +++ b/pipenv/vendor/orderedmultidict/itemlist.py @@ -8,6 +8,7 @@ # grunseid@gmail.com # # License: Build Amazing Things (Unlicense) +# from __future__ import absolute_import diff --git a/pipenv/vendor/orderedmultidict/orderedmultidict.py b/pipenv/vendor/orderedmultidict/orderedmultidict.py index 924dd8d2bc..6cd55c0fc2 100755 --- a/pipenv/vendor/orderedmultidict/orderedmultidict.py +++ b/pipenv/vendor/orderedmultidict/orderedmultidict.py @@ -8,26 +8,30 @@ # grunseid@gmail.com # # License: Build Amazing Things (Unlicense) +# from __future__ import absolute_import +import sys from itertools import chain -from collections import MutableMapping import six from six.moves import map, zip_longest from .itemlist import itemlist +if six.PY2: + from collections import MutableMapping +else: + from collections.abc import MutableMapping try: from collections import OrderedDict as odict # Python 2.7 and later. except ImportError: from ordereddict import OrderedDict as odict # Python 2.6 and earlier. -import sys -items_attr = 'items' if sys.version_info[0] >= 3 else 'iteritems' _absent = object() # Marker that means no parameter was provided. +_items_attr = 'items' if sys.version_info[0] >= 3 else 'iteritems' def callable_attr(obj, attr): @@ -765,7 +769,7 @@ def __eq__(self, other): for i1, i2 in zip_longest(myiter, otheriter, fillvalue=_absent): if i1 != i2 or i1 is _absent or i2 is _absent: return False - elif not hasattr(other, '__len__') or not hasattr(other, items_attr): + elif not hasattr(other, '__len__') or not hasattr(other, _items_attr): return False # Ignore order so we can compare ordered omdicts with unordered dicts. 
else: @@ -809,3 +813,21 @@ def __str__(self): def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.allitems()) + + def __or__(self, other): + return self.__class__(chain(_get_items(self), _get_items(other))) + + def __ior__(self, other): + for k, v in _get_items(other): + self.add(k, value=v) + return self + + +def _get_items(mapping): + """Find item iterator for an object.""" + names = ('iterallitems', 'allitems', 'iteritems', 'items') + exist = (n for n in names if callable_attr(mapping, n)) + for a in exist: + return getattr(mapping, a)() + raise TypeError( + "Object {} has no compatible items interface.".format(mapping)) diff --git a/pipenv/vendor/packaging/__about__.py b/pipenv/vendor/packaging/__about__.py index 7481c9e298..dc95138d04 100644 --- a/pipenv/vendor/packaging/__about__.py +++ b/pipenv/vendor/packaging/__about__.py @@ -18,7 +18,7 @@ __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "19.0" +__version__ = "19.2" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/pipenv/vendor/packaging/markers.py b/pipenv/vendor/packaging/markers.py index eff5abbbc1..3b8af3242e 100644 --- a/pipenv/vendor/packaging/markers.py +++ b/pipenv/vendor/packaging/markers.py @@ -259,7 +259,7 @@ def default_environment(): "platform_version": platform.version(), "python_full_version": platform.python_version(), "platform_python_implementation": platform.python_implementation(), - "python_version": platform.python_version()[:3], + "python_version": ".".join(platform.python_version_tuple()[:2]), "sys_platform": sys.platform, } diff --git a/pipenv/vendor/packaging/tags.py b/pipenv/vendor/packaging/tags.py new file mode 100644 index 0000000000..ec9942f0f6 --- /dev/null +++ b/pipenv/vendor/packaging/tags.py @@ -0,0 +1,404 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import + +import distutils.util + +try: + from importlib.machinery import EXTENSION_SUFFIXES +except ImportError: # pragma: no cover + import imp + + EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()] + del imp +import platform +import re +import sys +import sysconfig +import warnings + + +INTERPRETER_SHORT_NAMES = { + "python": "py", # Generic. 
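+    # Keys are implementation names as reported (lower-cased) by
+    # platform.python_implementation(); values are wheel tag abbreviations.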
+ "cpython": "cp", + "pypy": "pp", + "ironpython": "ip", + "jython": "jy", +} + + +_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 + + +class Tag(object): + + __slots__ = ["_interpreter", "_abi", "_platform"] + + def __init__(self, interpreter, abi, platform): + self._interpreter = interpreter.lower() + self._abi = abi.lower() + self._platform = platform.lower() + + @property + def interpreter(self): + return self._interpreter + + @property + def abi(self): + return self._abi + + @property + def platform(self): + return self._platform + + def __eq__(self, other): + return ( + (self.platform == other.platform) + and (self.abi == other.abi) + and (self.interpreter == other.interpreter) + ) + + def __hash__(self): + return hash((self._interpreter, self._abi, self._platform)) + + def __str__(self): + return "{}-{}-{}".format(self._interpreter, self._abi, self._platform) + + def __repr__(self): + return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) + + +def parse_tag(tag): + tags = set() + interpreters, abis, platforms = tag.split("-") + for interpreter in interpreters.split("."): + for abi in abis.split("."): + for platform_ in platforms.split("."): + tags.add(Tag(interpreter, abi, platform_)) + return frozenset(tags) + + +def _normalize_string(string): + return string.replace(".", "_").replace("-", "_") + + +def _cpython_interpreter(py_version): + # TODO: Is using py_version_nodot for interpreter version critical? + return "cp{major}{minor}".format(major=py_version[0], minor=py_version[1]) + + +def _cpython_abis(py_version): + abis = [] + version = "{}{}".format(*py_version[:2]) + debug = pymalloc = ucs4 = "" + with_debug = sysconfig.get_config_var("Py_DEBUG") + has_refcount = hasattr(sys, "gettotalrefcount") + # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled + # extension modules is the best option. + # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 + has_ext = "_d.pyd" in EXTENSION_SUFFIXES + if with_debug or (with_debug is None and (has_refcount or has_ext)): + debug = "d" + if py_version < (3, 8): + with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC") + if with_pymalloc or with_pymalloc is None: + pymalloc = "m" + if py_version < (3, 3): + unicode_size = sysconfig.get_config_var("Py_UNICODE_SIZE") + if unicode_size == 4 or ( + unicode_size is None and sys.maxunicode == 0x10FFFF + ): + ucs4 = "u" + elif debug: + # Debug builds can also load "normal" extension modules. + # We can also assume no UCS-4 or pymalloc requirement. + abis.append("cp{version}".format(version=version)) + abis.insert( + 0, + "cp{version}{debug}{pymalloc}{ucs4}".format( + version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 + ), + ) + return abis + + +def _cpython_tags(py_version, interpreter, abis, platforms): + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms): + yield tag + for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms): + yield tag + # PEP 384 was first implemented in Python 3.2. 
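+    # Hence wheels built against the stable ABI of any older minor release
+    # (down to 3.2) remain importable, so those abi3 tags are yielded too.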
+ for minor_version in range(py_version[1] - 1, 1, -1): + for platform_ in platforms: + interpreter = "cp{major}{minor}".format( + major=py_version[0], minor=minor_version + ) + yield Tag(interpreter, "abi3", platform_) + + +def _pypy_interpreter(): + return "pp{py_major}{pypy_major}{pypy_minor}".format( + py_major=sys.version_info[0], + pypy_major=sys.pypy_version_info.major, + pypy_minor=sys.pypy_version_info.minor, + ) + + +def _generic_abi(): + abi = sysconfig.get_config_var("SOABI") + if abi: + return _normalize_string(abi) + else: + return "none" + + +def _pypy_tags(py_version, interpreter, abi, platforms): + for tag in (Tag(interpreter, abi, platform) for platform in platforms): + yield tag + for tag in (Tag(interpreter, "none", platform) for platform in platforms): + yield tag + + +def _generic_tags(interpreter, py_version, abi, platforms): + for tag in (Tag(interpreter, abi, platform) for platform in platforms): + yield tag + if abi != "none": + tags = (Tag(interpreter, "none", platform_) for platform_ in platforms) + for tag in tags: + yield tag + + +def _py_interpreter_range(py_version): + """ + Yield Python versions in descending order. + + After the latest version, the major-only version will be yielded, and then + all following versions up to 'end'. + """ + yield "py{major}{minor}".format(major=py_version[0], minor=py_version[1]) + yield "py{major}".format(major=py_version[0]) + for minor in range(py_version[1] - 1, -1, -1): + yield "py{major}{minor}".format(major=py_version[0], minor=minor) + + +def _independent_tags(interpreter, py_version, platforms): + """ + Return the sequence of tags that are consistent across implementations. + + The tags consist of: + - py*-none-<platform> + - <interpreter>-none-any + - py*-none-any + """ + for version in _py_interpreter_range(py_version): + for platform_ in platforms: + yield Tag(version, "none", platform_) + yield Tag(interpreter, "none", "any") + for version in _py_interpreter_range(py_version): + yield Tag(version, "none", "any") + + +def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER): + if not is_32bit: + return arch + + if arch.startswith("ppc"): + return "ppc" + + return "i386" + + +def _mac_binary_formats(version, cpu_arch): + formats = [cpu_arch] + if cpu_arch == "x86_64": + if version < (10, 4): + return [] + formats.extend(["intel", "fat64", "fat32"]) + + elif cpu_arch == "i386": + if version < (10, 4): + return [] + formats.extend(["intel", "fat32", "fat"]) + + elif cpu_arch == "ppc64": + # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? + if version > (10, 5) or version < (10, 4): + return [] + formats.append("fat64") + + elif cpu_arch == "ppc": + if version > (10, 6): + return [] + formats.extend(["fat32", "fat"]) + + formats.append("universal") + return formats + + +def _mac_platforms(version=None, arch=None): + version_str, _, cpu_arch = platform.mac_ver() + if version is None: + version = tuple(map(int, version_str.split(".")[:2])) + if arch is None: + arch = _mac_arch(cpu_arch) + platforms = [] + for minor_version in range(version[1], -1, -1): + compat_version = version[0], minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + platforms.append( + "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + ) + return platforms + + +# From PEP 513. +def _is_manylinux_compatible(name, glibc_version): + # Check for presence of _manylinux module. 
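+    # Per PEP 513, a platform may provide a `_manylinux` module whose
+    # `<name>_compatible` attribute overrides the glibc heuristic below.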
+ try: + import _manylinux + + return bool(getattr(_manylinux, name + "_compatible")) + except (ImportError, AttributeError): + # Fall through to heuristic check below. + pass + + return _have_compatible_glibc(*glibc_version) + + +def _glibc_version_string(): + # Returns glibc version string, or None if not using glibc. + import ctypes + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + process_namespace = ctypes.CDLL(None) + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +# Separated out from have_compatible_glibc for easier unit testing. +def _check_glibc_version(version_str, required_major, minimum_minor): + # Parse string and check against requested version. + # + # We use a regexp instead of str.split because we want to discard any + # random junk that might come after the minor version -- this might happen + # in patched/forked versions of glibc (e.g. Linaro's version of glibc + # uses version strings like "2.20-2014.11"). See gh-3588. + m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) + if not m: + warnings.warn( + "Expected glibc version with 2 components major.minor," + " got: %s" % version_str, + RuntimeWarning, + ) + return False + return ( + int(m.group("major")) == required_major + and int(m.group("minor")) >= minimum_minor + ) + + +def _have_compatible_glibc(required_major, minimum_minor): + version_str = _glibc_version_string() + if version_str is None: + return False + return _check_glibc_version(version_str, required_major, minimum_minor) + + +def _linux_platforms(is_32bit=_32_BIT_INTERPRETER): + linux = _normalize_string(distutils.util.get_platform()) + if linux == "linux_x86_64" and is_32bit: + linux = "linux_i686" + manylinux_support = ( + ("manylinux2014", (2, 17)), # CentOS 7 w/ glibc 2.17 (PEP 599) + ("manylinux2010", (2, 12)), # CentOS 6 w/ glibc 2.12 (PEP 571) + ("manylinux1", (2, 5)), # CentOS 5 w/ glibc 2.5 (PEP 513) + ) + manylinux_support_iter = iter(manylinux_support) + for name, glibc_version in manylinux_support_iter: + if _is_manylinux_compatible(name, glibc_version): + platforms = [linux.replace("linux", name)] + break + else: + platforms = [] + # Support for a later manylinux implies support for an earlier version. + platforms += [linux.replace("linux", name) for name, _ in manylinux_support_iter] + platforms.append(linux) + return platforms + + +def _generic_platforms(): + platform = _normalize_string(distutils.util.get_platform()) + return [platform] + + +def _interpreter_name(): + name = platform.python_implementation().lower() + return INTERPRETER_SHORT_NAMES.get(name) or name + + +def _generic_interpreter(name, py_version): + version = sysconfig.get_config_var("py_version_nodot") + if not version: + version = "".join(map(str, py_version[:2])) + return "{name}{version}".format(name=name, version=version) + + +def sys_tags(): + """ + Returns the sequence of tag triples for the running interpreter. 
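+    (For illustration only: on a manylinux2014-compatible x86-64 CPython 3.7,
+    the first tag yielded would be cp37-cp37m-manylinux2014_x86_64.)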
+ + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + py_version = sys.version_info[:2] + interpreter_name = _interpreter_name() + if platform.system() == "Darwin": + platforms = _mac_platforms() + elif platform.system() == "Linux": + platforms = _linux_platforms() + else: + platforms = _generic_platforms() + + if interpreter_name == "cp": + interpreter = _cpython_interpreter(py_version) + abis = _cpython_abis(py_version) + for tag in _cpython_tags(py_version, interpreter, abis, platforms): + yield tag + elif interpreter_name == "pp": + interpreter = _pypy_interpreter() + abi = _generic_abi() + for tag in _pypy_tags(py_version, interpreter, abi, platforms): + yield tag + else: + interpreter = _generic_interpreter(interpreter_name, py_version) + abi = _generic_abi() + for tag in _generic_tags(interpreter, py_version, abi, platforms): + yield tag + for tag in _independent_tags(interpreter, py_version, platforms): + yield tag diff --git a/pipenv/vendor/parse.LICENSE b/pipenv/vendor/parse.LICENSE index 3163ad6d23..6c73b16ceb 100644 --- a/pipenv/vendor/parse.LICENSE +++ b/pipenv/vendor/parse.LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012-2013 Richard Jones <richard@python.org> +Copyright (c) 2012-2019 Richard Jones <richard@python.org> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -7,8 +7,8 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, @@ -17,5 +17,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - diff --git a/pipenv/vendor/parse.py b/pipenv/vendor/parse.py index 0b5cce2395..1a5f9e634f 100644 --- a/pipenv/vendor/parse.py +++ b/pipenv/vendor/parse.py @@ -21,7 +21,7 @@ Or find all the occurrences of some pattern in a string: ->>> ''.join(r.fixed[0] for r in findall(">{}<", "<p>the <b>bold</b> text</p>")) +>>> ''.join(r[0] for r in findall(">{}<", "<p>the <b>bold</b> text</p>")) 'the bold text' If you're going to use the same pattern to match lots of strings you can @@ -129,7 +129,8 @@ In addition some regular expression character group types "D", "w", "W", "s" and "S" are also available. - The "e" and "g" types are case-insensitive so there is not need for - the "E" or "G" types. + the "E" or "G" types. The "e" type handles Fortran formatted numbers (no + leading 0 before the decimal point). ===== =========================================== ======== Type Characters Matched Output @@ -345,6 +346,10 @@ **Version history (in brief)**: +- 1.13.0 Handle Fortran formatted numbers with no leading 0 before decimal + point (thanks @purpleskyfall). + Handle comparison of FixedTzOffset with other types of object. 
+- 1.12.1 Actually use the `case_sensitive` arg in compile (thanks @jacquev6) - 1.12.0 Do not assume closing brace when an opening one is found (thanks @mattsep) - 1.11.1 Revert having unicode char in docstring, it breaks Bamboo builds(?!) - 1.11.0 Implement `__contains__` for Result instances. @@ -416,7 +421,7 @@ ''' from __future__ import absolute_import -__version__ = '1.12.0' +__version__ = '1.13.0' # yes, I now have two problems import re @@ -524,6 +529,8 @@ def dst(self, dt): return self.ZERO def __eq__(self, other): + if not isinstance(other, FixedTzOffset): + return False return self._name == other._name and self._offset == other._offset @@ -997,7 +1004,7 @@ def f(string, m): s = r'\d+\.\d+' self._type_conversions[group] = lambda s, m: Decimal(s) elif type == 'e': - s = r'\d+\.\d+[eE][-+]?\d+|nan|NAN|[-+]?inf|[-+]?INF' + s = r'\d*\.\d+[eE][-+]?\d+|nan|NAN|[-+]?inf|[-+]?INF' self._type_conversions[group] = lambda s, m: float(s) elif type == 'g': s = r'\d+(\.\d+)?([eE][-+]?\d+)?|nan|NAN|[-+]?inf|[-+]?INF' @@ -1310,7 +1317,7 @@ def compile(format, extra_types=None, case_sensitive=False): Returns a Parser instance. ''' - return Parser(format, extra_types=extra_types) + return Parser(format, extra_types=extra_types, case_sensitive=case_sensitive) # Copyright (c) 2012-2019 Richard Jones <richard@python.org> diff --git a/pipenv/vendor/passa/actions/clean.py b/pipenv/vendor/passa/actions/clean.py index 3570e4dba2..5c19b31cf9 100644 --- a/pipenv/vendor/passa/actions/clean.py +++ b/pipenv/vendor/passa/actions/clean.py @@ -3,11 +3,11 @@ from __future__ import absolute_import, print_function, unicode_literals -def clean(project, dev=False): +def clean(project, default=True, dev=False): from passa.models.synchronizers import Cleaner from passa.operations.sync import clean - cleaner = Cleaner(project, default=True, develop=dev) + cleaner = Cleaner(project, default=default, develop=dev) success = clean(cleaner) if not success: diff --git a/pipenv/vendor/passa/cli/add.py b/pipenv/vendor/passa/cli/add.py index d5596cdeba..077149f00a 100644 --- a/pipenv/vendor/passa/cli/add.py +++ b/pipenv/vendor/passa/cli/add.py @@ -20,7 +20,8 @@ def run(self, options): packages=options.packages, editables=options.editables, project=options.project, - dev=options.dev + dev=options.dev, + sync=options.sync ) diff --git a/pipenv/vendor/pathlib2/__init__.py b/pipenv/vendor/pathlib2/__init__.py index 2eb41e309e..d5a47a66c6 100644 --- a/pipenv/vendor/pathlib2/__init__.py +++ b/pipenv/vendor/pathlib2/__init__.py @@ -12,12 +12,18 @@ import re import six import sys -from collections import Sequence -from errno import EINVAL, ENOENT, ENOTDIR, EEXIST, EPERM, EACCES -from operator import attrgetter +from errno import EINVAL, ENOENT, ENOTDIR, EBADF +from errno import EEXIST, EPERM, EACCES +from operator import attrgetter from stat import ( S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO) + +try: + from collections.abc import Sequence +except ImportError: + from collections import Sequence + try: from urllib import quote as urlquote_from_bytes except ImportError: @@ -54,6 +60,18 @@ # Internals # +# EBADF - guard agains macOS `stat` throwing EBADF +_IGNORED_ERROS = (ENOENT, ENOTDIR, EBADF) + +_IGNORED_WINERRORS = ( + 21, # ERROR_NOT_READY - drive exists but is not accessible +) + + +def _ignore_error(exception): + return (getattr(exception, 'errno', None) in _IGNORED_ERROS or + getattr(exception, 'winerror', None) in _IGNORED_WINERRORS) + def _py2_fsencode(parts): # py2 => minimal unicode support @@ -90,9 +108,25 
@@ def _try_except_filenotfounderror(try_func, except_func): try_func() except FileNotFoundError as exc: except_func(exc) + elif os.name != 'nt': + try: + try_func() + except EnvironmentError as exc: + if exc.errno != ENOENT: + raise + else: + except_func(exc) else: try: try_func() + except WindowsError as exc: + # errno contains winerror + # 2 = file not found + # 3 = path not found + if exc.errno not in (2, 3): + raise + else: + except_func(exc) except EnvironmentError as exc: if exc.errno != ENOENT: raise @@ -336,16 +370,26 @@ def resolve(self, path, strict=False): else: # End of the path after the first one not found tail_parts = [] + + def _try_func(): + result[0] = self._ext_to_normal(_getfinalpathname(s)) + # if there was no exception, set flag to 0 + result[1] = 0 + + def _exc_func(exc): + pass + while True: - try: - s = self._ext_to_normal(_getfinalpathname(s)) - except FileNotFoundError: + result = [None, 1] + _try_except_filenotfounderror(_try_func, _exc_func) + if result[1] == 1: # file not found exception raised previous_s = s s, tail = os.path.split(s) tail_parts.append(tail) if previous_s == s: return path else: + s = result[0] return os.path.join(s, *reversed(tail_parts)) # Means fallback on absolute return None @@ -715,7 +759,13 @@ def _iterate_directories(self, parent_path, is_dir, scandir): def try_iter(): entries = list(scandir(parent_path)) for entry in entries: - if entry.is_dir() and not entry.is_symlink(): + entry_is_dir = False + try: + entry_is_dir = entry.is_dir() + except OSError as e: + if not _ignore_error(e): + raise + if entry_is_dir and not entry.is_symlink(): path = parent_path._make_child_relpath(entry.name) for p in self._iterate_directories(path, is_dir, scandir): yield p @@ -832,7 +882,11 @@ def _parse_args(cls, args): # also handle unicode for PY2 (six.text_type = unicode) elif six.PY2 and isinstance(a, six.text_type): # cast to str using filesystem encoding - parts.append(a.encode(sys.getfilesystemencoding())) + # note: in rare circumstances, on Python < 3.2, + # getfilesystemencoding can return None, in that + # case fall back to ascii + parts.append(a.encode( + sys.getfilesystemencoding() or "ascii")) else: raise TypeError( "argument should be a str object or an os.PathLike " @@ -1026,8 +1080,9 @@ def with_name(self, name): self._parts[:-1] + [name]) def with_suffix(self, suffix): - """Return a new path with the file suffix changed (or added, if - none). + """Return a new path with the file suffix changed. If the path + has no suffix, add given suffix. If the given suffix is an empty + string, remove the suffix from the path. """ # XXX if suffix is None, should the current suffix be removed? f = self._flavour @@ -1173,6 +1228,11 @@ class PurePosixPath(PurePath): class PureWindowsPath(PurePath): + """PurePath subclass for Windows systems. + + On a Windows system, instantiating a PurePath should return this object. + However, you can also instantiate it directly on any system. + """ _flavour = _windows_flavour __slots__ = () @@ -1181,6 +1241,14 @@ class PureWindowsPath(PurePath): class Path(PurePath): + """PurePath subclass that can make system calls. + + Path represents a filesystem path but unlike PurePath, also offers + methods to do system calls on path objects. Depending on your system, + instantiating a Path will return either a PosixPath or a WindowsPath + object. You can also instantiate a PosixPath or WindowsPath directly, + but cannot instantiate a WindowsPath on a POSIX system or vice versa. 
+ """ __slots__ = ( '_accessor', '_closed', @@ -1286,7 +1354,7 @@ def iterdir(self): def glob(self, pattern): """Iterate over this subtree and yield all existing files (of any - kind, including directories) matching the given pattern. + kind, including directories) matching the given relative pattern. """ if not pattern: raise ValueError("Unacceptable pattern: {0!r}".format(pattern)) @@ -1300,7 +1368,8 @@ def glob(self, pattern): def rglob(self, pattern): """Recursively yield all existing files (of any kind, including - directories) matching the given pattern, anywhere in this subtree. + directories) matching the given relative pattern, anywhere in + this subtree. """ pattern = self._flavour.casefold(pattern) drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) @@ -1339,9 +1408,20 @@ def resolve(self, strict=False): s = self._flavour.resolve(self, strict=strict) if s is None: # No symlink resolution => for consistency, raise an error if - # the path doesn't exist or is forbidden - self.stat() + # the path is forbidden + # but not raise error if file does not exist (see issue #54). + + def _try_func(): + self.stat() + + def _exc_func(exc): + pass + + _try_except_filenotfounderror(_try_func, _exc_func) s = str(self.absolute()) + else: + # ensure s is a string (normpath requires this on older python) + s = str(s) # Now we have no symlinks in the path, it's safe to normalize it. normed = self._flavour.pathmod.normpath(s) obj = self._from_parts((normed,), init=False) @@ -1463,6 +1543,8 @@ def _exc_func(exc): try: _try_except_filenotfounderror(_try_func, _exc_func) except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS if not exist_ok or not self.is_dir(): raise @@ -1548,9 +1630,12 @@ def exists(self): try: self.stat() except OSError as e: - if e.errno not in (ENOENT, ENOTDIR): + if not _ignore_error(e): raise return False + except ValueError: + # Non-encodable path + return False return True def is_dir(self): @@ -1560,11 +1645,14 @@ def is_dir(self): try: return S_ISDIR(self.stat().st_mode) except OSError as e: - if e.errno not in (ENOENT, ENOTDIR): + if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False + except ValueError: + # Non-encodable path + return False def is_file(self): """ @@ -1574,11 +1662,35 @@ def is_file(self): try: return S_ISREG(self.stat().st_mode) except OSError as e: - if e.errno not in (ENOENT, ENOTDIR): + if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False + except ValueError: + # Non-encodable path + return False + + def is_mount(self): + """ + Check if this path is a POSIX mount point + """ + # Need to exist and be a dir + if not self.exists() or not self.is_dir(): + return False + + parent = Path(self.parent) + try: + parent_dev = parent.stat().st_dev + except OSError: + return False + + dev = self.stat().st_dev + if dev != parent_dev: + return True + ino = self.stat().st_ino + parent_ino = parent.stat().st_ino + return ino == parent_ino def is_symlink(self): """ @@ -1587,10 +1699,13 @@ def is_symlink(self): try: return S_ISLNK(self.lstat().st_mode) except OSError as e: - if e.errno not in (ENOENT, ENOTDIR): + if not _ignore_error(e): raise # Path doesn't exist return False + except ValueError: + # Non-encodable path + return False def is_block_device(self): """ @@ -1599,11 +1714,14 @@ def 
is_block_device(self): try: return S_ISBLK(self.stat().st_mode) except OSError as e: - if e.errno not in (ENOENT, ENOTDIR): + if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False + except ValueError: + # Non-encodable path + return False def is_char_device(self): """ @@ -1612,11 +1730,14 @@ def is_char_device(self): try: return S_ISCHR(self.stat().st_mode) except OSError as e: - if e.errno not in (ENOENT, ENOTDIR): + if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False + except ValueError: + # Non-encodable path + return False def is_fifo(self): """ @@ -1625,11 +1746,14 @@ def is_fifo(self): try: return S_ISFIFO(self.stat().st_mode) except OSError as e: - if e.errno not in (ENOENT, ENOTDIR): + if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False + except ValueError: + # Non-encodable path + return False def is_socket(self): """ @@ -1638,11 +1762,14 @@ def is_socket(self): try: return S_ISSOCK(self.stat().st_mode) except OSError as e: - if e.errno not in (ENOENT, ENOTDIR): + if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False + except ValueError: + # Non-encodable path + return False def expanduser(self): """ Return a new path with expanded ~ and ~user constructs @@ -1657,10 +1784,18 @@ def expanduser(self): class PosixPath(Path, PurePosixPath): + """Path subclass for non-Windows systems. + + On a POSIX system, instantiating a Path should return this object. + """ __slots__ = () class WindowsPath(Path, PureWindowsPath): + """Path subclass for Windows systems. + + On a Windows system, instantiating a Path should return this object. 
+ """ __slots__ = () def owner(self): @@ -1668,3 +1803,7 @@ def owner(self): def group(self): raise NotImplementedError("Path.group() is unsupported on this system") + + def is_mount(self): + raise NotImplementedError( + "Path.is_mount() is unsupported on this system") diff --git a/pipenv/vendor/pep517/__init__.py b/pipenv/vendor/pep517/__init__.py index 9c1a098f78..d3705f6460 100644 --- a/pipenv/vendor/pep517/__init__.py +++ b/pipenv/vendor/pep517/__init__.py @@ -1,4 +1,4 @@ """Wrappers to build Python packages using PEP 517 hooks """ -__version__ = '0.5.0' +__version__ = '0.8.1' diff --git a/pipenv/vendor/pep517/_in_process.py b/pipenv/vendor/pep517/_in_process.py index d6524b660a..1589a6cac5 100644 --- a/pipenv/vendor/pep517/_in_process.py +++ b/pipenv/vendor/pep517/_in_process.py @@ -2,7 +2,9 @@ It expects: - Command line args: hook_name, control_dir -- Environment variable: PEP517_BUILD_BACKEND=entry.point:spec +- Environment variables: + PEP517_BUILD_BACKEND=entry.point:spec + PEP517_BACKEND_PATH=paths (separated with os.pathsep) - control_dir/input.json: - {"kwargs": {...}} @@ -13,10 +15,12 @@ from glob import glob from importlib import import_module import os +import os.path from os.path import join as pjoin import re import shutil import sys +import traceback # This is run as a script, not a module, so it can't do a relative import import compat @@ -24,16 +28,49 @@ class BackendUnavailable(Exception): """Raised if we cannot import the backend""" + def __init__(self, traceback): + self.traceback = traceback + + +class BackendInvalid(Exception): + """Raised if the backend is invalid""" + def __init__(self, message): + self.message = message + + +class HookMissing(Exception): + """Raised if a hook is missing and we are not executing the fallback""" + + +def contained_in(filename, directory): + """Test if a file is located within the given directory.""" + filename = os.path.normcase(os.path.abspath(filename)) + directory = os.path.normcase(os.path.abspath(directory)) + return os.path.commonprefix([filename, directory]) == directory def _build_backend(): """Find and load the build backend""" + # Add in-tree backend directories to the front of sys.path. + backend_path = os.environ.get('PEP517_BACKEND_PATH') + if backend_path: + extra_pathitems = backend_path.split(os.pathsep) + sys.path[:0] = extra_pathitems + ep = os.environ['PEP517_BUILD_BACKEND'] mod_path, _, obj_path = ep.partition(':') try: obj = import_module(mod_path) except ImportError: - raise BackendUnavailable + raise BackendUnavailable(traceback.format_exc()) + + if backend_path: + if not any( + contained_in(obj.__file__, path) + for path in extra_pathitems + ): + raise BackendInvalid("Backend was not loaded from backend-path") + if obj_path: for path_part in obj_path.split('.'): obj = getattr(obj, path_part) @@ -54,15 +91,19 @@ def get_requires_for_build_wheel(config_settings): return hook(config_settings) -def prepare_metadata_for_build_wheel(metadata_directory, config_settings): +def prepare_metadata_for_build_wheel( + metadata_directory, config_settings, _allow_fallback): """Invoke optional prepare_metadata_for_build_wheel - Implements a fallback by building a wheel if the hook isn't defined. + Implements a fallback by building a wheel if the hook isn't defined, + unless _allow_fallback is False in which case HookMissing is raised. 
""" backend = _build_backend() try: hook = backend.prepare_metadata_for_build_wheel except AttributeError: + if not _allow_fallback: + raise HookMissing() return _get_wheel_metadata_from_wheel(backend, metadata_directory, config_settings) else: @@ -161,6 +202,8 @@ class _DummyException(Exception): class GotUnsupportedOperation(Exception): """For internal use when backend raises UnsupportedOperation""" + def __init__(self, traceback): + self.traceback = traceback def build_sdist(sdist_directory, config_settings): @@ -169,7 +212,7 @@ def build_sdist(sdist_directory, config_settings): try: return backend.build_sdist(sdist_directory, config_settings) except getattr(backend, 'UnsupportedOperation', _DummyException): - raise GotUnsupportedOperation + raise GotUnsupportedOperation(traceback.format_exc()) HOOK_NAMES = { @@ -195,10 +238,17 @@ def main(): json_out = {'unsupported': False, 'return_val': None} try: json_out['return_val'] = hook(**hook_input['kwargs']) - except BackendUnavailable: + except BackendUnavailable as e: json_out['no_backend'] = True - except GotUnsupportedOperation: + json_out['traceback'] = e.traceback + except BackendInvalid as e: + json_out['backend_invalid'] = True + json_out['backend_error'] = e.message + except GotUnsupportedOperation as e: json_out['unsupported'] = True + json_out['traceback'] = e.traceback + except HookMissing: + json_out['hook_missing'] = True compat.write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) diff --git a/pipenv/vendor/pep517/build.py b/pipenv/vendor/pep517/build.py index 6fca39a87c..7618c78c19 100644 --- a/pipenv/vendor/pep517/build.py +++ b/pipenv/vendor/pep517/build.py @@ -3,25 +3,56 @@ import argparse import logging import os -import contextlib -import pytoml +import toml import shutil -import errno -import tempfile from .envbuild import BuildEnvironment from .wrappers import Pep517HookCaller +from .dirtools import tempdir, mkdir_p +from .compat import FileNotFoundError log = logging.getLogger(__name__) -@contextlib.contextmanager -def tempdir(): - td = tempfile.mkdtemp() +def validate_system(system): + """ + Ensure build system has the requisite fields. + """ + required = {'requires', 'build-backend'} + if not (required <= set(system)): + message = "Missing required fields: {missing}".format( + missing=required-set(system), + ) + raise ValueError(message) + + +def load_system(source_dir): + """ + Load the build system from a source dir (pyproject.toml). + """ + pyproject = os.path.join(source_dir, 'pyproject.toml') + with open(pyproject) as f: + pyproject_data = toml.load(f) + return pyproject_data['build-system'] + + +def compat_system(source_dir): + """ + Given a source dir, attempt to get a build system backend + and requirements from pyproject.toml. Fallback to + setuptools but only if the file was not found or a build + system was not indicated. + """ try: - yield td - finally: - shutil.rmtree(td) + system = load_system(source_dir) + except (FileNotFoundError, KeyError): + system = {} + system.setdefault( + 'build-backend', + 'setuptools.build_meta:__legacy__', + ) + system.setdefault('requires', ['setuptools', 'wheel']) + return system def _do_build(hooks, env, dist, dest): @@ -42,33 +73,18 @@ def _do_build(hooks, env, dist, dest): shutil.move(source, os.path.join(dest, os.path.basename(filename))) -def mkdir_p(*args, **kwargs): - """Like `mkdir`, but does not raise an exception if the - directory already exists. 
- """ - try: - return os.mkdir(*args, **kwargs) - except OSError as exc: - if exc.errno != errno.EEXIST: - raise - - -def build(source_dir, dist, dest=None): - pyproject = os.path.join(source_dir, 'pyproject.toml') +def build(source_dir, dist, dest=None, system=None): + system = system or load_system(source_dir) dest = os.path.join(source_dir, dest or 'dist') mkdir_p(dest) - with open(pyproject) as f: - pyproject_data = pytoml.load(f) - # Ensure the mandatory data can be loaded - buildsys = pyproject_data['build-system'] - requires = buildsys['requires'] - backend = buildsys['build-backend'] - - hooks = Pep517HookCaller(source_dir, backend) + validate_system(system) + hooks = Pep517HookCaller( + source_dir, system['build-backend'], system.get('backend-path') + ) with BuildEnvironment() as env: - env.pip_install(requires) + env.pip_install(system['requires']) _do_build(hooks, env, dist, dest) diff --git a/pipenv/vendor/pep517/check.py b/pipenv/vendor/pep517/check.py index fc82cca7ec..9e0c068209 100644 --- a/pipenv/vendor/pep517/check.py +++ b/pipenv/vendor/pep517/check.py @@ -4,7 +4,7 @@ import logging import os from os.path import isfile, join as pjoin -from pytoml import TomlError, load as toml_load +from toml import TomlDecodeError, load as toml_load import shutil from subprocess import CalledProcessError import sys @@ -147,12 +147,13 @@ def check(source_dir): buildsys = pyproject_data['build-system'] requires = buildsys['requires'] backend = buildsys['build-backend'] + backend_path = buildsys.get('backend-path') log.info('Loaded pyproject.toml') - except (TomlError, KeyError): + except (TomlDecodeError, KeyError): log.error("Invalid pyproject.toml", exc_info=True) return False - hooks = Pep517HookCaller(source_dir, backend) + hooks = Pep517HookCaller(source_dir, backend, backend_path) sdist_ok = check_build_sdist(hooks, requires) wheel_ok = check_build_wheel(hooks, requires) diff --git a/pipenv/vendor/pep517/compat.py b/pipenv/vendor/pep517/compat.py index 01c66fc7e4..8432acb732 100644 --- a/pipenv/vendor/pep517/compat.py +++ b/pipenv/vendor/pep517/compat.py @@ -1,7 +1,10 @@ -"""Handle reading and writing JSON in UTF-8, on Python 3 and 2.""" +"""Python 2/3 compatibility""" import json import sys + +# Handle reading and writing JSON in UTF-8, on Python 3 and 2. + if sys.version_info[0] >= 3: # Python 3 def write_json(obj, path, **kwargs): @@ -21,3 +24,11 @@ def write_json(obj, path, **kwargs): def read_json(path): with open(path, 'rb') as f: return json.load(f) + + +# FileNotFoundError + +try: + FileNotFoundError = FileNotFoundError +except NameError: + FileNotFoundError = IOError diff --git a/pipenv/vendor/pep517/dirtools.py b/pipenv/vendor/pep517/dirtools.py new file mode 100644 index 0000000000..58c6ca0c56 --- /dev/null +++ b/pipenv/vendor/pep517/dirtools.py @@ -0,0 +1,44 @@ +import os +import io +import contextlib +import tempfile +import shutil +import errno +import zipfile + + +@contextlib.contextmanager +def tempdir(): + """Create a temporary directory in a context manager.""" + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + + +def mkdir_p(*args, **kwargs): + """Like `mkdir`, but does not raise an exception if the + directory already exists. 
+ """ + try: + return os.mkdir(*args, **kwargs) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + +def dir_to_zipfile(root): + """Construct an in-memory zip file for a directory.""" + buffer = io.BytesIO() + zip_file = zipfile.ZipFile(buffer, 'w') + for root, dirs, files in os.walk(root): + for path in dirs: + fs_path = os.path.join(root, path) + rel_path = os.path.relpath(fs_path, root) + zip_file.writestr(rel_path + '/', '') + for path in files: + fs_path = os.path.join(root, path) + rel_path = os.path.relpath(fs_path, root) + zip_file.write(fs_path, rel_path) + return zip_file diff --git a/pipenv/vendor/pep517/envbuild.py b/pipenv/vendor/pep517/envbuild.py index 61253f4da8..cacd2b12c0 100644 --- a/pipenv/vendor/pep517/envbuild.py +++ b/pipenv/vendor/pep517/envbuild.py @@ -3,23 +3,27 @@ import os import logging -import pytoml +import toml import shutil from subprocess import check_call import sys from sysconfig import get_paths from tempfile import mkdtemp -from .wrappers import Pep517HookCaller +from .wrappers import Pep517HookCaller, LoggerWrapper log = logging.getLogger(__name__) def _load_pyproject(source_dir): with open(os.path.join(source_dir, 'pyproject.toml')) as f: - pyproject_data = pytoml.load(f) + pyproject_data = toml.load(f) buildsys = pyproject_data['build-system'] - return buildsys['requires'], buildsys['build-backend'] + return ( + buildsys['requires'], + buildsys['build-backend'], + buildsys.get('backend-path'), + ) class BuildEnvironment(object): @@ -90,9 +94,14 @@ def pip_install(self, reqs): if not reqs: return log.info('Calling pip to install %s', reqs) - check_call([ + cmd = [ sys.executable, '-m', 'pip', 'install', '--ignore-installed', - '--prefix', self.path] + list(reqs)) + '--prefix', self.path] + list(reqs) + check_call( + cmd, + stdout=LoggerWrapper(log, logging.INFO), + stderr=LoggerWrapper(log, logging.ERROR), + ) def __exit__(self, exc_type, exc_val, exc_tb): needs_cleanup = ( @@ -126,8 +135,8 @@ def build_wheel(source_dir, wheel_dir, config_settings=None): """ if config_settings is None: config_settings = {} - requires, backend = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend) + requires, backend, backend_path = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend, backend_path) with BuildEnvironment() as env: env.pip_install(requires) @@ -148,8 +157,8 @@ def build_sdist(source_dir, sdist_dir, config_settings=None): """ if config_settings is None: config_settings = {} - requires, backend = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend) + requires, backend, backend_path = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend, backend_path) with BuildEnvironment() as env: env.pip_install(requires) diff --git a/pipenv/vendor/pep517/meta.py b/pipenv/vendor/pep517/meta.py new file mode 100644 index 0000000000..d525de5c6c --- /dev/null +++ b/pipenv/vendor/pep517/meta.py @@ -0,0 +1,92 @@ +"""Build metadata for a project using PEP 517 hooks. 
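+
+Provides build() and build_as_zip() helpers, plus load(), which returns an
+importlib.metadata Distribution object for a source tree.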
+""" +import argparse +import logging +import os +import shutil +import functools + +try: + import importlib.metadata as imp_meta +except ImportError: + import importlib_metadata as imp_meta + +try: + from zipfile import Path +except ImportError: + from zipp import Path + +from .envbuild import BuildEnvironment +from .wrappers import Pep517HookCaller, quiet_subprocess_runner +from .dirtools import tempdir, mkdir_p, dir_to_zipfile +from .build import validate_system, load_system, compat_system + +log = logging.getLogger(__name__) + + +def _prep_meta(hooks, env, dest): + reqs = hooks.get_requires_for_build_wheel({}) + log.info('Got build requires: %s', reqs) + + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + + with tempdir() as td: + log.info('Trying to build metadata in %s', td) + filename = hooks.prepare_metadata_for_build_wheel(td, {}) + source = os.path.join(td, filename) + shutil.move(source, os.path.join(dest, os.path.basename(filename))) + + +def build(source_dir='.', dest=None, system=None): + system = system or load_system(source_dir) + dest = os.path.join(source_dir, dest or 'dist') + mkdir_p(dest) + validate_system(system) + hooks = Pep517HookCaller( + source_dir, system['build-backend'], system.get('backend-path') + ) + + with hooks.subprocess_runner(quiet_subprocess_runner): + with BuildEnvironment() as env: + env.pip_install(system['requires']) + _prep_meta(hooks, env, dest) + + +def build_as_zip(builder=build): + with tempdir() as out_dir: + builder(dest=out_dir) + return dir_to_zipfile(out_dir) + + +def load(root): + """ + Given a source directory (root) of a package, + return an importlib.metadata.Distribution object + with metadata build from that package. + """ + root = os.path.expanduser(root) + system = compat_system(root) + builder = functools.partial(build, source_dir=root, system=system) + path = Path(build_as_zip(builder)) + return imp_meta.PathDistribution(path) + + +parser = argparse.ArgumentParser() +parser.add_argument( + 'source_dir', + help="A directory containing pyproject.toml", +) +parser.add_argument( + '--out-dir', '-o', + help="Destination in which to save the builds relative to source dir", +) + + +def main(): + args = parser.parse_args() + build(args.source_dir, args.out_dir) + + +if __name__ == '__main__': + main() diff --git a/pipenv/vendor/pep517/wrappers.py b/pipenv/vendor/pep517/wrappers.py index b14b899150..00a3d1a789 100644 --- a/pipenv/vendor/pep517/wrappers.py +++ b/pipenv/vendor/pep517/wrappers.py @@ -1,14 +1,24 @@ +import threading from contextlib import contextmanager import os from os.path import dirname, abspath, join as pjoin import shutil -from subprocess import check_call +from subprocess import check_call, check_output, STDOUT import sys from tempfile import mkdtemp from . 
import compat -_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py') + +try: + import importlib.resources as resources + + def _in_proc_script_path(): + return resources.path(__package__, '_in_process.py') +except ImportError: + @contextmanager + def _in_proc_script_path(): + yield pjoin(dirname(abspath(__file__)), '_in_process.py') @contextmanager @@ -22,10 +32,29 @@ def tempdir(): class BackendUnavailable(Exception): """Will be raised if the backend cannot be imported in the hook process.""" + def __init__(self, traceback): + self.traceback = traceback + + +class BackendInvalid(Exception): + """Will be raised if the backend is invalid.""" + def __init__(self, backend_name, backend_path, message): + self.backend_name = backend_name + self.backend_path = backend_path + self.message = message + + +class HookMissing(Exception): + """Will be raised on missing hooks.""" + def __init__(self, hook_name): + super(HookMissing, self).__init__(hook_name) + self.hook_name = hook_name class UnsupportedOperation(Exception): """May be raised by build_sdist if the backend indicates that it can't.""" + def __init__(self, traceback): + self.traceback = traceback def default_subprocess_runner(cmd, cwd=None, extra_environ=None): @@ -37,25 +66,86 @@ def default_subprocess_runner(cmd, cwd=None, extra_environ=None): check_call(cmd, cwd=cwd, env=env) +def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None): + """A method of calling the wrapper subprocess while suppressing output.""" + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + + check_output(cmd, cwd=cwd, env=env, stderr=STDOUT) + + +def norm_and_check(source_tree, requested): + """Normalise and check a backend path. + + Ensure that the requested backend path is specified as a relative path, + and resolves to a location under the given source tree. + + Return an absolute version of the requested path. + """ + if os.path.isabs(requested): + raise ValueError("paths must be relative") + + abs_source = os.path.abspath(source_tree) + abs_requested = os.path.normpath(os.path.join(abs_source, requested)) + # We have to use commonprefix for Python 2.7 compatibility. So we + # normalise case to avoid problems because commonprefix is a character + # based comparison :-( + norm_source = os.path.normcase(abs_source) + norm_requested = os.path.normcase(abs_requested) + if os.path.commonprefix([norm_source, norm_requested]) != norm_source: + raise ValueError("paths must be inside source tree") + + return abs_requested + + class Pep517HookCaller(object): """A wrapper around a source directory to be built with a PEP 517 backend. source_dir : The path to the source directory, containing pyproject.toml. - backend : The build backend spec, as per PEP 517, from pyproject.toml. + build_backend : The build backend spec, as per PEP 517, from + pyproject.toml. + backend_path : The backend path, as per PEP 517, from pyproject.toml. + runner : A callable that invokes the wrapper subprocess. + + The 'runner', if provided, must expect the following: + cmd : a list of strings representing the command and arguments to + execute, as would be passed to e.g. 'subprocess.check_call'. + cwd : a string representing the working directory that must be + used for the subprocess. Corresponds to the provided source_dir. + extra_environ : a dict mapping environment variable names to values + which must be set for the subprocess execution. 
""" - def __init__(self, source_dir, build_backend): + def __init__( + self, + source_dir, + build_backend, + backend_path=None, + runner=None, + ): + if runner is None: + runner = default_subprocess_runner + self.source_dir = abspath(source_dir) self.build_backend = build_backend - self._subprocess_runner = default_subprocess_runner + if backend_path: + backend_path = [ + norm_and_check(self.source_dir, p) for p in backend_path + ] + self.backend_path = backend_path + self._subprocess_runner = runner - # TODO: Is this over-engineered? Maybe frontends only need to - # set this when creating the wrapper, not on every call. @contextmanager def subprocess_runner(self, runner): + """A context manager for temporarily overriding the default subprocess + runner. + """ prev = self._subprocess_runner self._subprocess_runner = runner - yield - self._subprocess_runner = prev + try: + yield + finally: + self._subprocess_runner = prev def get_requires_for_build_wheel(self, config_settings=None): """Identify packages required for building a wheel @@ -72,18 +162,21 @@ def get_requires_for_build_wheel(self, config_settings=None): }) def prepare_metadata_for_build_wheel( - self, metadata_directory, config_settings=None): + self, metadata_directory, config_settings=None, + _allow_fallback=True): """Prepare a *.dist-info folder with metadata for this project. Returns the name of the newly created folder. If the build backend defines a hook with this name, it will be called in a subprocess. If not, the backend will be asked to build a wheel, - and the dist-info extracted from that. + and the dist-info extracted from that (unless _allow_fallback is + False). """ return self._call_hook('prepare_metadata_for_build_wheel', { 'metadata_directory': abspath(metadata_directory), 'config_settings': config_settings, + '_allow_fallback': _allow_fallback, }) def build_wheel( @@ -139,25 +232,77 @@ def _call_hook(self, hook_name, kwargs): # letters, digits and _, . and : characters, and will be used as a # Python identifier, so non-ASCII content is wrong on Python 2 in # any case). + # For backend_path, we use sys.getfilesystemencoding. 
if sys.version_info[0] == 2: build_backend = self.build_backend.encode('ASCII') else: build_backend = self.build_backend + extra_environ = {'PEP517_BUILD_BACKEND': build_backend} + + if self.backend_path: + backend_path = os.pathsep.join(self.backend_path) + if sys.version_info[0] == 2: + backend_path = backend_path.encode(sys.getfilesystemencoding()) + extra_environ['PEP517_BACKEND_PATH'] = backend_path with tempdir() as td: - compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'), + hook_input = {'kwargs': kwargs} + compat.write_json(hook_input, pjoin(td, 'input.json'), indent=2) # Run the hook in a subprocess - self._subprocess_runner( - [sys.executable, _in_proc_script, hook_name, td], - cwd=self.source_dir, - extra_environ={'PEP517_BUILD_BACKEND': build_backend} - ) + with _in_proc_script_path() as script: + self._subprocess_runner( + [sys.executable, str(script), hook_name, td], + cwd=self.source_dir, + extra_environ=extra_environ + ) data = compat.read_json(pjoin(td, 'output.json')) if data.get('unsupported'): - raise UnsupportedOperation + raise UnsupportedOperation(data.get('traceback', '')) if data.get('no_backend'): - raise BackendUnavailable + raise BackendUnavailable(data.get('traceback', '')) + if data.get('backend_invalid'): + raise BackendInvalid( + backend_name=self.build_backend, + backend_path=self.backend_path, + message=data.get('backend_error', '') + ) + if data.get('hook_missing'): + raise HookMissing(hook_name) return data['return_val'] + + +class LoggerWrapper(threading.Thread): + """ + Read messages from a pipe and redirect them + to a logger (see python's logging module). + """ + + def __init__(self, logger, level): + threading.Thread.__init__(self) + self.daemon = True + + self.logger = logger + self.level = level + + # create the pipe and reader + self.fd_read, self.fd_write = os.pipe() + self.reader = os.fdopen(self.fd_read) + + self.start() + + def fileno(self): + return self.fd_write + + @staticmethod + def remove_newline(msg): + return msg[:-1] if msg.endswith(os.linesep) else msg + + def run(self): + for line in self.reader: + self._write(self.remove_newline(line)) + + def _write(self, message): + self.logger.log(self.level, message) diff --git a/pipenv/vendor/pip_shims/__init__.py b/pipenv/vendor/pip_shims/__init__.py index 9320a43746..93f3a4721f 100644 --- a/pipenv/vendor/pip_shims/__init__.py +++ b/pipenv/vendor/pip_shims/__init__.py @@ -3,22 +3,30 @@ import sys -__version__ = "0.3.3" - from . 
import shims +__version__ = "0.4.0" + -old_module = sys.modules[__name__] +if "pip_shims" in sys.modules: + # mainly to keep a reference to the old module on hand so it doesn't get + # weakref'd away + if __name__ != "pip_shims": + del sys.modules["pip_shims"] +if __name__ in sys.modules: + old_module = sys.modules[__name__] -module = sys.modules[__name__] = shims._new() +module = sys.modules[__name__] = sys.modules["pip_shims"] = shims._new() module.shims = shims -module.__dict__.update({ - '__file__': __file__, - '__package__': "pip_shims", - '__path__': __path__, - '__doc__': __doc__, - '__all__': module.__all__ + ['shims',], - '__version__': __version__, - '__name__': __name__ -}) +module.__dict__.update( + { + "__file__": __file__, + "__package__": "pip_shims", + "__path__": __path__, + "__doc__": __doc__, + "__all__": module.__all__ + ["shims"], + "__version__": __version__, + "__name__": __name__, + } +) diff --git a/pipenv/vendor/pip_shims/backports.py b/pipenv/vendor/pip_shims/backports.py new file mode 100644 index 0000000000..9206cbe061 --- /dev/null +++ b/pipenv/vendor/pip_shims/backports.py @@ -0,0 +1,1183 @@ +# -*- coding=utf-8 -*- +from __future__ import absolute_import + +import atexit +import contextlib +import functools +import inspect +import os +import sys +import types + +import six +from packaging import specifiers +from vistir.compat import TemporaryDirectory + +from .environment import MYPY_RUNNING +from .utils import ( + call_function_with_correct_args, + get_method_args, + nullcontext, + suppress_setattr, +) + +if six.PY3: + from contextlib import ExitStack +else: + from contextlib2 import ExitStack + + +if MYPY_RUNNING: + from optparse import Values + from requests import Session + from typing import ( + Any, + Callable, + Dict, + Generator, + Generic, + Iterator, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, + ) + from .utils import TShimmedPath, TShim, TShimmedFunc + + TFinder = TypeVar("TFinder") + TResolver = TypeVar("TResolver") + TReqTracker = TypeVar("TReqTracker") + TLink = TypeVar("TLink") + TSession = TypeVar("TSession", bound=Session) + TCommand = TypeVar("TCommand", covariant=True) + TCommandInstance = TypeVar("TCommandInstance") + TCmdDict = Dict[str, Union[Tuple[str, str, str], TCommandInstance]] + TInstallRequirement = TypeVar("TInstallRequirement") + TShimmedCmdDict = Union[TShim, TCmdDict] + TWheelCache = TypeVar("TWheelCache") + TPreparer = TypeVar("TPreparer") + + +class SearchScope(object): + def __init__(self, find_links=None, index_urls=None): + self.index_urls = index_urls if index_urls else [] + self.find_links = find_links + + @classmethod + def create(cls, find_links=None, index_urls=None): + if not index_urls: + index_urls = ["https://pypi.org/simple"] + return cls(find_links=find_links, index_urls=index_urls) + + +class SelectionPreferences(object): + def __init__( + self, + allow_yanked=True, + allow_all_prereleases=False, + format_control=None, + prefer_binary=False, + ignore_requires_python=False, + ): + self.allow_yanked = allow_yanked + self.allow_all_prereleases = allow_all_prereleases + self.format_control = format_control + self.prefer_binary = prefer_binary + self.ignore_requires_python = ignore_requires_python + + +class TargetPython(object): + fallback_get_tags = None # type: Optional[TShimmedFunc] + + def __init__( + self, + platform=None, # type: Optional[str] + py_version_info=None, # type: Optional[Tuple[int, ...]] + abi=None, # type: Optional[str] + implementation=None, # type: Optional[str] + ): + # type: 
(...) -> None + self._given_py_version_info = py_version_info + if py_version_info is None: + py_version_info = sys.version_info[:3] + elif len(py_version_info) < 3: + py_version_info += (3 - len(py_version_info)) * (0,) + else: + py_version_info = py_version_info[:3] + py_version = ".".join(map(str, py_version_info[:2])) + self.abi = abi + self.implementation = implementation + self.platform = platform + self.py_version = py_version + self.py_version_info = py_version_info + self._valid_tags = None + + def get_tags(self): + if self._valid_tags is None and self.fallback_get_tags: + fallback_func = resolve_possible_shim(self.fallback_get_tags) + versions = None + if self._given_py_version_info: + versions = ["".join(map(str, self._given_py_version_info[:2]))] + self._valid_tags = fallback_func( + versions=versions, + platform=self.platform, + abi=self.abi, + impl=self.implementation, + ) + return self._valid_tags + + +class CandidatePreferences(object): + def __init__(self, prefer_binary=False, allow_all_prereleases=False): + self.prefer_binary = prefer_binary + self.allow_all_prereleases = allow_all_prereleases + + +class LinkCollector(object): + def __init__(self, session=None, search_scope=None): + self.session = session + self.search_scope = search_scope + + +class CandidateEvaluator(object): + @classmethod + def create( + cls, + project_name, # type: str + target_python=None, # type: Optional[TargetPython] + prefer_binary=False, # type: bool + allow_all_prereleases=False, # type: bool + specifier=None, # type: Optional[specifiers.BaseSpecifier] + hashes=None, # type: Optional[Any] + ): + if target_python is None: + target_python = TargetPython() + if specifier is None: + specifier = specifiers.SpecifierSet() + + supported_tags = target_python.get_tags() + + return cls( + project_name=project_name, + supported_tags=supported_tags, + specifier=specifier, + prefer_binary=prefer_binary, + allow_all_prereleases=allow_all_prereleases, + hashes=hashes, + ) + + def __init__( + self, + project_name, # type: str + supported_tags, # type: List[Any] + specifier, # type: specifiers.BaseSpecifier + prefer_binary=False, # type: bool + allow_all_prereleases=False, # type: bool + hashes=None, # type: Optional[Any] + ): + self._allow_all_prereleases = allow_all_prereleases + self._hashes = hashes + self._prefer_binary = prefer_binary + self._project_name = project_name + self._specifier = specifier + self._supported_tags = supported_tags + + +class LinkEvaluator(object): + def __init__( + self, + allow_yanked, + project_name, + canonical_name, + formats, + target_python, + ignore_requires_python=False, + ignore_compatibility=True, + ): + self._allow_yanked = allow_yanked + self._canonical_name = canonical_name + self._ignore_requires_python = ignore_requires_python + self._formats = formats + self._target_python = target_python + self._ignore_compatibility = ignore_compatibility + + self.project_name = project_name + + +def resolve_possible_shim(target): + # type: (TShimmedFunc) -> Optional[Union[Type, Callable]] + if target is None: + return target + if getattr(target, "shim", None) and isinstance( + target.shim, (types.MethodType, types.FunctionType) + ): + return target.shim() + return target + + +@contextlib.contextmanager +def temp_environ(): + """Allow the ability to set os.environ temporarily""" + environ = dict(os.environ) + try: + yield + finally: + os.environ.clear() + os.environ.update(environ) + + +@contextlib.contextmanager +def get_requirement_tracker(req_tracker_creator=None): + # type: 
(Optional[Callable]) -> Generator[Optional[TReqTracker], None, None] + root = os.environ.get("PIP_REQ_TRACKER") + if not req_tracker_creator: + yield None + else: + req_tracker_args = [] + _, required_args = get_method_args(req_tracker_creator.__init__) # type: ignore + with ExitStack() as ctx: + if root is None: + root = ctx.enter_context(TemporaryDirectory(prefix="req-tracker")).name + if root: + root = str(root) + ctx.enter_context(temp_environ()) + os.environ["PIP_REQ_TRACKER"] = root + if required_args is not None and "root" in required_args: + req_tracker_args.append(root) + with req_tracker_creator(*req_tracker_args) as tracker: + yield tracker + + +@contextlib.contextmanager +def ensure_resolution_dirs(**kwargs): + # type: (Any) -> Iterator[Dict[str, Any]] + """ + Ensures that the proper directories are scaffolded and present in the provided kwargs + for performing dependency resolution via pip. + + :return: A new kwargs dictionary with scaffolded directories for **build_dir**, **src_dir**, + **download_dir**, and **wheel_download_dir** added to the key value pairs. + :rtype: Dict[str, Any] + """ + keys = ("build_dir", "src_dir", "download_dir", "wheel_download_dir") + if not any(kwargs.get(key) is None for key in keys): + yield kwargs + else: + with TemporaryDirectory(prefix="pip-shims-") as base_dir: + for key in keys: + if kwargs.get(key) is not None: + continue + target = os.path.join(base_dir.name, key) + os.makedirs(target) + kwargs[key] = target + yield kwargs + + +def partial_command(shimmed_path, cmd_mapping=None): + # type: (Type, Optional[TShimmedCmdDict]) -> Union[Type[TCommandInstance], functools.partial] + """ + Maps a default set of arguments across all members of a + :class:`~pip_shims.models.ShimmedPath` instance, specifically for + :class:`~pip._internal.command.Command` instances which need + `summary` and `name` arguments. + + :param :class:`~pip_shims.models.ShimmedPath` shimmed_path: A + :class:`~pip_shims.models.ShimmedCollection` instance + :param Any cmd_mapping: A reference to use for mapping against, e.g. 
an import that depends on pip also
+    :return: The command class with defaults for ``name`` and ``summary``
+        applied, or a :func:`functools.partial` that constructs it
+    :rtype: Union[Type[TCommandInstance], functools.partial]
+    """
+    basecls = shimmed_path.shim()
+    resolved_cmd_mapping = None  # type: Optional[Dict[str, Any]]
+    cmd_mapping = resolve_possible_shim(cmd_mapping)
+    if cmd_mapping is not None and isinstance(cmd_mapping, dict):
+        resolved_cmd_mapping = cmd_mapping.copy()
+    base_args = []  # type: List[str]
+    for root_cls in basecls.mro():
+        if root_cls.__name__ == "Command":
+            _, root_init_args = get_method_args(root_cls.__init__)
+            if root_init_args is not None:
+                base_args = root_init_args.args
+    needs_name_and_summary = any(arg in base_args for arg in ("name", "summary"))
+    if not needs_name_and_summary:
+        basecls.name = shimmed_path.name
+        return basecls
+    elif (
+        not resolved_cmd_mapping
+        and needs_name_and_summary
+        and getattr(functools, "partialmethod", None)
+    ):
+        new_init = functools.partial(
+            basecls.__init__, name=shimmed_path.name, summary="Summary"
+        )
+        basecls.__init__ = new_init
+        result = basecls
+    assert resolved_cmd_mapping is not None
+    for command_name, command_info in resolved_cmd_mapping.items():
+        if getattr(command_info, "class_name", None) == shimmed_path.name:
+            summary = getattr(command_info, "summary", "Command summary")
+            result = functools.partial(basecls, command_name, summary)
+            break
+    return result
+
+
+def get_session(
+    install_cmd_provider=None,  # type: Optional[TShimmedFunc]
+    install_cmd=None,  # type: TCommandInstance
+    options=None,  # type: Optional[Values]
+):
+    # type: (...) -> TSession
+    session = None  # type: Optional[TSession]
+    if install_cmd is None:
+        assert install_cmd_provider is not None
+        install_cmd_provider = resolve_possible_shim(install_cmd_provider)
+        assert isinstance(install_cmd_provider, (type, functools.partial))
+        install_cmd = install_cmd_provider()
+    if options is None:
+        options, _ = install_cmd.parser.parse_args([])  # type: ignore
+    session = install_cmd._build_session(options)  # type: ignore
+    assert session is not None
+    atexit.register(session.close)
+    return session
+
+
+def populate_options(
+    install_command=None,  # type: TCommandInstance
+    options=None,  # type: Optional[Values]
+    **kwargs  # type: Any
+):
+    # type: (...) -> Tuple[Dict[str, Any], Values]
+    results = {}
+    if install_command is None and options is None:
+        raise TypeError("Must pass either options or InstallCommand to populate options")
+    if options is None and install_command is not None:
+        options, _ = install_command.parser.parse_args([])  # type: ignore
+    options_dict = options.__dict__
+    for provided_key, provided_value in kwargs.items():
+        if provided_key == "isolated":
+            options_key = "isolated_mode"
+        elif provided_key == "source_dir":
+            options_key = "src_dir"
+        else:
+            options_key = provided_key
+        if provided_key in options_dict and provided_value is not None:
+            setattr(options, options_key, provided_value)
+            results[provided_key] = provided_value
+        elif getattr(options, options_key, None) is not None:
+            results[provided_key] = getattr(options, options_key)
+        else:
+            results[provided_key] = provided_value
+    return results, options
+
+
+def get_requirement_set(
+    install_command=None,  # type: Optional[TCommandInstance]
+    req_set_provider=None,  # type: Optional[TShimmedFunc]
+    build_dir=None,  # type: Optional[str]
+    src_dir=None,  # type: Optional[str]
+    download_dir=None,  # type: Optional[str]
+    wheel_download_dir=None,  # type: Optional[str]
+    session=None,  # type: Optional[TSession]
+    wheel_cache=None,  # type: Optional[TWheelCache]
+    upgrade=False,  # type: bool
+    upgrade_strategy=None,  # type: Optional[str]
+    ignore_installed=False,  # type: bool
+    ignore_dependencies=False,  # type: bool
+    force_reinstall=False,  # type: bool
+    use_user_site=False,  # type: bool
+    isolated=False,  # type: bool
+    ignore_requires_python=False,  # type: bool
+    require_hashes=None,  # type: bool
+    cache_dir=None,  # type: Optional[str]
+    options=None,  # type: Optional[Values]
+    install_cmd_provider=None,  # type: Optional[TShimmedFunc]
+):
+    # type: (...) -> TRequirementSet
+    """
+    Creates a requirement set from the supplied parameters.
+
+    Not all parameters are passed through for all pip versions, but any
+    invalid parameters will be ignored if they are not needed to generate a
+    requirement set on the current pip version.
+
+    :param install_command: A :class:`~pip._internal.commands.install.InstallCommand`
+        instance which is used to generate the finder.
+    :param :class:`~pip_shims.models.ShimmedPathCollection` req_set_provider: A provider
+        to build requirement set instances.
+    :param str build_dir: The directory to build requirements in. Removed in pip 10,
+        defaults to None
+    :param str src_dir: The directory to use for source requirements. Removed in
+        pip 10, defaults to None
+    :param str download_dir: The directory to download requirement artifacts to. Removed
+        in pip 10, defaults to None
+    :param str wheel_download_dir: The directory to download wheels to. Removed in pip
+        10, defaults to None
+    :param :class:`~requests.Session` session: The pip session to use. Removed in pip 10,
+        defaults to None
+    :param WheelCache wheel_cache: The pip WheelCache instance to use for caching wheels.
+        Removed in pip 10, defaults to None
+    :param bool upgrade: Whether to try to upgrade existing requirements. Removed in pip
+        10, defaults to False.
+    :param str upgrade_strategy: The upgrade strategy to use, e.g. "only-if-needed".
+        Removed in pip 10, defaults to None.
+    :param bool ignore_installed: Whether to ignore installed packages when resolving.
+        Removed in pip 10, defaults to False.
+    :param bool ignore_dependencies: Whether to ignore dependencies of requirements
+        when resolving. Removed in pip 10, defaults to False.
+ :param bool force_reinstall: Whether to force reinstall of packages when resolving. + Removed in pip 10, defaults to False. + :param bool use_user_site: Whether to use user site packages when resolving. Removed + in pip 10, defaults to False. + :param bool isolated: Whether to resolve in isolation. Removed in pip 10, defaults + to False. + :param bool ignore_requires_python: Removed in pip 10, defaults to False. + :param bool require_hashes: Whether to require hashes when resolving. Defaults to + False. + :param Values options: An :class:`~optparse.Values` instance from an install cmd + :param install_cmd_provider: A shim for providing new install command instances. + :type install_cmd_provider: :class:`~pip_shims.models.ShimmedPathCollection` + :return: A new requirement set instance + :rtype: :class:`~pip._internal.req.req_set.RequirementSet` + """ + req_set_provider = resolve_possible_shim(req_set_provider) + if install_command is None: + install_cmd_provider = resolve_possible_shim(install_cmd_provider) + assert isinstance(install_cmd_provider, (type, functools.partial)) + install_command = install_cmd_provider() + required_args = inspect.getargs( + req_set_provider.__init__.__code__ + ).args # type: ignore + results, options = populate_options( + install_command, + options, + build_dir=build_dir, + src_dir=src_dir, + download_dir=download_dir, + upgrade=upgrade, + upgrade_strategy=upgrade_strategy, + ignore_installed=ignore_installed, + ignore_dependencies=ignore_dependencies, + force_reinstall=force_reinstall, + use_user_site=use_user_site, + isolated=isolated, + ignore_requires_python=ignore_requires_python, + require_hashes=require_hashes, + cache_dir=cache_dir, + ) + if session is None and "session" in required_args: + session = get_session(install_cmd=install_command, options=options) + results["wheel_cache"] = wheel_cache + results["session"] = session + results["wheel_download_dir"] = wheel_download_dir + return call_function_with_correct_args(req_set_provider, **results) + + +def get_package_finder( + install_cmd=None, # type: Optional[TCommand] + options=None, # type: Optional[Values] + session=None, # type: Optional[TSession] + platform=None, # type: Optional[str] + python_versions=None, # type: Optional[Tuple[str, ...]] + abi=None, # type: Optional[str] + implementation=None, # type: Optional[str] + target_python=None, # type: Optional[Any] + ignore_requires_python=None, # type: Optional[bool] + target_python_builder=None, # type: Optional[TShimmedFunc] + install_cmd_provider=None, # type: Optional[TShimmedFunc] +): + # type: (...) -> TFinder + """Shim for compatibility to generate package finders. + + Build and return a :class:`~pip._internal.index.package_finder.PackageFinder` + instance using the :class:`~pip._internal.commands.install.InstallCommand` helper + method to construct the finder, shimmed with backports as needed for compatibility. + + :param install_cmd_provider: A shim for providing new install command instances. + :type install_cmd_provider: :class:`~pip_shims.models.ShimmedPathCollection` + :param install_cmd: A :class:`~pip._internal.commands.install.InstallCommand` + instance which is used to generate the finder. + :param optparse.Values options: An optional :class:`optparse.Values` instance + generated by calling `install_cmd.parser.parse_args()` typically. + :param session: An optional session instance, can be created by the `install_cmd`. + :param Optional[str] platform: An optional platform string, e.g. 
linux_x86_64 + :param Optional[Tuple[str, ...]] python_versions: A tuple of 2-digit strings + representing python versions, e.g. ("27", "35", "36", "37"...) + :param Optional[str] abi: The target abi to support, e.g. "cp38" + :param Optional[str] implementation: An optional implementation string for limiting + searches to a specific implementation, e.g. "cp" or "py" + :param target_python: A :class:`~pip._internal.models.target_python.TargetPython` + instance (will be translated to alternate arguments if necessary on incompatible + pip versions). + :param Optional[bool] ignore_requires_python: Whether to ignore `requires_python` + on resulting candidates, only valid after pip version 19.3.1 + :param target_python_builder: A 'TargetPython' builder (e.g. the class itself, + uninstantiated) + :return: A :class:`pip._internal.index.package_finder.PackageFinder` instance + :rtype: :class:`pip._internal.index.package_finder.PackageFinder` + + :Example: + + >>> from pip_shims.shims import InstallCommand, get_package_finder + >>> install_cmd = InstallCommand() + >>> finder = get_package_finder( + ... install_cmd, python_versions=("27", "35", "36", "37", "38"), implementation=" + cp" + ... ) + >>> candidates = finder.find_all_candidates("requests") + >>> requests_222 = next(iter(c for c in candidates if c.version.public == "2.22.0")) + >>> requests_222 + <InstallationCandidate('requests', <Version('2.22.0')>, <Link https://files.pythonhos + ted.org/packages/51/bd/23c926cd341ea6b7dd0b2a00aba99ae0f828be89d72b2190f27c11d4b7fb/r + equests-2.22.0-py2.py3-none-any.whl#sha256=9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9 + a590f48c010551dc6c4b31 (from https://pypi.org/simple/requests/) (requires-python:>=2. + 7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*)>)> + """ + if install_cmd is None: + install_cmd_provider = resolve_possible_shim(install_cmd_provider) + assert isinstance(install_cmd_provider, (type, functools.partial)) + install_cmd = install_cmd_provider() + if options is None: + options, _ = install_cmd.parser.parse_args([]) # type: ignore + if session is None: + session = get_session(install_cmd=install_cmd, options=options) # type: ignore + builder_args = inspect.getargs( + install_cmd._build_package_finder.__code__ + ) # type: ignore + build_kwargs = {"options": options, "session": session} + expects_targetpython = "target_python" in builder_args.args + received_python = any(arg for arg in [platform, python_versions, abi, implementation]) + if expects_targetpython and received_python and not target_python: + if target_python_builder is None: + target_python_builder = TargetPython + py_version_info = None + if python_versions: + py_version_info_python = max(python_versions) + py_version_info = tuple([int(part) for part in py_version_info_python]) + target_python = target_python_builder( + platform=platform, + abi=abi, + implementation=implementation, + py_version_info=py_version_info, + ) + build_kwargs["target_python"] = target_python + elif any( + arg in builder_args.args + for arg in ["platform", "python_versions", "abi", "implementation"] + ): + if target_python and not received_python: + tags = target_python.get_tags() + version_impl = set([t[0] for t in tags]) + # impls = set([v[:2] for v in version_impl]) + # impls.remove("py") + # impl = next(iter(impls), "py") if not target_python + versions = set([v[2:] for v in version_impl]) + build_kwargs.update( + { + "platform": target_python.platform, + "python_versions": versions, + "abi": target_python.abi, + "implementation": 
target_python.implementation,
+            }
+        )
+    if (
+        ignore_requires_python is not None
+        and "ignore_requires_python" in builder_args.args
+    ):
+        build_kwargs["ignore_requires_python"] = ignore_requires_python
+    return install_cmd._build_package_finder(**build_kwargs)  # type: ignore
+
+
+def shim_unpack(
+    unpack_fn,  # type: TShimmedFunc
+    download_dir,  # type: str
+    ireq=None,  # type: Optional[Any]
+    link=None,  # type: Optional[Any]
+    location=None,  # type: Optional[str]
+    hashes=None,  # type: Optional[Any]
+    progress_bar="off",  # type: str
+    only_download=None,  # type: Optional[bool]
+    session=None,  # type: Optional[Any]
+):
+    # type: (...) -> None
+    """
+    Accepts all parameters that have been valid to pass
+    to :func:`pip._internal.download.unpack_url` and selects or
+    drops parameters as needed before invoking the provided
+    callable.
+
+    :param unpack_fn: A callable or shim referring to the pip implementation
+    :type unpack_fn: Callable
+    :param str download_dir: The directory to download the file to
+    :param Optional[:class:`~pip._internal.req.req_install.InstallRequirement`] ireq:
+        an Install Requirement instance, defaults to None
+    :param Optional[:class:`~pip._internal.models.link.Link`] link: A Link instance,
+        defaults to None.
+    :param Optional[str] location: A location or source directory if the target is
+        a VCS url, defaults to None.
+    :param Optional[Any] hashes: A Hashes instance, defaults to None
+    :param str progress_bar: Indicates progress bar usage during download, defaults to
+        off.
+    :param Optional[bool] only_download: Whether to skip install, defaults to None.
+    :param Optional[:class:`~requests.Session`] session: A PipSession instance, defaults to
+        None.
+    :return: The result of unpacking the url.
+    :rtype: None
+    """
+    unpack_fn = resolve_possible_shim(unpack_fn)
+    required_args = inspect.getargs(unpack_fn.__code__).args  # type: ignore
+    unpack_kwargs = {"download_dir": download_dir}
+    if ireq:
+        if not link and ireq.link:
+            link = ireq.link
+        if only_download is None:
+            only_download = ireq.is_wheel
+        if hashes is None:
+            hashes = ireq.hashes(True)
+        if location is None and getattr(ireq, "source_dir", None):
+            location = ireq.source_dir
+        unpack_kwargs.update({"link": link, "location": location})
+    if hashes is not None and "hashes" in required_args:
+        unpack_kwargs["hashes"] = hashes
+    if "progress_bar" in required_args:
+        unpack_kwargs["progress_bar"] = progress_bar
+    if only_download is not None and "only_download" in required_args:
+        unpack_kwargs["only_download"] = only_download
+    if session is not None and "session" in required_args:
+        unpack_kwargs["session"] = session
+    return unpack_fn(**unpack_kwargs)  # type: ignore
+
+
+@contextlib.contextmanager
+def make_preparer(
+    preparer_fn,  # type: TShimmedFunc
+    req_tracker_fn=None,  # type: Optional[TShimmedFunc]
+    build_dir=None,  # type: Optional[str]
+    src_dir=None,  # type: Optional[str]
+    download_dir=None,  # type: Optional[str]
+    wheel_download_dir=None,  # type: Optional[str]
+    progress_bar="off",  # type: str
+    build_isolation=False,  # type: bool
+    session=None,  # type: Optional[TSession]
+    finder=None,  # type: Optional[TFinder]
+    options=None,  # type: Optional[Values]
+    require_hashes=None,  # type: Optional[bool]
+    use_user_site=None,  # type: Optional[bool]
+    req_tracker=None,  # type: Optional[Union[TReqTracker, TShimmedFunc]]
+    install_cmd_provider=None,  # type: Optional[TShimmedFunc]
+    install_cmd=None,  # type: Optional[TCommandInstance]
+):
+    # type: (...) -> ContextManager
+    """
+    Creates a requirement preparer for preparing pip requirements.
+
+    Provides a compatibility shim that accepts all previously valid arguments and
+    discards any that are no longer used.
+
+    :raises TypeError: No requirement tracker provided and one cannot be generated
+    :raises TypeError: No valid sessions provided and one cannot be generated
+    :raises TypeError: No valid finders provided and one cannot be generated
+    :param TShimmedFunc preparer_fn: Callable or shim for generating preparers.
+    :param Optional[TShimmedFunc] req_tracker_fn: Callable or shim for generating
+        requirement trackers, defaults to None
+    :param Optional[str] build_dir: Directory for building packages and wheels,
+        defaults to None
+    :param Optional[str] src_dir: Directory to find or extract source files, defaults
+        to None
+    :param Optional[str] download_dir: Target directory to download files, defaults to
+        None
+    :param Optional[str] wheel_download_dir: Target directory to download wheels, defaults
+        to None
+    :param str progress_bar: Whether to display a progress bar, defaults to off
+    :param bool build_isolation: Whether to build requirements in isolation, defaults
+        to False
+    :param Optional[TSession] session: Existing session to use for getting requirements,
+        defaults to None
+    :param Optional[TFinder] finder: The package finder to use during resolution,
+        defaults to None
+    :param Optional[Values] options: Pip options to use if needed, defaults to None
+    :param Optional[bool] require_hashes: Whether to require hashes for preparation
+    :param Optional[bool] use_user_site: Whether to use the user site directory for
+        preparing requirements
+    :param Optional[Union[TReqTracker, TShimmedFunc]] req_tracker: The requirement
+        tracker to use for building packages, defaults to None
+    :param Optional[TCommandInstance] install_cmd: The install command used to create
+        the finder, session, and options if needed, defaults to None
+    :yield: A new requirement preparer instance
+    :rtype: ContextManager[:class:`~pip._internal.operations.prepare.RequirementPreparer`]
+
+    :Example:
+
+    >>> from pip_shims.shims import (
+    ...     InstallCommand, get_package_finder, make_preparer, get_requirement_tracker
+    ... )
+    >>> install_cmd = InstallCommand()
+    >>> pip_options, _ = install_cmd.parser.parse_args([])
+    >>> session = install_cmd._build_session(pip_options)
+    >>> finder = get_package_finder(
+    ...     install_cmd, session=session, options=pip_options
+    ... )
+    >>> with make_preparer(
+    ...     options=pip_options, finder=finder, session=session, install_cmd=install_cmd
+    ... ) as preparer:
+    ...
print(preparer) + <pip._internal.operations.prepare.RequirementPreparer object at 0x7f8a2734be80> + """ + preparer_fn = resolve_possible_shim(preparer_fn) + required_args = inspect.getargs(preparer_fn.__init__.__code__).args # type: ignore + if not req_tracker and not req_tracker_fn and "req_tracker" in required_args: + raise TypeError("No requirement tracker and no req tracker generator found!") + req_tracker_fn = resolve_possible_shim(req_tracker_fn) + pip_options_created = options is None + session_is_required = "session" in required_args + finder_is_required = "finder" in required_args + options_map = { + "src_dir": src_dir, + "download_dir": download_dir, + "wheel_download_dir": wheel_download_dir, + "build_dir": build_dir, + "progress_bar": progress_bar, + "build_isolation": build_isolation, + "require_hashes": require_hashes, + "use_user_site": use_user_site, + } + if install_cmd is None: + assert install_cmd_provider is not None + install_cmd_provider = resolve_possible_shim(install_cmd_provider) + assert isinstance(install_cmd_provider, (type, functools.partial)) + install_cmd = install_cmd_provider() + preparer_args, options = populate_options(install_cmd, options, **options_map) + if options is not None and pip_options_created: + for k, v in options_map.items(): + suppress_setattr(options, k, v, filter_none=True) + if all([session is None, install_cmd is None, session_is_required]): + raise TypeError( + "Preparer requires a session instance which was not supplied and cannot be " + "created without an InstallCommand." + ) + elif all([session is None, session_is_required]): + session = get_session(install_cmd=install_cmd, options=options) + if all([finder is None, install_cmd is None, finder_is_required]): + raise TypeError( + "RequirementPreparer requires a packagefinder but no InstallCommand" + " was provided to build one and none was passed in." + ) + elif all([finder is None, finder_is_required]): + finder = get_package_finder(install_cmd, options=options, session=session) + preparer_args.update({"finder": finder, "session": session}) + req_tracker_fn = nullcontext if not req_tracker_fn else req_tracker_fn + with req_tracker_fn() as tracker_ctx: + if "req_tracker" in required_args: + req_tracker = tracker_ctx if req_tracker is None else req_tracker + preparer_args["req_tracker"] = req_tracker + + result = call_function_with_correct_args(preparer_fn, **preparer_args) + yield result + + +def get_resolver( + resolver_fn, # type: TShimmedFunc + install_req_provider=None, # type: Optional[TShimmedFunc] + format_control_provider=None, # type: Optional[TShimmedFunc] + wheel_cache_provider=None, # type: Optional[TShimmedFunc] + finder=None, # type: Optional[TFinder] + upgrade_strategy="to-satisfy-only", # type: str + force_reinstall=None, # type: Optional[bool] + ignore_dependencies=None, # type: Optional[bool] + ignore_requires_python=None, # type: Optional[bool] + ignore_installed=True, # type: bool + use_user_site=False, # type: bool + isolated=None, # type: Optional[bool] + wheel_cache=None, # type: Optional[TWheelCache] + preparer=None, # type: Optional[TPreparer] + session=None, # type: Optional[TSession] + options=None, # type: Optional[Values] + make_install_req=None, # type: Optional[Callable] + install_cmd_provider=None, # type: Optional[TShimmedFunc] + install_cmd=None, # type: Optional[TCommandInstance] +): + # (...) -> TResolver + """ + A resolver creation compatibility shim for generating a resolver. 
+ + Consumes any argument that was previously used to instantiate a + resolver, discards anything that is no longer valid. + + .. note:: This is only valid for **pip >= 10.0.0** + + :raises ValueError: A session is required but not provided and one cannot be created + :raises ValueError: A finder is required but not provided and one cannot be created + :raises ValueError: An install requirement provider is required and has not been + provided + :param TShimmedFunc resolver_fn: The resolver function used to create new resolver + instances. + :param TShimmedFunc install_req_provider: The provider function to use to generate + install requirements if needed. + :param TShimmedFunc format_control_provider: The provider function to use to generate + a format_control instance if needed. + :param TShimmedFunc wheel_cache_provider: The provider function to use to generate + a wheel cache if needed. + :param Optional[TFinder] finder: The package finder to use during resolution, + defaults to None. + :param str upgrade_strategy: Upgrade strategy to use, defaults to ``only-if-needed``. + :param Optional[bool] force_reinstall: Whether to simulate or assume package + reinstallation during resolution, defaults to None + :param Optional[bool] ignore_dependencies: Whether to ignore package dependencies, + defaults to None + :param Optional[bool] ignore_requires_python: Whether to ignore indicated + required_python versions on packages, defaults to None + :param bool ignore_installed: Whether to ignore installed packages during resolution, + defaults to True + :param bool use_user_site: Whether to use the user site location during resolution, + defaults to False + :param Optional[bool] isolated: Whether to isolate the resolution process, defaults + to None + :param Optional[TWheelCache] wheel_cache: The wheel cache to use, defaults to None + :param Optional[TPreparer] preparer: The requirement preparer to use, defaults to + None + :param Optional[TSession] session: Existing session to use for getting requirements, + defaults to None + :param Optional[Values] options: Pip options to use if needed, defaults to None + :param Optional[functools.partial] make_install_req: The partial function to pass in + to the resolver for actually generating install requirements, if necessary + :param Optional[TCommandInstance] install_cmd: The install command used to create + the finder, session, and options if needed, defaults to None. + :return: A new resolver instance. + :rtype: :class:`~pip._internal.legacy_resolve.Resolver` + + :Example: + + >>> import os + >>> from tempdir import TemporaryDirectory + >>> from pip_shims.shims import ( + ... InstallCommand, get_package_finder, make_preparer, get_requirement_tracker, + ... get_resolver, InstallRequirement, RequirementSet + ... ) + >>> install_cmd = InstallCommand() + >>> pip_options, _ = install_cmd.parser.parse_args([]) + >>> session = install_cmd._build_session(pip_options) + >>> finder = get_package_finder( + ... install_cmd, session=session, options=pip_options + ... ) + >>> wheel_cache = WheelCache(USER_CACHE_DIR, FormatControl(None, None)) + >>> with TemporaryDirectory() as temp_base: + ... reqset = RequirementSet() + ... ireq = InstallRequirement.from_line("requests") + ... ireq.is_direct = True + ... build_dir = os.path.join(temp_base, "build") + ... src_dir = os.path.join(temp_base, "src") + ... ireq.build_location(build_dir) + ... with make_preparer( + ... options=pip_options, finder=finder, session=session, + ... 
build_dir=build_dir, install_cmd=install_cmd, + ... ) as preparer: + ... resolver = get_resolver( + ... finder=finder, ignore_dependencies=False, ignore_requires_python=True, + ... preparer=preparer, session=session, options=pip_options, + ... install_cmd=install_cmd, wheel_cache=wheel_cache, + ... ) + ... resolver.require_hashes = False + ... reqset.add_requirement(ireq) + ... results = resolver.resolve(reqset) + ... #reqset.cleanup_files() + ... for result_req in reqset.requirements: + ... print(result_req) + requests + chardet + certifi + urllib3 + idna + """ + resolver_fn = resolve_possible_shim(resolver_fn) + install_req_provider = resolve_possible_shim(install_req_provider) + format_control_provider = resolve_possible_shim(format_control_provider) + wheel_cache_provider = resolve_possible_shim(wheel_cache_provider) + install_cmd_provider = resolve_possible_shim(install_cmd_provider) + required_args = inspect.getargs(resolver_fn.__init__.__code__).args # type: ignore + install_cmd_dependency_map = {"session": session, "finder": finder} + resolver_kwargs = {} # type: Dict[str, Any] + if install_cmd is None: + assert isinstance(install_cmd_provider, (type, functools.partial)) + install_cmd = install_cmd_provider() + if options is None and install_cmd is not None: + options = install_cmd.parser.parse_args([]) # type: ignore + for arg, val in install_cmd_dependency_map.items(): + if arg not in required_args: + continue + elif val is None and install_cmd is None: + raise TypeError( + "Preparer requires a {0} but did not receive one " + "and cannot generate one".format(arg) + ) + elif arg == "session" and val is None: + val = get_session(install_cmd=install_cmd, options=options) + elif arg == "finder" and val is None: + val = get_package_finder(install_cmd, options=options, session=session) + resolver_kwargs[arg] = val + if "make_install_req" in required_args: + if make_install_req is None and install_req_provider is not None: + make_install_req = functools.partial( + install_req_provider, + isolated=isolated, + wheel_cache=wheel_cache, + # use_pep517=use_pep517, + ) + assert make_install_req is not None + resolver_kwargs["make_install_req"] = make_install_req + if "isolated" in required_args: + resolver_kwargs["isolated"] = isolated + if "wheel_cache" in required_args: + if wheel_cache is None and wheel_cache_provider is not None: + cache_dir = getattr(options, "cache_dir", None) + format_control = getattr( + options, + "format_control", + format_control_provider(None, None), # type: ignore + ) + wheel_cache = wheel_cache_provider(cache_dir, format_control) + resolver_kwargs["wheel_cache"] = wheel_cache + resolver_kwargs.update( + { + "upgrade_strategy": upgrade_strategy, + "force_reinstall": force_reinstall, + "ignore_dependencies": ignore_dependencies, + "ignore_requires_python": ignore_requires_python, + "ignore_installed": ignore_installed, + "use_user_site": use_user_site, + "preparer": preparer, + } + ) + return resolver_fn(**resolver_kwargs) # type: ignore + + +def resolve( + ireq, # type: TInstallRequirement + reqset_provider=None, # type: Optional[TShimmedFunc] + req_tracker_provider=None, # type: Optional[TShimmedFunc] + install_cmd_provider=None, # type: Optional[TShimmedFunc] + install_command=None, # type: Optional[TCommand] + finder_provider=None, # type: Optional[TShimmedFunc] + resolver_provider=None, # type: Optional[TShimmedFunc] + wheel_cache_provider=None, # type: Optional[TShimmedFunc] + format_control_provider=None, # type: Optional[TShimmedFunc] + 
make_preparer_provider=None, # type: Optional[TShimmedFunc] + options=None, # type: Optional[Values] + session=None, # type: Optional[TSession] + resolver=None, # type: Optional[TResolver] + finder=None, # type: Optional[TFinder] + upgrade_strategy="to-satisfy-only", # type: str + force_reinstall=None, # type: Optional[bool] + ignore_dependencies=None, # type: Optional[bool] + ignore_requires_python=None, # type: Optional[bool] + ignore_installed=True, # type: bool + use_user_site=False, # type: bool + isolated=None, # type: Optional[bool] + build_dir=None, # type: Optional[str] + source_dir=None, # type: Optional[str] + download_dir=None, # type: Optional[str] + cache_dir=None, # type: Optional[str] + wheel_download_dir=None, # type: Optional[str] + wheel_cache=None, # type: Optional[TWheelCache] + require_hashes=None, # type: bool +): + # (...) -> Set[TInstallRequirement] + """ + Resolves the provided **InstallRequirement**, returning a dictionary. + + Maps a dictionary of names to corresponding ``InstallRequirement`` values. + + :param :class:`~pip._internal.req.req_install.InstallRequirement` ireq: An + InstallRequirement to initiate the resolution process + :param :class:`~pip_shims.models.ShimmedPathCollection` reqset_provider: A provider + to build requirement set instances. + :param :class:`~pip_shims.models.ShimmedPathCollection` req_tracker_provider: A + provider to build requirement tracker instances + :param install_cmd_provider: A shim for providing new install command instances. + :type install_cmd_provider: :class:`~pip_shims.models.ShimmedPathCollection` + :param Optional[TCommandInstance] install_command: The install command used to + create the finder, session, and options if needed, defaults to None. + :param :class:`~pip_shims.models.ShimmedPathCollection` finder_provider: A provider + to package finder instances. + :param :class:`~pip_shims.models.ShimmedPathCollection` resolver_provider: A provider + to build resolver instances + :param TShimmedFunc wheel_cache_provider: The provider function to use to generate a + wheel cache if needed. + :param TShimmedFunc format_control_provider: The provider function to use to generate + a format_control instance if needed. + :param TShimmedFunc make_preparer_provider: Callable or shim for generating preparers. + :param Optional[Values] options: Pip options to use if needed, defaults to None + :param Optional[TSession] session: Existing session to use for getting requirements, + defaults to None + :param :class:`~pip._internal.legacy_resolve.Resolver` resolver: A pre-existing + resolver instance to use for resolution + :param Optional[TFinder] finder: The package finder to use during resolution, + defaults to None. + :param str upgrade_strategy: Upgrade strategy to use, defaults to ``only-if-needed``. 
+    :param Optional[bool] force_reinstall: Whether to simulate or assume package
+        reinstallation during resolution, defaults to None
+    :param Optional[bool] ignore_dependencies: Whether to ignore package dependencies,
+        defaults to None
+    :param Optional[bool] ignore_requires_python: Whether to ignore indicated
+        required_python versions on packages, defaults to None
+    :param bool ignore_installed: Whether to ignore installed packages during
+        resolution, defaults to True
+    :param bool use_user_site: Whether to use the user site location during resolution,
+        defaults to False
+    :param Optional[bool] isolated: Whether to isolate the resolution process, defaults
+        to None
+    :param Optional[str] build_dir: Directory for building packages and wheels, defaults
+        to None
+    :param str source_dir: The directory to use for source requirements. Removed in pip
+        10, defaults to None
+    :param Optional[str] download_dir: Target directory to download files, defaults to
+        None
+    :param str cache_dir: The cache directory to use for caching artifacts during
+        resolution
+    :param Optional[str] wheel_download_dir: Target directory to download wheels, defaults
+        to None
+    :param Optional[TWheelCache] wheel_cache: The wheel cache to use, defaults to None
+    :param bool require_hashes: Whether to require hashes when resolving. Defaults to
+        False.
+    :return: A dictionary mapping requirements to corresponding
+        :class:`~pip._internal.req.req_install.InstallRequirement`s
+    :rtype: Dict[str, :class:`~pip._internal.req.req_install.InstallRequirement`]
+
+    :Example:
+
+    >>> from pip_shims.shims import resolve, InstallRequirement
+    >>> ireq = InstallRequirement.from_line("requests>=2.20")
+    >>> results = resolve(ireq)
+    >>> for k, v in results.items():
+    ...     print("{0}: {1!r}".format(k, v))
+    requests: <InstallRequirement object: requests>=2.20 from https://files.pythonhosted.
+ org/packages/51/bd/23c926cd341ea6b7dd0b2a00aba99ae0f828be89d72b2190f27c11d4b7fb/reque + sts-2.22.0-py2.py3-none-any.whl#sha256=9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590 + f48c010551dc6c4b31 editable=False> + idna: <InstallRequirement object: idna<2.9,>=2.5 from https://files.pythonhosted.org/ + packages/14/2c/cd551d81dbe15200be1cf41cd03869a46fe7226e7450af7a6545bfc474c9/idna-2.8- + py2.py3-none-any.whl#sha256=ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432 + f7e4a3c (from requests>=2.20) editable=False> + urllib3: <InstallRequirement object: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 from htt + ps://files.pythonhosted.org/packages/b4/40/a9837291310ee1ccc242ceb6ebfd9eb21539649f19 + 3a7c8c86ba15b98539/urllib3-1.25.7-py2.py3-none-any.whl#sha256=a8a318824cc77d1fd4b2bec + 2ded92646630d7fe8619497b142c84a9e6f5a7293 (from requests>=2.20) editable=False> + chardet: <InstallRequirement object: chardet<3.1.0,>=3.0.2 from https://files.pythonh + osted.org/packages/bc/a9/01ffebfb562e4274b6487b4bb1ddec7ca55ec7510b22e4c51f14098443b8 + /chardet-3.0.4-py2.py3-none-any.whl#sha256=fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed + 4531e3e15460124c106691 (from requests>=2.20) editable=False> + certifi: <InstallRequirement object: certifi>=2017.4.17 from https://files.pythonhost + ed.org/packages/18/b0/8146a4f8dd402f60744fa380bc73ca47303cccf8b9190fd16a827281eac2/ce + rtifi-2019.9.11-py2.py3-none-any.whl#sha256=fd7c7c74727ddcf00e9acd26bba8da604ffec95bf + 1c2144e67aff7a8b50e6cef (from requests>=2.20) editable=False> + """ + reqset_provider = resolve_possible_shim(reqset_provider) + finder_provider = resolve_possible_shim(finder_provider) + resolver_provider = resolve_possible_shim(resolver_provider) + wheel_cache_provider = resolve_possible_shim(wheel_cache_provider) + format_control_provider = resolve_possible_shim(format_control_provider) + make_preparer_provider = resolve_possible_shim(make_preparer_provider) + req_tracker_provider = resolve_possible_shim(req_tracker_provider) + install_cmd_provider = resolve_possible_shim(install_cmd_provider) + if install_command is None: + assert isinstance(install_cmd_provider, (type, functools.partial)) + install_command = install_cmd_provider() + kwarg_map = { + "upgrade_strategy": upgrade_strategy, + "force_reinstall": force_reinstall, + "ignore_dependencies": ignore_dependencies, + "ignore_requires_python": ignore_requires_python, + "ignore_installed": ignore_installed, + "use_user_site": use_user_site, + "isolated": isolated, + "build_dir": build_dir, + "src_dir": source_dir, + "download_dir": download_dir, + "require_hashes": require_hashes, + "cache_dir": cache_dir, + } + kwargs, options = populate_options(install_command, options, **kwarg_map) + with ExitStack() as ctx: + kwargs = ctx.enter_context( + ensure_resolution_dirs(wheel_download_dir=wheel_download_dir, **kwargs) + ) + wheel_download_dir = kwargs.pop("wheel_download_dir") + if session is None: + session = get_session(install_cmd=install_command, options=options) + if finder is None: + finder = finder_provider( + install_command, options=options, session=session + ) # type: ignore + format_control = getattr(options, "format_control", None) + if not format_control: + format_control = format_control_provider(None, None) # type: ignore + wheel_cache = wheel_cache_provider( + kwargs["cache_dir"], format_control + ) # type: ignore + ireq.is_direct = True # type: ignore + ireq.build_location(kwargs["build_dir"]) # type: ignore + if reqset_provider is None: + raise TypeError( + "cannot resolve without a 
requirement set provider... failed!"
+            )
+        reqset = reqset_provider(
+            install_command,
+            options=options,
+            session=session,
+            wheel_download_dir=wheel_download_dir,
+            **kwargs
+        )  # type: ignore
+        if getattr(reqset, "prepare_files", None):
+            reqset.add_requirement(ireq)
+            results = reqset.prepare_files(finder)
+            result = reqset.requirements
+            reqset.cleanup_files()
+            return result
+        if make_preparer_provider is None:
+            raise TypeError("Cannot create requirement preparer, cannot resolve!")
+
+        preparer_args = {
+            "build_dir": kwargs["build_dir"],
+            "src_dir": kwargs["src_dir"],
+            "download_dir": kwargs["download_dir"],
+            "wheel_download_dir": wheel_download_dir,
+            "build_isolation": kwargs["isolated"],
+            "install_cmd": install_command,
+            "options": options,
+            "finder": finder,
+            "session": session,
+            "use_user_site": use_user_site,
+            "require_hashes": require_hashes,
+        }
+        # Enter the requirement tracker context only when the provider is an
+        # actual callable; otherwise the preparer is built without a tracker.
+        if isinstance(req_tracker_provider, (types.FunctionType, functools.partial)):
+            preparer_args["req_tracker"] = ctx.enter_context(req_tracker_provider())
+        resolver_keys = [
+            "upgrade_strategy",
+            "force_reinstall",
+            "ignore_dependencies",
+            "ignore_installed",
+            "use_user_site",
+            "isolated",
+        ]
+        resolver_args = {key: kwargs[key] for key in resolver_keys if key in kwargs}
+        if resolver_provider is None:
+            raise TypeError("Cannot resolve without a resolver provider... failed!")
+        preparer = ctx.enter_context(make_preparer_provider(**preparer_args))
+        resolver = resolver_provider(
+            finder=finder,
+            preparer=preparer,
+            session=session,
+            options=options,
+            install_cmd=install_command,
+            wheel_cache=wheel_cache,
+            **resolver_args
+        )  # type: ignore
+        reqset.add_requirement(ireq)
+        resolver.require_hashes = kwargs.get("require_hashes", False)  # type: ignore
+        resolver.resolve(reqset)  # type: ignore
+        results = reqset.requirements
+        reqset.cleanup_files()
+        return results
diff --git a/pipenv/vendor/pip_shims/environment.py b/pipenv/vendor/pip_shims/environment.py
new file mode 100644
index 0000000000..1fa7df4537
--- /dev/null
+++ b/pipenv/vendor/pip_shims/environment.py
@@ -0,0 +1,40 @@
+# -*- coding=utf-8 -*-
+import importlib
+import os
+
+
+def get_base_import_path():
+    base_import_path = os.environ.get("PIP_SHIMS_BASE_MODULE", "pip")
+    return base_import_path
+
+
+BASE_IMPORT_PATH = get_base_import_path()
+
+
+def get_pip_version(import_path=BASE_IMPORT_PATH):
+    try:
+        pip = importlib.import_module(import_path)
+    except ImportError:
+        if import_path != "pip":
+            return get_pip_version(import_path="pip")
+        else:
+            import subprocess
+
+            version = subprocess.check_output(["pip", "--version"])
+            if version:
+                version = version.decode("utf-8").split()[1]
+                return version
+            return "0.0.0"
+    version = getattr(pip, "__version__", None)
+    return version
+
+
+def is_type_checking():
+    try:
+        from typing import TYPE_CHECKING
+    except ImportError:
+        return False
+    return TYPE_CHECKING
+
+
+MYPY_RUNNING = os.environ.get("MYPY_RUNNING", is_type_checking())
diff --git a/pipenv/vendor/pip_shims/models.py b/pipenv/vendor/pip_shims/models.py
new file mode 100644
index 0000000000..f3fc06426d
--- /dev/null
+++ b/pipenv/vendor/pip_shims/models.py
@@ -0,0 +1,1152 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, print_function
+
+import collections
+import functools
+import importlib
+import inspect
+import operator
+import sys
+import types
+import weakref
+
+import six
+
+from .
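The `environment` module added above is the hook that lets a downstream consumer redirect every shim at a vendored copy of pip (pipenv points it at its patched `notpip`). Below is a minimal sketch of that flow; the vendored module path used here is hypothetical, and only `PIP_SHIMS_BASE_MODULE` and the two helpers come from the code above:

```python
import os

# Must be set before pip_shims is imported, because BASE_IMPORT_PATH is
# computed once at module import time from this variable.
os.environ["PIP_SHIMS_BASE_MODULE"] = "myapp._vendor.pip"  # hypothetical path

from pip_shims.environment import get_base_import_path, get_pip_version

print(get_base_import_path())  # -> "myapp._vendor.pip"
# Falls back to importing plain "pip", and then to `pip --version`, when the
# vendored module cannot be imported.
print(get_pip_version())
```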
import backports +from .environment import BASE_IMPORT_PATH, MYPY_RUNNING, get_pip_version +from .utils import ( + add_mixin_to_class, + apply_alias, + ensure_function, + fallback_is_artifact, + fallback_is_file_url, + fallback_is_vcs, + get_method_args, + has_property, + make_classmethod, + make_method, + nullcontext, + parse_version, + resolve_possible_shim, + set_default_kwargs, + split_package, + suppress_setattr, +) + +# format: off +six.add_move( + six.MovedAttribute("Sequence", "collections", "collections.abc") +) # type: ignore # noqa +six.add_move( + six.MovedAttribute("Mapping", "collections", "collections.abc") +) # type: ignore # noqa +from six.moves import Sequence, Mapping # type: ignore # noqa # isort:skip + +# format: on + + +if MYPY_RUNNING: + import packaging.version + + Module = types.ModuleType + from typing import ( # noqa:F811 + Any, + Callable, + ContextManager, + Dict, + Iterable, + List, + Mapping, + Optional, + Set, + Tuple, + Type, + TypeVar, + Union, + ) + + +PIP_VERSION_SET = { + "7.0.0", + "7.0.1", + "7.0.2", + "7.0.3", + "7.1.0", + "7.1.1", + "7.1.2", + "8.0.0", + "8.0.1", + "8.0.2", + "8.0.3", + "8.1.0", + "8.1.1", + "8.1.2", + "9.0.0", + "9.0.1", + "9.0.2", + "9.0.3", + "10.0.0", + "10.0.1", + "18.0", + "18.1", + "19.0", + "19.0.1", + "19.0.2", + "19.0.3", + "19.1", + "19.1.1", + "19.2", + "19.2.1", + "19.2.2", + "19.2.3", + "19.3", + "19.3.1", +} + + +ImportTypesBase = collections.namedtuple( + "ImportTypes", ["FUNCTION", "CLASS", "MODULE", "CONTEXTMANAGER"] +) + + +class ImportTypes(ImportTypesBase): + FUNCTION = 0 + CLASS = 1 + MODULE = 2 + CONTEXTMANAGER = 3 + METHOD = 4 + ATTRIBUTE = 5 + + +class PipVersion(Sequence): + def __init__( + self, + version, + round_prereleases_up=True, + base_import_path=None, + vendor_import_path="pip._vendor", + ): + # type: (str, bool, Optional[str], str) -> None + self.version = version + self.vendor_import_path = vendor_import_path + self.round_prereleases_up = round_prereleases_up + parsed_version = self._parse() + if round_prereleases_up and parsed_version.is_prerelease: + parsed_version._version = parsed_version._version._replace(dev=None, pre=None) + self.version = str(parsed_version) + parsed_version = self._parse() + if base_import_path is None: + if parsed_version >= parse_version("10.0.0"): + base_import_path = "{0}._internal".format(BASE_IMPORT_PATH) + else: + base_import_path = "{0}".format(BASE_IMPORT_PATH) + self.base_import_path = base_import_path + self.parsed_version = parsed_version + + @property + def version_tuple(self): + return tuple(self.parsed_version._version) + + @property + def version_key(self): + return self.parsed_version._key + + def is_valid(self, compared_to): + # type: (PipVersion) -> bool + return self == compared_to + + def __len__(self): + # type: () -> int + return len(self.version_tuple) + + def __getitem__(self, item): + return self.version_tuple[item] + + def _parse(self): + # type: () -> packaging.version._BaseVersion + return parse_version(self.version) + + def __hash__(self): + # type: () -> int + return hash(self.parsed_version) + + def __str__(self): + # type: () -> str + return "{0!s}".format(self.parsed_version) + + def __repr__(self): + # type: () -> str + return ( + "<PipVersion {0!r}, Path: {1!r}, Vendor Path: {2!r}, " + "Parsed Version: {3!r}>" + ).format( + self.version, + self.base_import_path, + self.vendor_import_path, + self.parsed_version, + ) + + def __gt__(self, other): + # type: (PipVersion) -> bool + return self.parsed_version > other.parsed_version + + def 
__lt__(self, other): + # type: (PipVersion) -> bool + return self.parsed_version < other.parsed_version + + def __le__(self, other): + # type: (PipVersion) -> bool + return self.parsed_version <= other.parsed_version + + def __ge__(self, other): + # type: (PipVersion) -> bool + return self.parsed_version >= other.parsed_version + + def __ne__(self, other): + # type: (object) -> bool + if not isinstance(other, PipVersion): + return NotImplemented + return self.parsed_version != other.parsed_version + + def __eq__(self, other): + # type: (object) -> bool + if not isinstance(other, PipVersion): + return NotImplemented + return self.parsed_version == other.parsed_version + + +version_cache = weakref.WeakValueDictionary() # type: Mapping[str, PipVersion] +CURRENT_PIP_VERSION = None # type: Optional[PipVersion] + + +def pip_version_lookup(version, *args, **kwargs): + # type: (str, Any, Any) -> PipVersion + try: + cached = version_cache.get(version) + except KeyError: + cached = None + if cached is not None: + return cached + pip_version = PipVersion(version, *args, **kwargs) + version_cache[version] = pip_version + return pip_version + + +def lookup_current_pip_version(): + # type: () -> PipVersion + global CURRENT_PIP_VERSION + if CURRENT_PIP_VERSION is not None: + return CURRENT_PIP_VERSION + CURRENT_PIP_VERSION = pip_version_lookup(get_pip_version()) + return CURRENT_PIP_VERSION + + +class PipVersionRange(Sequence): + def __init__(self, start, end): + # type: (PipVersion, PipVersion) -> None + if start > end: + raise ValueError("Start version must come before end version") + self._versions = (start, end) + + def __str__(self): + # type: () -> str + return "{0!s} -> {1!s}".format(self._versions[0], self._versions[-1]) + + @property + def base_import_paths(self): + # type: () -> Set[str] + return set([version.base_import_path for version in self._versions]) + + @property + def vendor_import_paths(self): + # type: () -> Set[str] + return set([version.vendor_import_path for version in self._versions]) + + def is_valid(self): + # type: () -> bool + return pip_version_lookup(get_pip_version()) in self + + def __contains__(self, item): + # type: (PipVersion) -> bool + if not isinstance(item, PipVersion): + raise TypeError("Need a PipVersion instance to compare") + return item >= self[0] and item <= self[-1] + + def __getitem__(self, item): + # type: (int) -> PipVersion + return self._versions[item] + + def __len__(self): + # type: () -> int + return len(self._versions) + + def __lt__(self, other): + # type: ("PipVersionRange") -> bool + return (other.is_valid() and not self.is_valid()) or ( + not (self.is_valid() or other.is_valid()) + or (self.is_valid() and other.is_valid()) + and self._versions[-1] < other._versions[-1] + ) + + def __hash__(self): + # type: () -> int + return hash(self._versions) + + +class ShimmedPath(object): + __modules = {} # type: Dict[str, Module] + + def __init__( + self, + name, # type: str + import_target, # type: str + import_type, # type: int + version_range, # type: PipVersionRange + provided_methods=None, # type: Optional[Dict[str, Callable]] + provided_functions=None, # type: Optional[Dict[str, Callable]] + provided_classmethods=None, # type: Optional[Dict[str, Callable]] + provided_contextmanagers=None, # type: Optional[Dict[str, Callable]] + provided_mixins=None, # type: Optional[List[Type]] + default_args=None, # type: Dict[str, Sequence[List[Any], Dict[str, Any]]] + ): + # type: (...) 
-> None + if provided_methods is None: + provided_methods = {} + if provided_classmethods is None: + provided_classmethods = {} + if provided_functions is None: + provided_functions = {} + if provided_contextmanagers is None: + provided_contextmanagers = {} + if provided_mixins is None: + provided_mixins = [] + if default_args is None: + default_args = {} + self.version_range = version_range + self.name = name + self.full_import_path = import_target + module_path, name_to_import = split_package(import_target) + self.module_path = module_path + self.name_to_import = name_to_import + self.import_type = import_type + self._imported = None # type: Optional[Module] + self._provided = None # type: Optional[Union[Module, Type, Callable, Any]] + self.provided_methods = provided_methods + self.provided_functions = provided_functions + self.provided_classmethods = provided_classmethods + self.provided_contextmanagers = provided_contextmanagers + self.provided_mixins = [m for m in provided_mixins if m is not None] + self.default_args = default_args + self.aliases = [] # type: List[List[str]] + self._shimmed = None # type: Optional[Any] + + def _as_tuple(self): + # type: () -> Tuple[str, PipVersionRange, str, int] + return (self.name, self.version_range, self.full_import_path, self.import_type) + + def alias(self, aliases): + # type: (List[str]) -> "ShimmedPath" + self.aliases.append(aliases) + return self + + @classmethod + def _import_module(cls, module): + # type: (str) -> Optional[Module] + if module in ShimmedPath.__modules: + result = ShimmedPath.__modules[module] + if result is not None: + return result + try: + imported = importlib.import_module(module) + except ImportError: + return None + else: + ShimmedPath.__modules[module] = imported + return imported + + @classmethod + def _parse_provides_dict( + cls, + provides, # type: Dict[str, Callable] + prepend_arg_to_callables=None, # type: Optional[str] + ): + # type: (...) 
-> Dict[str, Callable] + creating_methods = False + creating_classmethods = False + if prepend_arg_to_callables is not None: + if prepend_arg_to_callables == "self": + creating_methods = True + elif prepend_arg_to_callables == "cls": + creating_classmethods = True + provides_map = {} + for item_name, item_value in provides.items(): + if isinstance(item_value, ShimmedPath): + item_value = item_value.shim() + if inspect.isfunction(item_value): + callable_args = inspect.getargs(item_value.__code__).args + if "self" not in callable_args and creating_methods: + item_value = make_method(item_value)(item_name) + elif "cls" not in callable_args and creating_classmethods: + item_value = make_classmethod(item_value)(item_name) + elif isinstance(item_value, six.string_types): + module_path, name = split_package(item_value) + module = cls._import_module(module_path) + item_value = getattr(module, name, None) + if item_value is not None: + provides_map[item_name] = item_value + return provides_map + + def _update_default_kwargs(self, parent, provided): + # type: (Union[Module, None], Union[Type, Module]) -> Tuple[Optional[Module], Union[Type, Module]] # noqa + for func_name, defaults in self.default_args.items(): + # * Note that we set default args here because we have the + # * option to use it, even though currently we dont + # * so we are forcibly ignoring the linter warning about it + default_args, default_kwargs = defaults # noqa:W0612 + provided = set_default_kwargs( + provided, func_name, *default_args, **default_kwargs + ) + return parent, provided + + def _ensure_functions(self, provided): + # type: (Union[Module, Type, None]) -> Any + functions = self._parse_provides_dict(self.provided_functions) + if provided is None: + provided = __module__ # type: ignore # noqa:F821 + for funcname, func in functions.items(): + func = ensure_function(provided, funcname, func) + setattr(provided, funcname, func) + return provided + + def _ensure_methods(self, provided): + # type: (Type) -> Type + """Given a base class, a new name, and any number of functions to + attach, turns those functions into classmethods, attaches them, + and returns an updated class object. 
+ """ + if not self.is_class: + return provided + if not inspect.isclass(provided): + raise TypeError("Provided argument is not a class: {0!r}".format(provided)) + methods = self._parse_provides_dict( + self.provided_methods, prepend_arg_to_callables="self" + ) + classmethods = self._parse_provides_dict( + self.provided_classmethods, prepend_arg_to_callables="cls" + ) + if not methods and not classmethods: + return provided + new_functions = provided.__dict__.copy() + if classmethods: + new_functions.update( + { + method_name: clsmethod + for method_name, clsmethod in classmethods.items() + if method_name not in provided.__dict__ + } + ) + if methods: + new_functions.update( + { + method_name: method + for method_name, method in methods.items() + if method_name not in provided.__dict__ + } + ) + classname = provided.__name__ + if six.PY2: + classname = classname.encode(sys.getdefaultencoding()) + type_ = type(classname, (provided,), new_functions) + return type_ + + @property + def is_class(self): + # type: () -> bool + return self.import_type == ImportTypes.CLASS + + @property + def is_module(self): + # type: () -> bool + return self.import_type == ImportTypes.MODULE + + @property + def is_method(self): + # type: () -> bool + return self.import_type == ImportTypes.METHOD + + @property + def is_function(self): + # type: () -> bool + return self.import_type == ImportTypes.FUNCTION + + @property + def is_contextmanager(self): + # type: () -> bool + return self.import_type == ImportTypes.CONTEXTMANAGER + + @property + def is_attribute(self): + # type: () -> bool + return self.import_type == ImportTypes.ATTRIBUTE + + def __contains__(self, pip_version): + # type: (str) -> bool + return pip_version_lookup(pip_version) in self.version_range + + @property + def is_valid(self): + # type: () -> bool + return self.version_range.is_valid() + + @property + def sort_order(self): + # type: () -> int + return 1 if self.is_valid else 0 + + def _shim_base(self, imported, attribute_name): + # type: (Union[Module, None], str) -> Any + result = getattr(imported, attribute_name, None) + return self._apply_aliases(imported, result) + + def _apply_aliases(self, imported, target): + # type: (Union[Module, None], Any) -> Any + for alias_list in self.aliases: + target = apply_alias(imported, target, *alias_list) + suppress_setattr(imported, self.name, target) + return target + + def _shim_parent(self, imported, attribute_name): + # type: (Union[Module, None], str) -> Tuple[Optional[Module], Any] + result = self._shim_base(imported, attribute_name) + if result is not None: + imported, result = self._update_default_kwargs(imported, result) + suppress_setattr(imported, attribute_name, result) + return imported, result + + def update_sys_modules(self, imported): + # type: (Optional[Module]) -> None + if imported is None: + return None + if self.calculated_module_path in sys.modules: + del sys.modules[self.calculated_module_path] + sys.modules[self.calculated_module_path] = imported + + def shim_class(self, imported, attribute_name): + # type: (Union[Module, None], str) -> Type + imported, result = self._shim_parent(imported, attribute_name) + if result is not None: + assert inspect.isclass(result) # noqa + result = self._ensure_methods(result) + if self.provided_mixins: + result = add_mixin_to_class(result, self.provided_mixins) + self._imported = imported + self._provided = result + self.update_sys_modules(imported) + if imported is not None: + ShimmedPath.__modules[imported.__name__] = imported + return result + + 
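As an aside, the interplay of `_ensure_methods` and `shim_class` above is easiest to see in isolation. The sketch below reproduces the same `type()`-based subclassing trick with invented names; unlike `_ensure_methods`, which copies the whole `__dict__`, it skips dunder entries for brevity:

```python
class Original(object):
    def existing(self):
        return "existing"


def _backported(self):
    # Stand-in for a method that older pip versions lack.
    return "backported"


# Copy the class namespace, inject only what is missing, rebuild with type().
namespace = {k: v for k, v in vars(Original).items() if not k.startswith("__")}
namespace.setdefault("missing", _backported)
Patched = type("Original", (Original,), namespace)

print(Patched().existing())  # inherited behaviour is unchanged
print(Patched().missing())   # injected because the base class lacked it
```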
def shim_module(self, imported, attribute_name): + # type: (Union[Module, None], str) -> Module + imported, result = self._shim_parent(imported, attribute_name) + if result is not None: + result = self._ensure_functions(result) + full_import_path = "{0}.{1}".format( + self.calculated_module_path, attribute_name + ) + self._imported = imported + assert isinstance(result, types.ModuleType) + self._provided = result + if full_import_path in sys.modules: + del sys.modules[full_import_path] + sys.modules[full_import_path] = result + self.update_sys_modules(imported) + if imported is not None: + ShimmedPath.__modules[imported.__name__] = imported + return result # type: ignore + + def shim_function(self, imported, attribute_name): + # type: (Union[Module, None], str) -> Callable + return self._shim_base(imported, attribute_name) + + def shim_attribute(self, imported, attribute_name): + # type: (Union[Module, None], Any) -> Any + return self._shim_base(imported, attribute_name) + + def shim_contextmanager(self, imported, attribute_name): + # type: (Union[Module, None], str) -> Callable + result = self._shim_base(imported, attribute_name) + if result is None: + result = nullcontext + suppress_setattr(imported, attribute_name, result) + self.update_sys_modules(imported) + return result + + @property + def shimmed(self): + # type: () -> Any + if self._shimmed is None: + self._shimmed = self.shim() + return self._shimmed + + def shim(self): + # type: () -> (Union[Module, Callable, ContextManager, Type]) + imported = self._import() + if self.is_class: + return self.shim_class(imported, self.name_to_import) + elif self.is_module: + return self.shim_module(imported, self.name_to_import) + elif self.is_contextmanager: + return self.shim_contextmanager(imported, self.name_to_import) + elif self.is_function: + return self.shim_function(imported, self.name_to_import) + elif self.is_attribute: + return self.shim_attribute(imported, self.name_to_import) + return self._shim_base(imported, self.name_to_import) + + @property + def calculated_module_path(self): + current_pip = lookup_current_pip_version() + prefix = current_pip.base_import_path + return ".".join([prefix, self.module_path]).rstrip(".") + + def _import(self, prefix=None): + # type: (Optional[str]) -> Optional[Module] + # TODO: Decide whether to use _imported and _shimmed or to set the shimmed + # always to _imported and never save the unshimmed module + if self._imported is not None: + return self._imported + result = self._import_module(self.calculated_module_path) + return result + + def __hash__(self): + # type: () -> int + return hash(self._as_tuple()) + + +class ShimmedPathCollection(object): + + __registry = {} # type: Dict[str, Any] + + def __init__(self, name, import_type, paths=None): + # type: (str, int, Optional[Sequence[ShimmedPath]]) -> None + self.name = name + self.import_type = import_type + self.paths = set() # type: Set[ShimmedPath] + self.top_path = None + self._default = None + self._default_args = {} # type: Dict[str, Sequence[List[Any], Dict[str, Any]]] + self.provided_methods = {} # type: Dict[str, Callable] + self.provided_functions = {} # type: Dict[str, Callable] + self.provided_contextmanagers = {} # type: Dict[str, Callable] + self.provided_classmethods = {} # type: Dict[str, Callable] + self.provided_mixins = [] # type: List[Type] + self.pre_shim_functions = [] # type: List[Callable] + self.aliases = [] # type: List[List[str]] + if paths is not None: + if isinstance(paths, six.string_types): + self.create_path(paths, 
version_start=lookup_current_pip_version()) + else: + self.paths.update(set(paths)) + self.register() + + def register(self): + # type: () -> None + self.__registry[self.name] = self + + @classmethod + def get_registry(cls): + # type: () -> Dict[str, "ShimmedPathCollection"] + return cls.__registry.copy() + + def add_path(self, path): + # type: (ShimmedPath) -> None + self.paths.add(path) + + def set_default(self, default): + # type: (Any) -> None + if isinstance(default, (ShimmedPath, ShimmedPathCollection)): + default = default.shim() + try: + default.__qualname__ = default.__name__ = self.name + except AttributeError: + pass + self._default = default + + def set_default_args(self, callable_name, *args, **kwargs): + # type: (str, Any, Any) -> None + self._default_args.update({callable_name: [args, kwargs]}) + + def provide_function(self, name, fn): + # type: (str, Union[Callable, ShimmedPath, ShimmedPathCollection]) -> None + if isinstance(fn, (ShimmedPath, ShimmedPathCollection)): + fn = resolve_possible_shim(fn) # type: ignore + self.provided_functions[name] = fn # type: ignore + + def provide_method(self, name, fn): + # type: (str, Union[Callable, ShimmedPath, ShimmedPathCollection, property]) -> None + if isinstance(fn, (ShimmedPath, ShimmedPathCollection)): + fn = resolve_possible_shim(fn) # type: ignore + self.provided_methods[name] = fn # type: ignore + + def alias(self, aliases): + # type: (List[str]) -> None + """ + Takes a list of methods, functions, attributes, etc and ensures they + all exist on the object pointing at the same referent. + + :param List[str] aliases: Names to map to the same functionality if they do not + exist. + :return: None + :rtype: None + """ + self.aliases.append(aliases) + + def add_mixin(self, mixin): + # type: (Optional[Union[Type, ShimmedPathCollection]]) -> None + if isinstance(mixin, ShimmedPathCollection): + mixin = mixin.shim() + if mixin is not None and inspect.isclass(mixin): + self.provided_mixins.append(mixin) + + def create_path(self, import_path, version_start, version_end=None): + # type: (str, str, Optional[str]) -> None + pip_version_start = pip_version_lookup(version_start) + if version_end is None: + version_end = "9999" + pip_version_end = pip_version_lookup(version_end) + version_range = PipVersionRange(pip_version_start, pip_version_end) + new_path = ShimmedPath( + self.name, + import_path, + self.import_type, + version_range, + self.provided_methods, + self.provided_functions, + self.provided_classmethods, + self.provided_contextmanagers, + self.provided_mixins, + self._default_args, + ) + if self.aliases: + for alias_list in self.aliases: + new_path.alias(alias_list) + self.add_path(new_path) + + def _sort_paths(self): + # type: () -> List[ShimmedPath] + return sorted(self.paths, key=operator.attrgetter("version_range"), reverse=True) + + def _get_top_path(self): + # type: () -> Optional[ShimmedPath] + return next(iter(self._sort_paths()), None) + + @classmethod + def traverse(cls, shim): + # type: (Union[ShimmedPath, ShimmedPathCollection, Any]) -> Any + if isinstance(shim, (ShimmedPath, ShimmedPathCollection)): + result = shim.shim() + return result + return shim + + def shim(self): + # type: () -> Any + top_path = self._get_top_path() # type: Union[ShimmedPath, None] + if not self.pre_shim_functions: + result = self.traverse(top_path) + else: + for fn in self.pre_shim_functions: + result = fn(top_path) + result = self.traverse(result) + if result == nullcontext and self._default is not None: + default_result = 
self.traverse(self._default)
+            if default_result:
+                return default_result
+        if result is None and self._default is not None:
+            result = self.traverse(self._default)
+        return result
+
+    def pre_shim(self, fn):
+        # type: (Callable) -> None
+        self.pre_shim_functions.append(fn)
+
+
+def import_pip():
+    return importlib.import_module("pip")
+
+
+_strip_extras = ShimmedPathCollection("_strip_extras", ImportTypes.FUNCTION)
+_strip_extras.create_path("req.req_install._strip_extras", "7.0.0", "18.0.0")
+_strip_extras.create_path("req.constructors._strip_extras", "18.1.0")
+
+cmdoptions = ShimmedPathCollection("cmdoptions", ImportTypes.MODULE)
+cmdoptions.create_path("cli.cmdoptions", "18.1", "9999")
+cmdoptions.create_path("cmdoptions", "7.0.0", "18.0")
+
+commands_dict = ShimmedPathCollection("commands_dict", ImportTypes.ATTRIBUTE)
+commands_dict.create_path("commands.commands_dict", "7.0.0", "9999")
+
+SessionCommandMixin = ShimmedPathCollection("SessionCommandMixin", ImportTypes.CLASS)
+SessionCommandMixin.create_path("cli.req_command.SessionCommandMixin", "19.3.0", "9999")
+
+Command = ShimmedPathCollection("Command", ImportTypes.CLASS)
+Command.set_default_args("__init__", name="PipCommand", summary="Default pip command.")
+Command.add_mixin(SessionCommandMixin)
+Command.create_path("cli.base_command.Command", "18.1", "9999")
+Command.create_path("basecommand.Command", "7.0.0", "18.0")
+
+ConfigOptionParser = ShimmedPathCollection("ConfigOptionParser", ImportTypes.CLASS)
+ConfigOptionParser.create_path("cli.parser.ConfigOptionParser", "18.1", "9999")
+ConfigOptionParser.create_path("baseparser.ConfigOptionParser", "7.0.0", "18.0")
+
+InstallCommand = ShimmedPathCollection("InstallCommand", ImportTypes.CLASS)
+InstallCommand.pre_shim(
+    functools.partial(backports.partial_command, cmd_mapping=commands_dict)
+)
+InstallCommand.create_path("commands.install.InstallCommand", "7.0.0", "9999")
+
+FAVORITE_HASH = ShimmedPathCollection("FAVORITE_HASH", ImportTypes.ATTRIBUTE)
+FAVORITE_HASH.create_path("utils.hashes.FAVORITE_HASH", "7.0.0", "9999")
+
+FormatControl = ShimmedPathCollection("FormatControl", ImportTypes.CLASS)
+FormatControl.create_path("models.format_control.FormatControl", "18.1", "9999")
+FormatControl.create_path("index.FormatControl", "7.0.0", "18.0")
+
+FrozenRequirement = ShimmedPathCollection("FrozenRequirement", ImportTypes.CLASS)
+FrozenRequirement.create_path("FrozenRequirement", "7.0.0", "9.0.3")
+FrozenRequirement.create_path("operations.freeze.FrozenRequirement", "10.0.0", "9999")
+
+get_installed_distributions = ShimmedPathCollection(
+    "get_installed_distributions", ImportTypes.FUNCTION
+)
+get_installed_distributions.create_path(
+    "utils.misc.get_installed_distributions", "10", "9999"
+)
+get_installed_distributions.create_path("utils.get_installed_distributions", "7", "9.0.3")
+
+get_supported = ShimmedPathCollection("get_supported", ImportTypes.FUNCTION)
+get_supported.create_path("pep425tags.get_supported", "7.0.0", "9999")
+
+get_tags = ShimmedPathCollection("get_tags", ImportTypes.FUNCTION)
+get_tags.create_path("pep425tags.get_tags", "7.0.0", "9999")
+
+index_group = ShimmedPathCollection("index_group", ImportTypes.FUNCTION)
+index_group.create_path("cli.cmdoptions.index_group", "18.1", "9999")
+index_group.create_path("cmdoptions.index_group", "7.0.0", "18.0")
+
+InstallationError =
ShimmedPathCollection("InstallationError", ImportTypes.CLASS) +InstallationError.create_path("exceptions.InstallationError", "7.0.0", "9999") + +UninstallationError = ShimmedPathCollection("UninstallationError", ImportTypes.CLASS) +UninstallationError.create_path("exceptions.UninstallationError", "7.0.0", "9999") + +DistributionNotFound = ShimmedPathCollection("DistributionNotFound", ImportTypes.CLASS) +DistributionNotFound.create_path("exceptions.DistributionNotFound", "7.0.0", "9999") + +RequirementsFileParseError = ShimmedPathCollection( + "RequirementsFileParseError", ImportTypes.CLASS +) +RequirementsFileParseError.create_path( + "exceptions.RequirementsFileParseError", "7.0.0", "9999" +) + +BestVersionAlreadyInstalled = ShimmedPathCollection( + "BestVersionAlreadyInstalled", ImportTypes.CLASS +) +BestVersionAlreadyInstalled.create_path( + "exceptions.BestVersionAlreadyInstalled", "7.0.0", "9999" +) + +BadCommand = ShimmedPathCollection("BadCommand", ImportTypes.CLASS) +BadCommand.create_path("exceptions.BadCommand", "7.0.0", "9999") + +CommandError = ShimmedPathCollection("CommandError", ImportTypes.CLASS) +CommandError.create_path("exceptions.CommandError", "7.0.0", "9999") + +PreviousBuildDirError = ShimmedPathCollection("PreviousBuildDirError", ImportTypes.CLASS) +PreviousBuildDirError.create_path("exceptions.PreviousBuildDirError", "7.0.0", "9999") + +install_req_from_editable = ShimmedPathCollection( + "install_req_from_editable", ImportTypes.FUNCTION +) +install_req_from_editable.create_path( + "req.constructors.install_req_from_editable", "18.1", "9999" +) +install_req_from_editable.create_path( + "req.req_install.InstallRequirement.from_editable", "7.0.0", "18.0" +) + +install_req_from_line = ShimmedPathCollection( + "install_req_from_line", ImportTypes.FUNCTION +) +install_req_from_line.create_path( + "req.constructors.install_req_from_line", "18.1", "9999" +) +install_req_from_line.create_path( + "req.req_install.InstallRequirement.from_line", "7.0.0", "18.0" +) + +install_req_from_req_string = ShimmedPathCollection( + "install_req_from_req_string", ImportTypes.FUNCTION +) +install_req_from_req_string.create_path( + "req.constructors.install_req_from_req_string", "19.0", "9999" +) + +InstallRequirement = ShimmedPathCollection("InstallRequirement", ImportTypes.CLASS) +InstallRequirement.provide_method("from_line", install_req_from_line) +InstallRequirement.provide_method("from_editable", install_req_from_editable) +InstallRequirement.alias(["build_location", "ensure_build_location"]) + +InstallRequirement.create_path("req.req_install.InstallRequirement", "7.0.0", "9999") + +is_archive_file = ShimmedPathCollection("is_archive_file", ImportTypes.FUNCTION) +is_archive_file.create_path("req.constructors.is_archive_file", "19.3", "9999") +is_archive_file.create_path("download.is_archive_file", "7.0.0", "19.2.3") + +is_file_url = ShimmedPathCollection("is_file_url", ImportTypes.FUNCTION) +is_file_url.set_default(fallback_is_file_url) +is_file_url.create_path("download.is_file_url", "7.0.0", "19.2.3") + +unpack_url = ShimmedPathCollection("unpack_url", ImportTypes.FUNCTION) +unpack_url.create_path("download.unpack_url", "7.0.0", "19.3.9") +unpack_url.create_path("operations.prepare.unpack_url", "20.0", "9999") + +shim_unpack = ShimmedPathCollection("shim_unpack", ImportTypes.FUNCTION) +shim_unpack.set_default(functools.partial(backports.shim_unpack, unpack_fn=unpack_url)) + +is_installable_dir = ShimmedPathCollection("is_installable_dir", ImportTypes.FUNCTION) 
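Every entry in this registry block follows the same recipe: construct a collection, then declare one import path per era of pip, with `"9999"` as the open-ended upper bound. A hedged sketch of the same API applied to a made-up target (the module paths here are fictional):

```python
from pip_shims.models import ImportTypes, ShimmedPathCollection

ExampleClass = ShimmedPathCollection("ExampleClass", ImportTypes.CLASS)
ExampleClass.create_path("new_home.example.ExampleClass", "18.1", "9999")
ExampleClass.create_path("old_home.ExampleClass", "7.0.0", "18.0")

# shim() imports from whichever path matches the installed pip version and
# returns the target object, or None when no candidate path can be imported.
resolved = ExampleClass.shim()
```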
+is_installable_dir.create_path("utils.misc.is_installable_dir", "10.0.0", "9999") +is_installable_dir.create_path("utils.is_installable_dir", "7.0.0", "9.0.3") + +Link = ShimmedPathCollection("Link", ImportTypes.CLASS) +Link.provide_method("is_vcs", property(fallback_is_vcs)) +Link.provide_method("is_artifact", property(fallback_is_artifact)) +Link.create_path("models.link.Link", "19.0.0", "9999") +Link.create_path("index.Link", "7.0.0", "18.1") + +make_abstract_dist = ShimmedPathCollection("make_abstract_dist", ImportTypes.FUNCTION) +make_abstract_dist.create_path( + "distributions.make_distribution_for_install_requirement", "19.1.2", "9999" +) +make_abstract_dist.create_path( + "operations.prepare.make_abstract_dist", "10.0.0", "19.1.1" +) +make_abstract_dist.create_path("req.req_set.make_abstract_dist", "7.0.0", "9.0.3") + +make_distribution_for_install_requirement = ShimmedPathCollection( + "make_distribution_for_install_requirement", ImportTypes.CLASS +) +make_distribution_for_install_requirement.create_path( + "distributions.make_distribution_for_install_requirement", "19.1.2", "9999" +) + +make_option_group = ShimmedPathCollection("make_option_group", ImportTypes.FUNCTION) +make_option_group.create_path("cli.cmdoptions.make_option_group", "18.1", "9999") +make_option_group.create_path("cmdoptions.make_option_group", "7.0.0", "18.0") + +PackageFinder = ShimmedPathCollection("PackageFinder", ImportTypes.CLASS) +PackageFinder.create_path("index.PackageFinder", "7.0.0", "19.9") +PackageFinder.create_path("index.package_finder.PackageFinder", "20.0", "9999") + +CandidateEvaluator = ShimmedPathCollection("CandidateEvaluator", ImportTypes.CLASS) +CandidateEvaluator.set_default(backports.CandidateEvaluator) +CandidateEvaluator.create_path("index.CandidateEvaluator", "19.1.0", "19.3.9") +CandidateEvaluator.create_path("index.package_finder.CandidateEvaluator", "20.0", "9999") + +CandidatePreferences = ShimmedPathCollection("CandidatePreferences", ImportTypes.CLASS) +CandidatePreferences.set_default(backports.CandidatePreferences) +CandidatePreferences.create_path("index.CandidatePreferences", "19.2.0", "19.9") +CandidatePreferences.create_path( + "index.package_finder.CandidatePreferences", "20.0", "9999" +) + +LinkCollector = ShimmedPathCollection("LinkCollector", ImportTypes.CLASS) +LinkCollector.set_default(backports.LinkCollector) +LinkCollector.create_path("collector.LinkCollector", "19.3.0", "19.9") +LinkCollector.create_path("index.collector.LinkCollector", "20.0", "9999") + +LinkEvaluator = ShimmedPathCollection("LinkEvaluator", ImportTypes.CLASS) +LinkEvaluator.set_default(backports.LinkEvaluator) +LinkEvaluator.create_path("index.LinkEvaluator", "19.2.0", "19.9") +LinkEvaluator.create_path("index.package_finder.LinkEvaluator", "20.0", "9999") + +TargetPython = ShimmedPathCollection("TargetPython", ImportTypes.CLASS) +backports.TargetPython.fallback_get_tags = get_tags +TargetPython.set_default(backports.TargetPython) +TargetPython.create_path("models.target_python.TargetPython", "19.2.0", "9999") + +SearchScope = ShimmedPathCollection("SearchScope", ImportTypes.CLASS) +SearchScope.set_default(backports.SearchScope) +SearchScope.create_path("models.search_scope.SearchScope", "19.2.0", "9999") + +SelectionPreferences = ShimmedPathCollection("SelectionPreferences", ImportTypes.CLASS) +SelectionPreferences.set_default(backports.SelectionPreferences) +SelectionPreferences.create_path( + "models.selection_prefs.SelectionPreferences", "19.2.0", "9999" +) + +parse_requirements = 
ShimmedPathCollection("parse_requirements", ImportTypes.FUNCTION) +parse_requirements.create_path("req.req_file.parse_requirements", "7.0.0", "9999") + +path_to_url = ShimmedPathCollection("path_to_url", ImportTypes.FUNCTION) +path_to_url.create_path("download.path_to_url", "7.0.0", "19.2.3") +path_to_url.create_path("utils.urls.path_to_url", "19.3.0", "9999") + +PipError = ShimmedPathCollection("PipError", ImportTypes.CLASS) +PipError.create_path("exceptions.PipError", "7.0.0", "9999") + +RequirementPreparer = ShimmedPathCollection("RequirementPreparer", ImportTypes.CLASS) +RequirementPreparer.create_path("operations.prepare.RequirementPreparer", "7", "9999") + +RequirementSet = ShimmedPathCollection("RequirementSet", ImportTypes.CLASS) +RequirementSet.create_path("req.req_set.RequirementSet", "7.0.0", "9999") + +RequirementTracker = ShimmedPathCollection( + "RequirementTracker", ImportTypes.CONTEXTMANAGER +) +RequirementTracker.create_path("req.req_tracker.RequirementTracker", "7.0.0", "9999") + +TempDirectory = ShimmedPathCollection("TempDirectory", ImportTypes.CLASS) +TempDirectory.create_path("utils.temp_dir.TempDirectory", "7.0.0", "9999") + +get_requirement_tracker = ShimmedPathCollection( + "get_requirement_tracker", ImportTypes.CONTEXTMANAGER +) +get_requirement_tracker.set_default( + functools.partial(backports.get_requirement_tracker, RequirementTracker.shim()) +) +get_requirement_tracker.create_path( + "req.req_tracker.get_requirement_tracker", "7.0.0", "9999" +) + +Resolver = ShimmedPathCollection("Resolver", ImportTypes.CLASS) +Resolver.create_path("resolve.Resolver", "7.0.0", "19.1.1") +Resolver.create_path("legacy_resolve.Resolver", "19.1.2", "9999") + +SafeFileCache = ShimmedPathCollection("SafeFileCache", ImportTypes.CLASS) +SafeFileCache.create_path("network.cache.SafeFileCache", "19.3.0", "9999") +SafeFileCache.create_path("download.SafeFileCache", "7.0.0", "19.2.3") + +UninstallPathSet = ShimmedPathCollection("UninstallPathSet", ImportTypes.CLASS) +UninstallPathSet.create_path("req.req_uninstall.UninstallPathSet", "7.0.0", "9999") + +url_to_path = ShimmedPathCollection("url_to_path", ImportTypes.FUNCTION) +url_to_path.create_path("download.url_to_path", "7.0.0", "19.2.3") +url_to_path.create_path("utils.urls.url_to_path", "19.3.0", "9999") + +USER_CACHE_DIR = ShimmedPathCollection("USER_CACHE_DIR", ImportTypes.ATTRIBUTE) +USER_CACHE_DIR.create_path("locations.USER_CACHE_DIR", "7.0.0", "9999") + +VcsSupport = ShimmedPathCollection("VcsSupport", ImportTypes.CLASS) +VcsSupport.create_path("vcs.VcsSupport", "7.0.0", "19.1.1") +VcsSupport.create_path("vcs.versioncontrol.VcsSupport", "19.2", "9999") + +Wheel = ShimmedPathCollection("Wheel", ImportTypes.CLASS) +Wheel.create_path("wheel.Wheel", "7.0.0", "9999") + +WheelCache = ShimmedPathCollection("WheelCache", ImportTypes.CLASS) +WheelCache.create_path("cache.WheelCache", "10.0.0", "9999") +WheelCache.create_path("wheel.WheelCache", "7", "9.0.3") + +WheelBuilder = ShimmedPathCollection("WheelBuilder", ImportTypes.CLASS) +WheelBuilder.create_path("wheel.WheelBuilder", "7.0.0", "19.9") +WheelBuilder.create_path("wheel_builder.WheelBuilder", "20.0", "9999") + +AbstractDistribution = ShimmedPathCollection("AbstractDistribution", ImportTypes.CLASS) +AbstractDistribution.create_path( + "distributions.base.AbstractDistribution", "19.1.2", "9999" +) + +InstalledDistribution = ShimmedPathCollection("InstalledDistribution", ImportTypes.CLASS) +InstalledDistribution.create_path( + "distributions.installed.InstalledDistribution", 
"19.1.2", "9999" +) + +SourceDistribution = ShimmedPathCollection("SourceDistribution", ImportTypes.CLASS) +SourceDistribution.create_path("req.req_set.IsSDist", "7.0.0", "9.0.3") +SourceDistribution.create_path("operations.prepare.IsSDist", "10.0.0", "19.1.1") +SourceDistribution.create_path( + "distributions.source.SourceDistribution", "19.1.2", "19.2.3" +) +SourceDistribution.create_path( + "distributions.source.legacy.SourceDistribution", "19.3.0", "19.9" +) +SourceDistribution.create_path("distributions.source.SourceDistribution", "20.0", "9999") + +WheelDistribution = ShimmedPathCollection("WheelDistribution", ImportTypes.CLASS) +WheelDistribution.create_path("distributions.wheel.WheelDistribution", "19.1.2", "9999") + +PyPI = ShimmedPathCollection("PyPI", ImportTypes.ATTRIBUTE) +PyPI.create_path("models.index.PyPI", "7.0.0", "9999") + +stdlib_pkgs = ShimmedPathCollection("stdlib_pkgs", ImportTypes.ATTRIBUTE) +stdlib_pkgs.create_path("utils.compat.stdlib_pkgs", "18.1", "9999") +stdlib_pkgs.create_path("compat.stdlib_pkgs", "7", "18.0") + +DEV_PKGS = ShimmedPathCollection("DEV_PKGS", ImportTypes.ATTRIBUTE) +DEV_PKGS.create_path("commands.freeze.DEV_PKGS", "9.0.0", "9999") +DEV_PKGS.set_default({"setuptools", "pip", "distribute", "wheel"}) + + +get_package_finder = ShimmedPathCollection("get_package_finder", ImportTypes.FUNCTION) +get_package_finder.set_default( + functools.partial( + backports.get_package_finder, + install_cmd_provider=InstallCommand, + target_python_builder=TargetPython.shim(), + ) +) + + +make_preparer = ShimmedPathCollection("make_preparer", ImportTypes.FUNCTION) +make_preparer.set_default( + functools.partial( + backports.make_preparer, + install_cmd_provider=InstallCommand, + preparer_fn=RequirementPreparer, + req_tracker_fn=get_requirement_tracker, + ) +) + + +get_resolver = ShimmedPathCollection("get_resolver", ImportTypes.FUNCTION) +get_resolver.set_default( + functools.partial( + backports.get_resolver, + install_cmd_provider=InstallCommand, + resolver_fn=Resolver, + install_req_provider=install_req_from_req_string, + wheel_cache_provider=WheelCache, + format_control_provider=FormatControl, + ) +) + + +get_requirement_set = ShimmedPathCollection("get_requirement_set", ImportTypes.FUNCTION) +get_requirement_set.set_default( + functools.partial( + backports.get_requirement_set, + install_cmd_provider=InstallCommand, + req_set_provider=RequirementSet, + ) +) + + +resolve = ShimmedPathCollection("resolve", ImportTypes.FUNCTION) +resolve.set_default( + functools.partial( + backports.resolve, + install_cmd_provider=InstallCommand, + reqset_provider=get_requirement_set, + finder_provider=get_package_finder, + resolver_provider=get_resolver, + wheel_cache_provider=WheelCache, + format_control_provider=FormatControl, + make_preparer_provider=make_preparer, + req_tracker_provider=get_requirement_tracker, + ) +) diff --git a/pipenv/vendor/pip_shims/shims.py b/pipenv/vendor/pip_shims/shims.py index 4cad473a54..d5a11f0137 100644 --- a/pipenv/vendor/pip_shims/shims.py +++ b/pipenv/vendor/pip_shims/shims.py @@ -1,23 +1,25 @@ # -*- coding=utf-8 -*- -import importlib -import os +from __future__ import absolute_import + import sys -from collections import namedtuple -from contextlib import contextmanager +import types -import six +from packaging.version import parse as parse_version -# format: off -six.add_move(six.MovedAttribute("Callable", "collections", "collections.abc")) # noqa -from six.moves import Callable # type: ignore # noqa # isort:skip +from .models import ( + 
ShimmedPathCollection, + get_package_finder, + import_pip, + lookup_current_pip_version, +) -# format: on +class _shims(types.ModuleType): + CURRENT_PIP_VERSION = str(lookup_current_pip_version()) -class _shims(object): - CURRENT_PIP_VERSION = "19.1.1" - BASE_IMPORT_PATH = os.environ.get("PIP_SHIMS_BASE_MODULE", "pip") - path_info = namedtuple("PathInfo", "path start_version end_version") + @classmethod + def parse_version(cls, version): + return parse_version(version) def __dir__(self): result = list(self._locations.keys()) + list(self.__dict__.keys()) @@ -44,364 +46,18 @@ def __all__(self): return list(self._locations.keys()) def __init__(self): - # from .utils import _parse, get_package, STRING_TYPES - from . import utils - - self.utils = utils - self._parse = utils._parse - self.get_package = utils.get_package - self.STRING_TYPES = utils.STRING_TYPES - self._modules = { - "pip": importlib.import_module(self.BASE_IMPORT_PATH), - "pip_shims.utils": utils, - } - self.pip_version = getattr(self._modules["pip"], "__version__") - version_types = ["post", "pre", "dev", "rc"] - if any(post in self.pip_version.rsplit(".")[-1] for post in version_types): - self.pip_version, _, _ = self.pip_version.rpartition(".") - self.parsed_pip_version = self._parse(self.pip_version) - self._contextmanagers = ("RequirementTracker",) - self._moves = { - "InstallRequirement": { - "from_editable": "install_req_from_editable", - "from_line": "install_req_from_line", - } - } - self._locations = { - "parse_version": ("index.parse_version", "7", "9999"), - "_strip_extras": ( - ("req.req_install._strip_extras", "7", "18.0"), - ("req.constructors._strip_extras", "18.1", "9999"), - ), - "cmdoptions": ( - ("cli.cmdoptions", "18.1", "9999"), - ("cmdoptions", "7.0.0", "18.0"), - ), - "Command": ( - ("cli.base_command.Command", "18.1", "9999"), - ("basecommand.Command", "7.0.0", "18.0"), - ), - "ConfigOptionParser": ( - ("cli.parser.ConfigOptionParser", "18.1", "9999"), - ("baseparser.ConfigOptionParser", "7.0.0", "18.0"), - ), - "DistributionNotFound": ("exceptions.DistributionNotFound", "7.0.0", "9999"), - "FAVORITE_HASH": ("utils.hashes.FAVORITE_HASH", "7.0.0", "9999"), - "FormatControl": ( - ("models.format_control.FormatControl", "18.1", "9999"), - ("index.FormatControl", "7.0.0", "18.0"), - ), - "FrozenRequirement": ( - ("FrozenRequirement", "7.0.0", "9.0.3"), - ("operations.freeze.FrozenRequirement", "10.0.0", "9999"), - ), - "get_installed_distributions": ( - ("utils.misc.get_installed_distributions", "10", "9999"), - ("utils.get_installed_distributions", "7", "9.0.3"), - ), - "index_group": ( - ("cli.cmdoptions.index_group", "18.1", "9999"), - ("cmdoptions.index_group", "7.0.0", "18.0"), - ), - "InstallRequirement": ("req.req_install.InstallRequirement", "7.0.0", "9999"), - "InstallationError": ("exceptions.InstallationError", "7.0.0", "9999"), - "UninstallationError": ("exceptions.UninstallationError", "7.0.0", "9999"), - "DistributionNotFound": ("exceptions.DistributionNotFound", "7.0.0", "9999"), - "RequirementsFileParseError": ( - "exceptions.RequirementsFileParseError", - "7.0.0", - "9999", - ), - "BestVersionAlreadyInstalled": ( - "exceptions.BestVersionAlreadyInstalled", - "7.0.0", - "9999", - ), - "BadCommand": ("exceptions.BadCommand", "7.0.0", "9999"), - "CommandError": ("exceptions.CommandError", "7.0.0", "9999"), - "PreviousBuildDirError": ( - "exceptions.PreviousBuildDirError", - "7.0.0", - "9999", - ), - "install_req_from_editable": ( - ("req.constructors.install_req_from_editable", "18.1", "9999"), 
- ("req.req_install.InstallRequirement.from_editable", "7.0.0", "18.0"), - ), - "install_req_from_line": ( - ("req.constructors.install_req_from_line", "18.1", "9999"), - ("req.req_install.InstallRequirement.from_line", "7.0.0", "18.0"), - ), - "is_archive_file": ("download.is_archive_file", "7.0.0", "9999"), - "is_file_url": ("download.is_file_url", "7.0.0", "9999"), - "unpack_url": ("download.unpack_url", "7.0.0", "9999"), - "is_installable_dir": ( - ("utils.misc.is_installable_dir", "10.0.0", "9999"), - ("utils.is_installable_dir", "7.0.0", "9.0.3"), - ), - "Link": ("index.Link", "7.0.0", "9999"), - "make_abstract_dist": ( - ( - "distributions.make_distribution_for_install_requirement", - "19.1.2", - "9999", - ), - ("operations.prepare.make_abstract_dist", "10.0.0", "19.1.1"), - ("req.req_set.make_abstract_dist", "7.0.0", "9.0.3"), - ), - "make_distribution_for_install_requirement": ( - "distributions.make_distribution_for_install_requirement", - "19.1.2", - "9999", - ), - "make_option_group": ( - ("cli.cmdoptions.make_option_group", "18.1", "9999"), - ("cmdoptions.make_option_group", "7.0.0", "18.0"), - ), - "PackageFinder": ("index.PackageFinder", "7.0.0", "9999"), - "CandidateEvaluator": ("index.CandidateEvaluator", "19.1", "9999"), - "parse_requirements": ("req.req_file.parse_requirements", "7.0.0", "9999"), - "path_to_url": ("download.path_to_url", "7.0.0", "9999"), - "PipError": ("exceptions.PipError", "7.0.0", "9999"), - "RequirementPreparer": ( - "operations.prepare.RequirementPreparer", - "7", - "9999", - ), - "RequirementSet": ("req.req_set.RequirementSet", "7.0.0", "9999"), - "RequirementTracker": ("req.req_tracker.RequirementTracker", "7.0.0", "9999"), - "Resolver": ( - ("resolve.Resolver", "7.0.0", "19.1.1"), - ("legacy_resolve.Resolver", "19.1.2", "9999"), - ), - "SafeFileCache": ("download.SafeFileCache", "7.0.0", "9999"), - "UninstallPathSet": ("req.req_uninstall.UninstallPathSet", "7.0.0", "9999"), - "url_to_path": ("download.url_to_path", "7.0.0", "9999"), - "USER_CACHE_DIR": ("locations.USER_CACHE_DIR", "7.0.0", "9999"), - "VcsSupport": ( - ("vcs.VcsSupport", "7.0.0", "19.1.1"), - ("vcs.versioncontrol.VcsSupport", "19.2", "9999"), - ), - "Wheel": ("wheel.Wheel", "7.0.0", "9999"), - "WheelCache": ( - ("cache.WheelCache", "10.0.0", "9999"), - ("wheel.WheelCache", "7", "9.0.3"), - ), - "WheelBuilder": ("wheel.WheelBuilder", "7.0.0", "9999"), - "AbstractDistribution": ( - "distributions.base.AbstractDistribution", - "19.1.2", - "9999", - ), - "InstalledDistribution": ( - "distributions.installed.InstalledDistribution", - "19.1.2", - "9999", - ), - "SourceDistribution": ( - ("req.req_set.IsSDist", "7.0.0", "9.0.3"), - ("operations.prepare.IsSDist", "10.0.0", "19.1.1"), - ("distributions.source.SourceDistribution", "19.1.2", "9999"), - ), - "WheelDistribution": ( - "distributions.wheel.WheelDistribution", - "19.1.2", - "9999", - ), - "PyPI": ("models.index.PyPI", "7.0.0", "9999"), - "stdlib_pkgs": ( - ("utils.compat.stdlib_pkgs", "18.1", "9999"), - ("compat.stdlib_pkgs", "7", "18.0"), - ), - "DEV_PKGS": ( - ("commands.freeze.DEV_PKGS", "9.0.0", "9999"), - ({"setuptools", "pip", "distribute", "wheel"}, "7.0.0", "8.1.2"), - ), - } - - def _ensure_methods(self, cls, classname, *methods): - method_names = [m[0] for m in methods] - if all(getattr(cls, m, None) for m in method_names): - return cls - new_functions = {} - - class BaseFunc(Callable): - def __init__(self, func_base, name, *args, **kwargs): - self.func = func_base - self.__name__ = self.__qualname__ = name - - def 
__call__(self, cls, *args, **kwargs): - return self.func(*args, **kwargs) - - for method_name, fn in methods: - new_functions[method_name] = classmethod(BaseFunc(fn, method_name)) - if six.PY2: - classname = classname.encode(sys.getdefaultencoding()) - type_ = type(classname, (cls,), new_functions) - return type_ - - def _get_module_paths(self, module, base_path=None): - if not base_path: - base_path = self.BASE_IMPORT_PATH - module = self._locations[module] - if not isinstance(next(iter(module)), (tuple, list)): - module_paths = self.get_pathinfo(module) - else: - module_paths = [self.get_pathinfo(pth) for pth in module] - return self.sort_paths(module_paths, base_path) - - def _get_remapped_methods(self, moved_package): - original_base, original_target = moved_package - original_import = self._import(self._locations[original_target]) - old_to_new = {} - new_to_old = {} - for method_name, new_method_name in self._moves.get(original_target, {}).items(): - module_paths = self._get_module_paths(new_method_name) - target = next( - iter( - sorted(set([tgt for mod, tgt in map(self.get_package, module_paths)])) - ), - None, - ) - old_to_new[method_name] = { - "target": target, - "name": new_method_name, - "location": self._locations[new_method_name], - "module": self._import(self._locations[new_method_name]), - } - new_to_old[new_method_name] = { - "target": original_target, - "name": method_name, - "location": self._locations[original_target], - "module": original_import, - } - return (old_to_new, new_to_old) - - def _import_moved_module(self, moved_package): - old_to_new, new_to_old = self._get_remapped_methods(moved_package) - imported = None - method_map = [] - new_target = None - for old_method, remapped in old_to_new.items(): - new_name = remapped["name"] - new_target = new_to_old[new_name]["target"] - if not imported: - imported = self._modules[new_target] = new_to_old[new_name]["module"] - method_map.append((old_method, remapped["module"])) - if getattr(imported, "__class__", "") == type: - imported = self._ensure_methods(imported, new_target, *method_map) - self._modules[new_target] = imported - if imported: - return imported - return - - def _check_moved_methods(self, search_pth, moves): - module_paths = [ - self.get_package(pth) for pth in self._get_module_paths(search_pth) - ] - moved_methods = [ - (base, target_cls) for base, target_cls in module_paths if target_cls in moves - ] - return next(iter(moved_methods), None) + self.pip = import_pip() + self._locations = ShimmedPathCollection.get_registry() + self._locations["get_package_finder"] = get_package_finder + self.pip_version = str(lookup_current_pip_version()) + self.parsed_pip_version = lookup_current_pip_version() def __getattr__(self, *args, **kwargs): locations = super(_shims, self).__getattribute__("_locations") - contextmanagers = super(_shims, self).__getattribute__("_contextmanagers") - moves = super(_shims, self).__getattribute__("_moves") if args[0] in locations: - moved_package = self._check_moved_methods(args[0], moves) - if moved_package: - imported = self._import_moved_module(moved_package) - if imported: - return imported - else: - imported = self._import(locations[args[0]]) - if not imported and args[0] in contextmanagers: - return self.nullcontext - return imported + return locations[args[0]].shim() return super(_shims, self).__getattribute__(*args, **kwargs) - def is_valid(self, path_info_tuple): - if ( - path_info_tuple.start_version <= self.parsed_pip_version - and path_info_tuple.end_version >= 
self.parsed_pip_version - ): - return 1 - return 0 - - def sort_paths(self, module_paths, base_path): - if not isinstance(module_paths, list): - module_paths = [module_paths] - prefix_order = [pth.format(base_path) for pth in ["{0}._internal", "{0}"]] - # Pip 10 introduced the internal api division - if self._parse(self.pip_version) < self._parse("10.0.0"): - prefix_order = reversed(prefix_order) - paths = sorted(module_paths, key=self.is_valid, reverse=True) - search_order = [ - "{0}.{1}".format(p, pth.path) - for p in prefix_order - for pth in paths - if pth is not None - ] - return search_order - - def import_module(self, module): - if module in self._modules: - return self._modules[module] - if not isinstance(module, six.string_types): - return module - try: - imported = importlib.import_module(module) - except ImportError: - imported = None - else: - self._modules[module] = imported - return imported - - def none_or_ctxmanager(self, pkg_name): - if pkg_name in self._contextmanagers: - return self.nullcontext - return None - - def get_package_from_modules(self, modules): - modules = [ - (package_name, self.import_module(m)) - for m, package_name in map(self.get_package, modules) - ] - imports = [ - getattr(m, pkg, self.none_or_ctxmanager(pkg)) - for pkg, m in modules - if m is not None - ] - return next(iter(imports), None) - - def _import(self, module_paths, base_path=None): - if not base_path: - base_path = self.BASE_IMPORT_PATH - if not isinstance(next(iter(module_paths)), (tuple, list)): - module_paths = self.get_pathinfo(module_paths) - else: - module_paths = [self.get_pathinfo(pth) for pth in module_paths] - search_order = self.sort_paths(module_paths, base_path) - return self.get_package_from_modules(search_order) - - def do_import(self, *args, **kwargs): - return self._import(*args, **kwargs) - - @contextmanager - def nullcontext(self, *args, **kwargs): - try: - yield - finally: - pass - - def get_pathinfo(self, module_path): - assert isinstance(module_path, (list, tuple)) - module_path, start_version, end_version = module_path - return self.path_info( - module_path, self._parse(start_version), self._parse(end_version) - ) - old_module = sys.modules[__name__] if __name__ in sys.modules else None module = sys.modules[__name__] = _shims() diff --git a/pipenv/vendor/pip_shims/utils.py b/pipenv/vendor/pip_shims/utils.py index d6101e21a2..931ff137c6 100644 --- a/pipenv/vendor/pip_shims/utils.py +++ b/pipenv/vendor/pip_shims/utils.py @@ -1,13 +1,95 @@ # -*- coding=utf-8 -*- -from functools import wraps +from __future__ import absolute_import + +import contextlib +import copy +import inspect import sys +from functools import wraps + +import packaging.version +import six + +from .environment import MYPY_RUNNING + +# format: off +six.add_move( + six.MovedAttribute("Callable", "collections", "collections.abc") +) # type: ignore # noqa +from six.moves import Callable # type: ignore # isort:skip # noqa + +# format: on + +if MYPY_RUNNING: + from types import ModuleType + from typing import ( + Any, + Dict, + Iterator, + List, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, + ) + + TShimmedPath = TypeVar("TShimmedPath") + TShimmedPathCollection = TypeVar("TShimmedPathCollection") + TShim = Union[TShimmedPath, TShimmedPathCollection] + TShimmedFunc = Union[TShimmedPath, TShimmedPathCollection, Callable, Type] + STRING_TYPES = (str,) if sys.version_info < (3, 0): - STRING_TYPES = STRING_TYPES + (unicode,) + STRING_TYPES = STRING_TYPES + (unicode,) # noqa:F821 + + +class 
BaseMethod(Callable): + def __init__(self, func_base, name, *args, **kwargs): + # type: (Callable, str, Any, Any) -> None + self.func = func_base + self.__name__ = self.__qualname__ = name + + def __call__(self, *args, **kwargs): + # type: (Any, Any) -> Any + return self.func(*args, **kwargs) + + +class BaseClassMethod(Callable): + def __init__(self, func_base, name, *args, **kwargs): + # type: (Callable, str, Any, Any) -> None + self.func = func_base + self.__name__ = self.__qualname__ = name + + def __call__(self, cls, *args, **kwargs): + # type: (Type, Any, Any) -> Any + return self.func(*args, **kwargs) + + +def make_method(fn): + # type: (Callable) -> Callable + @wraps(fn) + def method_creator(*args, **kwargs): + # type: (Any, Any) -> Callable + return BaseMethod(fn, *args, **kwargs) + + return method_creator + + +def make_classmethod(fn): + # type: (Callable) -> Callable + @wraps(fn) + def classmethod_creator(*args, **kwargs): + # type: (Any, Any) -> Callable + return classmethod(BaseClassMethod(fn, *args, **kwargs)) + + return classmethod_creator def memoize(obj): + # type: (Any) -> Callable cache = obj.cache = {} @wraps(obj) @@ -16,20 +98,342 @@ def memoizer(*args, **kwargs): if key not in cache: cache[key] = obj(*args, **kwargs) return cache[key] + return memoizer @memoize def _parse(version): + # type: (str) -> Tuple[int, ...] if isinstance(version, STRING_TYPES): return tuple((int(i) for i in version.split("."))) return version -def get_package(module, subimport=None): +@memoize +def parse_version(version): + # type: (str) -> packaging.version._BaseVersion + if not isinstance(version, STRING_TYPES): + raise TypeError("Can only derive versions from string, got {0!r}".format(version)) + return packaging.version.parse(version) + + +@memoize +def split_package(module, subimport=None): + # type: (str, Optional[str]) -> Tuple[str, str] + """ + Used to determine what target to import. + + Either splits off the final segment or uses the provided sub-import to return a + 2-tuple of the import path and the target module or sub-path. + + :param str module: A package to import from + :param Optional[str] subimport: A class, function, or subpackage to import + :return: A 2-tuple of the corresponding import package and sub-import path + :rtype: Tuple[str, str] + + :Example: + + >>> from pip_shims.utils import split_package + >>> split_package("pip._internal.req.req_install", subimport="InstallRequirement") + ("pip._internal.req.req_install", "InstallRequirement") + >>> split_package("pip._internal.cli.base_command") + ("pip._internal.cli", "base_command") + """ package = None if subimport: package = subimport else: module, _, package = module.rpartition(".") return module, package + + +def get_method_args(target_method): + # type: (Callable) -> Tuple[Callable, Optional[inspect.Arguments]] + """ + Returns the arguments for a callable. 
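The lookup this helper performs boils down to calling `inspect.getargs()` on the right code object. A minimal self-contained sketch of that idea (the names `arg_names`, `add`, and `Widget` are illustrative, not part of the vendored module):

```python
import inspect

def arg_names(target):
    # Prefer the callable's own code object; unwrap __func__ for
    # callables that hide it, e.g. a classmethod descriptor pulled
    # straight out of a class __dict__.
    code = getattr(target, "__code__", None)
    if code is None:
        code = target.__func__.__code__
    return inspect.getargs(code).args

def add(a, b, c=0):
    return a + b + c

class Widget(object):
    @classmethod
    def build(cls, name):
        return cls()

print(arg_names(add))                       # ['a', 'b', 'c']
print(arg_names(Widget.__dict__["build"]))  # ['cls', 'name']
```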
+ + :param Callable target_method: A callable to retrieve arguments for + :return: A 2-tuple of the original callable and its resulting arguments + :rtype: Tuple[Callable, Optional[inspect.Arguments]] + """ + inspected_args = None + try: + inspected_args = inspect.getargs(target_method.__code__) + except AttributeError: + target_func = getattr(target_method, "__func__", None) + if target_func is not None: + inspected_args = inspect.getargs(target_func.__code__) + else: + target_func = target_method + return target_func, inspected_args + + +def set_default_kwargs(basecls, method, *args, **default_kwargs): + # type: (Union[Type, ModuleType], Callable, Any, Any) -> Union[Type, ModuleType] # noqa + target_method = getattr(basecls, method, None) + if target_method is None: + return basecls + target_func, inspected_args = get_method_args(target_method) + if inspected_args is not None: + pos_args = inspected_args.args + else: + pos_args = [] + # Spit back the base class if we can't find matching arguments + # to put defaults in place of + if not any(arg in pos_args for arg in list(default_kwargs.keys())): + return basecls + prepended_defaults = tuple() # type: Tuple[Any, ...] + # iterate from the function's argument order to make sure we fill this + # out in the correct order + for arg in args: + prepended_defaults += (arg,) + for arg in pos_args: + if arg in default_kwargs: + prepended_defaults = prepended_defaults + (default_kwargs[arg],) + if not prepended_defaults: + return basecls + if six.PY2 and inspect.ismethod(target_method): + new_defaults = prepended_defaults + target_func.__defaults__ + target_method.__func__.__defaults__ = new_defaults + else: + new_defaults = prepended_defaults + target_method.__defaults__ + target_method.__defaults__ = new_defaults + setattr(basecls, method, target_method) + return basecls + + +def ensure_function(parent, funcname, func): + # type: (Union[ModuleType, Type, Callable, Any], str, Callable) -> Callable + """Given a module, a function name, and a function object, attaches the given + function to the module and ensures it is named properly according to the provided + argument + + :param Any parent: The parent to attack the function to + :param str funcname: The name to give the function + :param Callable func: The function to rename and attach to **parent** + :returns: The function with its name, qualname, etc set to mirror **parent** + :rtype: Callable + """ + qualname = funcname + if parent is None: + parent = __module__ # type: ignore # noqa:F821 + parent_is_module = inspect.ismodule(parent) + parent_is_class = inspect.isclass(parent) + module = None + if parent_is_module: + module = parent.__name__ + elif parent_is_class: + qualname = "{0}.{1}".format(parent.__name__, qualname) + module = getattr(parent, "__module__", None) + else: + module = getattr(parent, "__module__", None) + try: + func.__name__ = funcname + except AttributeError: + if getattr(func, "__func__", None) is not None: + func = func.__func__ + func.__name__ = funcname + func.__qualname__ = qualname + + func.__module__ = module + return func + + +def add_mixin_to_class(basecls, mixins): + # type: (Type, List[Type]) -> Type + """ + Given a class, adds the provided mixin classes as base classes and gives a new class + + :param Type basecls: An initial class to generate a new class from + :param List[Type] mixins: A list of mixins to add as base classes + :return: A new class with the provided mixins as base classes + :rtype: Type[basecls, *mixins] + """ + if not any(mixins): + return 
basecls + base_dict = basecls.__dict__.copy() + class_tuple = (basecls,) # type: Tuple[Type, ...] + for mixin in mixins: + if not mixin: + continue + mixin_dict = mixin.__dict__.copy() + base_dict.update(mixin_dict) + class_tuple = class_tuple + (mixin,) + base_dict.update(basecls.__dict__) + return type(basecls.__name__, class_tuple, base_dict) + + +def fallback_is_file_url(link): + # type: (Any) -> bool + return link.url.lower().startswith("file:") + + +def fallback_is_artifact(self): + # type: (Any) -> bool + return not getattr(self, "is_vcs", False) + + +def fallback_is_vcs(self): + # type: (Any) -> bool + return not getattr(self, "is_artifact", True) + + +def resolve_possible_shim(target): + # type: (TShimmedFunc) -> Optional[Union[Type, Callable]] + if target is None: + return target + if getattr(target, "shim", None): + return target.shim() + return target + + +@contextlib.contextmanager +def nullcontext(*args, **kwargs): + # type: (Any, Any) -> Iterator + try: + yield + finally: + pass + + +def has_property(target, name): + # type: (Any, str) -> bool + if getattr(target, name, None) is not None: + return True + return False + + +def apply_alias(imported, target, *aliases): + # type: (Union[ModuleType, Type, None], Any, Any) -> Any + """ + Given a target with attributes, point non-existant aliases at the first existing one + + :param Union[ModuleType, Type] imported: A Module or Class base + :param Any target: The target which is a member of **imported** and will have aliases + :param str aliases: A list of aliases, the first found attribute will be the basis + for all non-existant names which will be created as pointers + :return: The original target + :rtype: Any + """ + base_value = None # type: Optional[Any] + applied_aliases = set() + unapplied_aliases = set() + for alias in aliases: + if has_property(target, alias): + base_value = getattr(target, alias) + applied_aliases.add(alias) + else: + unapplied_aliases.add(alias) + is_callable = inspect.ismethod(base_value) or inspect.isfunction(base_value) + for alias in unapplied_aliases: + if is_callable: + func_copy = copy.deepcopy(base_value) + alias_value = ensure_function(imported, alias, func_copy) + else: + alias_value = base_value + setattr(target, alias, alias_value) + return target + + +def suppress_setattr(obj, attr, value, filter_none=False): + """ + Set an attribute, suppressing any exceptions and skipping the attempt on failure. + + :param Any obj: Object to set the attribute on + :param str attr: The attribute name to set + :param Any value: The value to set the attribute to + :param bool filter_none: [description], defaults to False + :return: Nothing + :rtype: None + + :Example: + + >>> class MyClass(object): + ... def __init__(self, name): + ... self.name = name + ... self.parent = None + ... def __repr__(self): + ... return "<{0!r} instance (name={1!r}, parent={2!r})>".format( + ... self.__class__.__name__, self.name, self.parent + ... ) + ... def __str__(self): + ... 
return self.name + >>> me = MyClass("Dan") + >>> dad = MyClass("John") + >>> grandfather = MyClass("Joe") + >>> suppress_setattr(dad, "parent", grandfather) + >>> dad + <'MyClass' instance (name='John', parent=<'MyClass' instance (name='Joe', parent=None + )>)> + >>> suppress_setattr(me, "parent", dad) + >>> me + <'MyClass' instance (name='Dan', parent=<'MyClass' instance (name='John', parent=<'My + Class' instance (name='Joe', parent=None)>)>)> + >>> suppress_setattr(me, "grandparent", grandfather) + >>> me + <'MyClass' instance (name='Dan', parent=<'MyClass' instance (name='John', parent=<'My + Class' instance (name='Joe', parent=None)>)>)> + """ + if filter_none and value is None: + pass + try: + setattr(obj, attr, value) + except Exception: # noqa + pass + + +def get_allowed_args(fn_or_class): + # type: (Union[Callable, Type]) -> Tuple[List[str], Dict[str, Any]] + """ + Given a callable or a class, returns the arguments and default kwargs passed in. + + :param Union[Callable, Type] fn_or_class: A function, method or class to inspect. + :return: A 2-tuple with a list of arguments and a dictionary of keywords mapped to + default values. + :rtype: Tuple[List[str], Dict[str, Any]] + """ + try: + signature = inspect.signature(fn_or_class) + except AttributeError: + import funcsigs + + signature = funcsigs.signature(fn_or_class) + args = [] + kwargs = {} + for arg, param in signature.parameters.items(): + if ( + param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY) + ) and param.default is param.empty: + args.append(arg) + else: + kwargs[arg] = param.default if param.default is not param.empty else None + return args, kwargs + + +def call_function_with_correct_args(fn, **provided_kwargs): + # type: (Callable, Dict[str, Any]) -> Any + """ + Determines which arguments from **provided_kwargs** to call **fn** and calls it. + + Consumes a list of allowed arguments (e.g. from :func:`~inspect.getargs()`) and + uses it to determine which of the arguments in the provided kwargs should be passed + through to the given callable. 
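Taken together, `get_allowed_args` and `call_function_with_correct_args` amount to signature-based argument filtering. A simplified sketch of the same idea, not the vendored code path itself (it skips positional handling and the Python 2 `funcsigs` fallback; `call_with_supported_args` and `make_finder` are hypothetical names):

```python
import inspect

def call_with_supported_args(fn, **provided):
    # Keep only the keyword arguments fn actually declares and drop
    # the rest -- useful when a constructor's signature varies
    # across pip releases.
    params = inspect.signature(fn).parameters
    accepted = {k: v for k, v in provided.items() if k in params}
    return fn(**accepted)

def make_finder(index_url, timeout=15):
    return (index_url, timeout)

# "retries" is silently dropped because make_finder() does not accept it.
print(call_with_supported_args(
    make_finder, index_url="https://pypi.org/simple", retries=3))
# ('https://pypi.org/simple', 15)
```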
+ + :param Callable fn: A callable which has some dynamic arguments + :param List[str] allowed_args: A list of allowed arguments which can be passed to + the supplied function + :return: The result of calling the function + :rtype: Any + """ + # signature = inspect.signature(fn) + args = [] + kwargs = {} + func_args, func_kwargs = get_allowed_args(fn) + for arg in func_args: + args.append(provided_kwargs[arg]) + for arg in func_kwargs: + if not provided_kwargs.get(arg): + continue + kwargs[arg] = provided_kwargs[arg] + return fn(*args, **kwargs) diff --git a/pipenv/vendor/pipreqs/__init__.py b/pipenv/vendor/pipreqs/__init__.py index e0aa5d8bda..a6ce8933e5 100644 --- a/pipenv/vendor/pipreqs/__init__.py +++ b/pipenv/vendor/pipreqs/__init__.py @@ -2,4 +2,4 @@ __author__ = 'Vadim Kravcenko' __email__ = 'vadim.kravcenko@gmail.com' -__version__ = '0.4.9' +__version__ = '0.4.10' diff --git a/pipenv/vendor/pipreqs/mapping b/pipenv/vendor/pipreqs/mapping index 46cca297ea..6f5a469d12 100644 --- a/pipenv/vendor/pipreqs/mapping +++ b/pipenv/vendor/pipreqs/mapping @@ -9,7 +9,8 @@ BeautifulSoupTests:BeautifulSoup BioSQL:biopython BuildbotStatusShields:BuildbotEightStatusShields ComputedAttribute:ExtensionClass -Crypto:pycrypto +Crypto:pycryptodome +Cryptodome:pycryptodomex FSM:pexpect FiftyOneDegrees:51degrees_mobile_detector_v3_wrapper GeoBaseMain:GeoBasesDev @@ -263,7 +264,6 @@ armstrong:armstrong.hatband armstrong:armstrong.templates.standard armstrong:armstrong.utils.backends armstrong:armstrong.utils.celery -arrow:arrow_fatisar arstecnica:arstecnica.raccoon.autobahn arstecnica:arstecnica.sqlalchemy.async article-downloader:article_downloader @@ -536,6 +536,7 @@ cassandra:cassandra_driver cassandralauncher:CassandraLauncher cc42:42qucc cerberus:Cerberus +cfnlint:cfn-lint chameleon:Chameleon charmtools:charm_tools chef:PyChef @@ -582,7 +583,9 @@ dateutil:python_dateutil dawg:DAWG deb822:python_debian debian:python_debian +decouple:python-decouple demo:webunit +demosongs:PySynth deployer:juju_deployer depot:filedepot devtools:tg.devtools @@ -698,6 +701,7 @@ html:pies2overrides htmloutput:nosehtmloutput http:pies2overrides hvad:django_hvad +krbV:krbv i99fix:199Fix igraph:python_igraph imdb:IMDbPY @@ -771,6 +775,8 @@ mimeparse:python_mimeparse minitage:minitage.paste minitage:minitage.recipe.common missingdrawables:android_missingdrawables +mixfiles:PySynth +mkfreq:PySynth mkrst_themes:2lazy2rest mockredis:mockredispy modargs:python_modargs @@ -786,11 +792,13 @@ monthdelta:MonthDelta mopidy:Mopidy mopytools:MoPyTools mptt:django_mptt +mpv:python-mpv mrbob:mr.bob msgpack:msgpack_python mutations:aino_mutations mws:amazon_mws mysql:mysql_connector_repackaged +MySQL-python:MySQLdb native_tags:django_native_tags ndg:ndg_httpsclient nereid:trytond_nereid @@ -800,7 +808,7 @@ nester:abofly nester:bssm_pythonSig novaclient:python_novaclient oauth2_provider:alauda_django_oauth -oauth2client:google_api_python_client +oauth2client:oauth2client odf:odfpy ometa:Parsley openid:python_openid @@ -821,12 +829,14 @@ past:future paste:PasteScript path:forked_path path:path.py +patricia:patricia-trie paver:Paver peak:ProxyTypes picasso:anderson.picasso picklefield:django-picklefield pilot:BigJob pivotal:pivotal_py +play_wav:PySynth playhouse:peewee plivoxml:plivo plone:plone.alterego @@ -910,9 +920,9 @@ plone:plone.z3cform plonetheme:plonetheme.barceloneta png:pypng polymorphic:django_polymorphic -portalocker:ConcurrentLogHandler postmark:python_postmark powerprompt:bash_powerprompt +prefetch:django-prefetch 
printList:AndrewList progressbar:progressbar2 progressbar:progressbar33 @@ -947,6 +957,14 @@ pyrimaa:AEI pysideuic:PySide pysqlite2:adhocracy_pysqlite pysqlite2:pysqlite +pysynth_b:PySynth +pysynth_beeper:PySynth +pysynth_c:PySynth +pysynth_d:PySynth +pysynth_e:PySynth +pysynth_p:PySynth +pysynth_s:PySynth +pysynth_samp:PySynth pythongettext:python_gettext pythonjsonlogger:python_json_logger pyutilib:PyUtilib @@ -1004,6 +1022,7 @@ singleton:pysingleton sittercommon:cerebrod skbio:scikit_bio sklearn:scikit_learn +slack:slackclient slugify:unicode_slugify smarkets:smk_python_sdk snappy:ctypes_snappy diff --git a/pipenv/vendor/pipreqs/pipreqs.py b/pipenv/vendor/pipreqs/pipreqs.py index 791168a99d..4b817c3c43 100644 --- a/pipenv/vendor/pipreqs/pipreqs.py +++ b/pipenv/vendor/pipreqs/pipreqs.py @@ -3,25 +3,38 @@ """pipreqs - Generate pip requirements.txt file based on imports Usage: - pipreqs [options] <path> + pipreqs [options] [<path>] + +Arguments: + <path> The path to the directory containing the application + files for which a requirements file should be + generated (defaults to the current working + directory). Options: - --use-local Use ONLY local package info instead of querying PyPI - --pypi-server <url> Use custom PyPi server - --proxy <url> Use Proxy, parameter will be passed to requests library. You can also just set the - environments parameter in your terminal: + --use-local Use ONLY local package info instead of querying PyPI. + --pypi-server <url> Use custom PyPi server. + --proxy <url> Use Proxy, parameter will be passed to requests + library. You can also just set the environments + parameter in your terminal: $ export HTTP_PROXY="http://10.10.1.10:3128" $ export HTTPS_PROXY="https://10.10.1.10:1080" - --debug Print debug information - --ignore <dirs>... Ignore extra directories, each separated by a comma + --debug Print debug information. + --ignore <dirs>... Ignore extra directories, each separated by a comma. + --no-follow-links Do not follow symbolic links in the project --encoding <charset> Use encoding parameter for file open --savepath <file> Save the list of requirements in the given file - --print Output the list of requirements in the standard output + --print Output the list of requirements in the standard + output. --force Overwrite existing requirements.txt - --diff <file> Compare modules in requirements.txt to project imports. - --clean <file> Clean up requirements.txt by removing modules that are not imported in project. + --diff <file> Compare modules in requirements.txt to project + imports. + --clean <file> Clean up requirements.txt by removing modules + that are not imported in project. + --no-pin Omit version of output packages. """ from __future__ import print_function, absolute_import +from contextlib import contextmanager import os import sys import re @@ -50,7 +63,39 @@ py2_exclude = ["concurrent", "concurrent.futures"] -def get_all_imports(path, encoding=None, extra_ignore_dirs=None): +@contextmanager +def _open(filename=None, mode='r'): + """Open a file or ``sys.stdout`` depending on the provided filename. + + Args: + filename (str): The path to the file that should be opened. If + ``None`` or ``'-'``, ``sys.stdout`` or ``sys.stdin`` is + returned depending on the desired mode. Defaults to ``None``. + mode (str): The mode that should be used to open the file. + + Yields: + A file handle. 
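The pattern `_open` implements is a common one: a context manager that transparently substitutes a standard stream for a real file. A standalone sketch under the same assumptions (`None` or `'-'` means stdin/stdout, and only handles the function opened itself get closed; `open_or_std` is an illustrative name):

```python
import sys
from contextlib import contextmanager

@contextmanager
def open_or_std(filename=None, mode='r'):
    # '-' (or no filename) routes to the standard streams; only
    # files we opened ourselves are closed on exit.
    if not filename or filename == '-':
        yield sys.stdout if 'w' in mode else sys.stdin
    else:
        handle = open(filename, mode)
        try:
            yield handle
        finally:
            handle.close()

with open_or_std('-', mode='w') as out:
    out.write('requests==2.22.0\n')
```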
+ + """ + if not filename or filename == '-': + if not mode or 'r' in mode: + file = sys.stdin + elif 'w' in mode: + file = sys.stdout + else: + raise ValueError('Invalid mode for file: {}'.format(mode)) + else: + file = open(filename, mode) + + try: + yield file + finally: + if file not in (sys.stdin, sys.stdout): + file.close() + + +def get_all_imports( + path, encoding=None, extra_ignore_dirs=None, follow_links=True): imports = set() raw_imports = set() candidates = [] @@ -63,7 +108,8 @@ def get_all_imports(path, encoding=None, extra_ignore_dirs=None): ignore_dirs_parsed.append(os.path.basename(os.path.realpath(e))) ignore_dirs.extend(ignore_dirs_parsed) - for root, dirs, files in os.walk(path): + walk = os.walk(path, followlinks=follow_links) + for root, dirs, files in walk: dirs[:] = [d for d in dirs if d not in ignore_dirs] candidates.append(os.path.basename(root)) @@ -71,42 +117,44 @@ def get_all_imports(path, encoding=None, extra_ignore_dirs=None): candidates += [os.path.splitext(fn)[0] for fn in files] for file_name in files: - with open_func(os.path.join(root, file_name), "r", encoding=encoding) as f: + file_name = os.path.join(root, file_name) + with open_func(file_name, "r", encoding=encoding) as f: contents = f.read() - try: - tree = ast.parse(contents) - for node in ast.walk(tree): - if isinstance(node, ast.Import): - for subnode in node.names: - raw_imports.add(subnode.name) - elif isinstance(node, ast.ImportFrom): - raw_imports.add(node.module) - except Exception as exc: - if ignore_errors: - traceback.print_exc(exc) - logging.warn("Failed on file: %s" % os.path.join(root, file_name)) - continue - else: - logging.error("Failed on file: %s" % os.path.join(root, file_name)) - raise exc - - + try: + tree = ast.parse(contents) + for node in ast.walk(tree): + if isinstance(node, ast.Import): + for subnode in node.names: + raw_imports.add(subnode.name) + elif isinstance(node, ast.ImportFrom): + raw_imports.add(node.module) + except Exception as exc: + if ignore_errors: + traceback.print_exc(exc) + logging.warn("Failed on file: %s" % file_name) + continue + else: + logging.error("Failed on file: %s" % file_name) + raise exc # Clean up imports for name in [n for n in raw_imports if n]: - # Sanity check: Name could have been None if the import statement was as from . import X + # Sanity check: Name could have been None if the import + # statement was as ``from . import X`` # Cleanup: We only want to first part of the import. - # Ex: from django.conf --> django.conf. But we only want django as an import + # Ex: from django.conf --> django.conf. But we only want django + # as an import. 
cleaned_name, _, _ = name.partition('.') imports.add(cleaned_name) - packages = set(imports) - set(set(candidates) & set(imports)) + packages = imports - (set(candidates) & imports) logging.debug('Found packages: {0}'.format(packages)) with open(join("stdlib"), "r") as f: - data = [x.strip() for x in f.readlines()] - data = [x for x in data if x not in py2_exclude] if py2 else data - return sorted(list(set(packages) - set(data))) + data = {x.strip() for x in f} + + data = {x for x in data if x not in py2_exclude} if py2 else data + return list(packages - data) def filter_line(l): @@ -114,32 +162,30 @@ def filter_line(l): def generate_requirements_file(path, imports): - with open(path, "w") as out_file: + with _open(path, "w") as out_file: logging.debug('Writing {num} requirements: {imports} to {file}'.format( num=len(imports), file=path, imports=", ".join([x['name'] for x in imports]) )) fmt = '{name}=={version}' - out_file.write('\n'.join(fmt.format(**item) if item['version'] else '{name}'.format(**item) - for item in imports) + '\n') + out_file.write('\n'.join( + fmt.format(**item) if item['version'] else '{name}'.format(**item) + for item in imports) + '\n') + def output_requirements(imports): - logging.debug('Writing {num} requirements: {imports} to stdout'.format( - num=len(imports), - imports=", ".join([x['name'] for x in imports]) - )) - fmt = '{name}=={version}' - print('\n'.join(fmt.format(**item) if item['version'] else '{name}'.format(**item) - for item in imports)) + generate_requirements_file('-', imports) -def get_imports_info(imports, pypi_server="https://pypi.python.org/pypi/", proxy=None): +def get_imports_info( + imports, pypi_server="https://pypi.python.org/pypi/", proxy=None): result = [] for item in imports: try: - response = requests.get("{0}{1}/json".format(pypi_server, item), proxies=proxy) + response = requests.get( + "{0}{1}/json".format(pypi_server, item), proxies=proxy) if response.status_code == 200: if hasattr(response.content, 'decode'): data = json2package(response.content.decode()) @@ -163,11 +209,13 @@ def get_locally_installed_packages(encoding=None): for root, dirs, files in os.walk(path): for item in files: if "top_level" in item: - with open_func(os.path.join(root, item), "r", encoding=encoding) as f: + item = os.path.join(root, item) + with open_func(item, "r", encoding=encoding) as f: package = root.split(os.sep)[-1].split("-") try: package_import = f.read().strip().split("\n") - except: + except: # NOQA + # TODO: What errors do we intend to suppress here? continue for i_item in package_import: if ((i_item not in ignore) and @@ -203,18 +251,24 @@ def get_import_local(imports, encoding=None): def get_pkg_names(pkgs): - result = [] + """Get PyPI package names from a list of imports. + + Args: + pkgs (List[str]): List of import names. + + Returns: + List[str]: The corresponding PyPI package names. + + """ + result = set() with open(join("mapping"), "r") as f: - data = [x.strip().split(":") for x in f.readlines()] - for pkg in pkgs: - toappend = pkg - for item in data: - if item[0] == pkg: - toappend = item[1] - break - if toappend not in result: - result.append(toappend) - return result + data = dict(x.strip().split(":") for x in f) + for pkg in pkgs: + # Look up the mapped requirement. If a mapping isn't found, + # simply use the package name. + result.add(data.get(pkg, pkg)) + # Return a sorted list for backward compatibility. 
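The rewritten `get_pkg_names` reduces the mapping file to a single dict lookup with the import name itself as the fallback. A small illustration of that lookup, using two sample entries in the same `import:package` format as the mapping file above:

```python
# Each mapping line pairs an importable name with its PyPI project.
mapping_lines = ["Crypto:pycryptodome", "dateutil:python_dateutil"]
data = dict(line.strip().split(":") for line in mapping_lines)

imports = ["Crypto", "dateutil", "requests"]
# Unmapped imports fall back to their own name, then the set is
# sorted case-insensitively, as in the function body here.
print(sorted({data.get(pkg, pkg) for pkg in imports},
             key=lambda s: s.lower()))
# ['pycryptodome', 'python_dateutil', 'requests']
```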
+ return sorted(result, key=lambda s: s.lower()) def get_name_without_alias(name): @@ -228,6 +282,7 @@ def get_name_without_alias(name): def join(f): return os.path.join(os.path.dirname(__file__), f) + def parse_requirements(file_): """Parse a requirements formatted file. @@ -245,7 +300,9 @@ def parse_requirements(file_): tuple: The contents of the file, excluding comments. """ modules = [] - delim = ["<", ">", "=", "!", "~"] # https://www.python.org/dev/peps/pep-0508/#complete-grammar + # For the dependency identifier specification, see + # https://www.python.org/dev/peps/pep-0508/#complete-grammar + delim = ["<", ">", "=", "!", "~"] try: f = open_func(file_, "r") @@ -253,14 +310,16 @@ def parse_requirements(file_): logging.error("Failed on file: {}".format(file_)) raise else: - data = [x.strip() for x in f.readlines() if x != "\n"] - finally: - f.close() + try: + data = [x.strip() for x in f.readlines() if x != "\n"] + finally: + f.close() data = [x for x in data if x[0].isalpha()] for x in data: - if not any([y in x for y in delim]): # Check for modules w/o a specifier. + # Check for modules w/o a specifier. + if not any([y in x for y in delim]): modules.append({"name": x, "version": None}) for y in x: if y in delim: @@ -298,11 +357,13 @@ def compare_modules(file_, imports): def diff(file_, imports): - """Display the difference between modules in a file and imported modules.""" + """Display the difference between modules in a file and imported modules.""" # NOQA modules_not_imported = compare_modules(file_, imports) - logging.info("The following modules are in {} but do not seem to be imported: " - "{}".format(file_, ", ".join(x for x in modules_not_imported))) + logging.info( + "The following modules are in {} but do not seem to be imported: " + "{}".format(file_, ", ".join(x for x in modules_not_imported))) + def clean(file_, imports): """Remove modules that aren't imported in project from file.""" @@ -316,29 +377,36 @@ def clean(file_, imports): logging.error("Failed on file: {}".format(file_)) raise else: - for i in f.readlines(): - if re_remove.match(i) is None: - to_write.append(i) - f.seek(0) - f.truncate() - - for i in to_write: - f.write(i) - finally: - f.close() + try: + for i in f.readlines(): + if re_remove.match(i) is None: + to_write.append(i) + f.seek(0) + f.truncate() + + for i in to_write: + f.write(i) + finally: + f.close() logging.info("Successfully cleaned up requirements in " + file_) + def init(args): encoding = args.get('--encoding') extra_ignore_dirs = args.get('--ignore') + follow_links = not args.get('--no-follow-links') + input_path = args['<path>'] + if input_path is None: + input_path = os.path.abspath(os.curdir) if extra_ignore_dirs: extra_ignore_dirs = extra_ignore_dirs.split(',') - candidates = get_all_imports(args['<path>'], + candidates = get_all_imports(input_path, encoding=encoding, - extra_ignore_dirs=extra_ignore_dirs) + extra_ignore_dirs=extra_ignore_dirs, + follow_links=follow_links) candidates = get_pkg_names(candidates) logging.debug("Found imports: " + ", ".join(candidates)) pypi_server = "https://pypi.python.org/pypi/" @@ -364,7 +432,7 @@ def init(args): pypi_server=pypi_server) path = (args["--savepath"] if args["--savepath"] else - os.path.join(args['<path>'], "requirements.txt")) + os.path.join(input_path, "requirements.txt")) if args["--diff"]: diff(args["--diff"], imports) @@ -374,11 +442,17 @@ def init(args): clean(args["--clean"], imports) return - if not args["--print"] and not args["--savepath"] and not args["--force"] and 
os.path.exists(path): + if (not args["--print"] + and not args["--savepath"] + and not args["--force"] + and os.path.exists(path)): logging.warning("Requirements.txt already exists, " "use --force to overwrite it") return + if args.get('--no-pin'): + imports = [{'name': item["name"], 'version': ''} for item in imports] + if args["--print"]: output_requirements(imports) logging.info("Successfully output requirements") diff --git a/pipenv/vendor/pipreqs/stdlib b/pipenv/vendor/pipreqs/stdlib index 71edcc8752..470fd5cc91 100644 --- a/pipenv/vendor/pipreqs/stdlib +++ b/pipenv/vendor/pipreqs/stdlib @@ -1,6 +1,8 @@ __builtin__ __future__ __main__ +_dummy_thread +_thread _winreg abc aepack @@ -15,6 +17,7 @@ argparse array ast asynchat +asyncio asyncore atexit audioop @@ -28,6 +31,7 @@ binhex bisect bsddb buildtools +builtins bz2 calendar Carbon @@ -101,6 +105,7 @@ code codecs codeop collections +collections.abc ColorPicker colorsys commands @@ -108,12 +113,16 @@ compileall compiler compiler.ast compiler.visitor +concurrent +concurrent.futures ConfigParser +configparser contextlib Cookie cookielib copy copy_reg +copyreg cPickle cProfile crypt @@ -127,6 +136,9 @@ curses.textpad datetime dbhash dbm +dbm.dumb +dbm.gnu +dbm.ndbm decimal DEVICE difflib @@ -188,21 +200,27 @@ dummy_threading EasyDialogs email email.charset +email.contentmanager email.encoders email.errors email.generator email.header +email.headerregistry email.iterators email.message email.mime email.parser +email.policy email.utils encodings encodings.idna +encodings.mbcs encodings.utf_8_sig ensurepip +enum errno exceptions +faulthandler fcntl filecmp fileinput @@ -226,8 +244,8 @@ gensuitemodule getopt getpass gettext -gl GL +gl glob grp gzip @@ -236,9 +254,17 @@ heapq hmac hotshot hotshot.stats +html +html.entities +html.parser htmlentitydefs htmllib HTMLParser +http +http.client +http.cookiejar +http.cookies +http.server httplib ic icopen @@ -248,12 +274,17 @@ imgfile imghdr imp importlib +importlib.abc +importlib.machinery +importlib.util imputil inspect io +ipaddress itertools jpeg json +json.tool keyword lib2to3 linecache @@ -261,6 +292,7 @@ locale logging logging.config logging.handlers +lzma macerrors MacOS macostools @@ -301,11 +333,13 @@ os os.path ossaudiodev parser +pathlib pdb pickle pickletools pipes PixMapWrapper +pkg_resources pkgutil platform plistlib @@ -322,10 +356,12 @@ py_compile pyclbr pydoc Queue +queue quopri random re readline +reprlib resource rexec rfc822 @@ -335,7 +371,9 @@ runpy sched ScrolledText select +selectors sets +setuptools sgmllib sha shelve @@ -350,10 +388,12 @@ smtplib sndhdr socket SocketServer +socketserver spwd sqlite3 ssl stat +statistics statvfs string StringIO @@ -361,8 +401,8 @@ stringprep struct subprocess sunau -sunaudiodev SUNAUDIODEV +sunaudiodev symbol symtable sys @@ -374,6 +414,7 @@ telnetlib tempfile termios test +test.support test.test_support textwrap thread @@ -382,17 +423,30 @@ time timeit Tix Tkinter +tkinter +tkinter.scrolledtext +tkinter.tix +tkinter.ttk token tokenize trace traceback +tracemalloc ttk tty turtle +turtledemo types +typing unicodedata unittest +unittest.mock urllib +urllib.error +urllib.parse +urllib.request +urllib.response +urllib.robotparser urllib2 urlparse user @@ -401,6 +455,7 @@ UserList UserString uu uuid +venv videoreader W warnings @@ -408,6 +463,7 @@ wave weakref webbrowser whichdb +winreg winsound wsgiref wsgiref.handlers @@ -422,654 +478,17 @@ xml.dom.minidom xml.dom.pulldom xml.etree.ElementTree xml.parsers.expat +xml.parsers.expat.errors 
+xml.parsers.expat.model xml.sax xml.sax.handler xml.sax.saxutils xml.sax.xmlreader +xmlrpc +xmlrpc.client +xmlrpc.server xmlrpclib -zipfile -zipimport -zlib -__future__ -__main__ -_dummy_thread -_thread -abc -aifc -argparse -array -ast -asynchat -asyncio -asyncore -atexit -audioop -base64 -bdb -binascii -binhex -bisect -builtins -bz2 -calendar -cgi -cgitb -chunk -cmath -cmd -code -codecs -codeop -collections -collections.abc -colorsys -compileall -concurrent -concurrent.futures -configparser -contextlib -copy -copyreg -cProfile -crypt -csv -ctypes -curses -curses.ascii -curses.panel -curses.textpad -datetime -dbm -dbm.dumb -dbm.gnu -dbm.ndbm -decimal -difflib -dis -distutils -distutils.archive_util -distutils.bcppcompiler -distutils.ccompiler -distutils.cmd -distutils.command -distutils.command.bdist -distutils.command.bdist_dumb -distutils.command.bdist_msi -distutils.command.bdist_packager -distutils.command.bdist_rpm -distutils.command.bdist_wininst -distutils.command.build -distutils.command.build_clib -distutils.command.build_ext -distutils.command.build_py -distutils.command.build_scripts -distutils.command.check -distutils.command.clean -distutils.command.config -distutils.command.install -distutils.command.install_data -distutils.command.install_headers -distutils.command.install_lib -distutils.command.install_scripts -distutils.command.register -distutils.command.sdist -distutils.core -distutils.cygwinccompiler -distutils.debug -distutils.dep_util -distutils.dir_util -distutils.dist -distutils.errors -distutils.extension -distutils.fancy_getopt -distutils.file_util -distutils.filelist -distutils.log -distutils.msvccompiler -distutils.spawn -distutils.sysconfig -distutils.text_file -distutils.unixccompiler -distutils.util -distutils.version -doctest -dummy_threading -email -email.charset -email.contentmanager -email.encoders -email.errors -email.generator -email.header -email.headerregistry -email.iterators -email.message -email.mime -email.parser -email.policy -email.utils -encodings -encodings.idna -encodings.mbcs -encodings.utf_8_sig -ensurepip -enum -errno -faulthandler -fcntl -filecmp -fileinput -fnmatch -formatter -fpectl -fractions -ftplib -functools -gc -getopt -getpass -gettext -glob -grp -gzip -hashlib -heapq -hmac -html -html.entities -html.parser -http -http.client -http.cookiejar -http.cookies -http.server -imaplib -imghdr -imp -importlib -importlib.abc -importlib.machinery -importlib.util -inspect -io -ipaddress -itertools -json -keyword -lib2to3 -linecache -locale -logging -logging.config -logging.handlers -lzma -macpath -mailbox -mailcap -marshal -math -mimetypes -mmap -modulefinder -msilib -msvcrt -multiprocessing -multiprocessing.connection -multiprocessing.dummy -multiprocessing.managers -multiprocessing.pool -multiprocessing.sharedctypes -netrc -nis -nntplib -numbers -operator -optparse -os -os.path -ossaudiodev -parser -pathlib -pdb -pickle -pickletools -pipes -pkgutil -platform -plistlib -poplib -posix -pprint -profile -pstats -pty -pwd -py_compile -pyclbr -pydoc -queue -quopri -random -re -readline -reprlib -resource -rlcompleter -runpy -sched -select -selectors -shelve -shlex -shutil -signal -site -smtpd -smtplib -sndhdr -socket -socketserver -spwd -sqlite3 -ssl -stat -statistics -string -stringprep -struct -subprocess -sunau -symbol -symtable -sys -sysconfig -syslog -tabnanny -tarfile -telnetlib -tempfile -termios -test -test.support -textwrap -threading -time -timeit -tkinter -tkinter.scrolledtext -tkinter.tix -tkinter.ttk -token -tokenize -trace 
-traceback -tracemalloc -tty -turtle -turtledemo -types -unicodedata -unittest -unittest.mock -urllib -urllib.error -urllib.parse -urllib.request -urllib.response -urllib.robotparser -uu -uuid -venv -warnings -wave -weakref -webbrowser -winreg -winsound -wsgiref -wsgiref.handlers -wsgiref.headers -wsgiref.simple_server -wsgiref.util -wsgiref.validate -xdrlib -xml -xml.dom -xml.dom.minidom -xml.dom.pulldom -xml.etree.ElementTree -xml.parsers.expat -xml.parsers.expat.errors -xml.parsers.expat.model -xml.sax -xml.sax.handler -xml.sax.saxutils -xml.sax.xmlreader -xmlrpc -xmlrpc.client -xmlrpc.server -zipfile -zipimport -zlib -__future__ -__main__ -_dummy_thread -_thread -abc -aifc -argparse -array -ast -asynchat -asyncio -asyncore -atexit -audioop -base64 -bdb -binascii -binhex -bisect -builtins -bz2 -calendar -cgi -cgitb -chunk -cmath -cmd -code -codecs -codeop -collections -collections.abc -colorsys -compileall -concurrent -concurrent.futures -configparser -contextlib -copy -copyreg -cProfile -crypt -csv -ctypes -curses -curses.ascii -curses.panel -curses.textpad -datetime -dbm -dbm.dumb -dbm.gnu -dbm.ndbm -decimal -difflib -dis -distutils -distutils.archive_util -distutils.bcppcompiler -distutils.ccompiler -distutils.cmd -distutils.command -distutils.command.bdist -distutils.command.bdist_dumb -distutils.command.bdist_msi -distutils.command.bdist_packager -distutils.command.bdist_rpm -distutils.command.bdist_wininst -distutils.command.build -distutils.command.build_clib -distutils.command.build_ext -distutils.command.build_py -distutils.command.build_scripts -distutils.command.check -distutils.command.clean -distutils.command.config -distutils.command.install -distutils.command.install_data -distutils.command.install_headers -distutils.command.install_lib -distutils.command.install_scripts -distutils.command.register -distutils.command.sdist -distutils.core -distutils.cygwinccompiler -distutils.debug -distutils.dep_util -distutils.dir_util -distutils.dist -distutils.errors -distutils.extension -distutils.fancy_getopt -distutils.file_util -distutils.filelist -distutils.log -distutils.msvccompiler -distutils.spawn -distutils.sysconfig -distutils.text_file -distutils.unixccompiler -distutils.util -distutils.version -doctest -dummy_threading -email -email.charset -email.contentmanager -email.encoders -email.errors -email.generator -email.header -email.headerregistry -email.iterators -email.message -email.mime -email.parser -email.policy -email.utils -encodings -encodings.idna -encodings.mbcs -encodings.utf_8_sig -ensurepip -enum -errno -faulthandler -fcntl -filecmp -fileinput -fnmatch -formatter -fpectl -fractions -ftplib -functools -gc -getopt -getpass -gettext -glob -grp -gzip -hashlib -heapq -hmac -html -html.entities -html.parser -http -http.client -http.cookiejar -http.cookies -http.server -imaplib -imghdr -imp -importlib -importlib.abc -importlib.machinery -importlib.util -inspect -io -ipaddress -itertools -json -json.tool -keyword -lib2to3 -linecache -locale -logging -logging.config -logging.handlers -lzma -macpath -mailbox -mailcap -marshal -math -mimetypes -mmap -modulefinder -msilib -msvcrt -multiprocessing -multiprocessing.connection -multiprocessing.dummy -multiprocessing.managers -multiprocessing.pool -multiprocessing.sharedctypes -netrc -nis -nntplib -numbers -operator -optparse -os -os.path -ossaudiodev -parser -pathlib -pdb -pickle -pickletools -pipes -pkgutil -platform -plistlib -poplib -posix -pprint -profile -pstats -pty -pwd -py_compile -pyclbr -pydoc -queue -quopri -random 
-re -readline -reprlib -resource -rlcompleter -runpy -sched -select -selectors -shelve -shlex -shutil -signal -site -smtpd -smtplib -sndhdr -socket -socketserver -spwd -sqlite3 -ssl -stat -statistics -string -stringprep -struct -subprocess -sunau -symbol -symtable -sys -sysconfig -syslog -tabnanny -tarfile -telnetlib -tempfile -termios -test -test.support -textwrap -threading -time -timeit -tkinter -tkinter.scrolledtext -tkinter.tix -tkinter.ttk -token -tokenize -trace -traceback -tracemalloc -tty -turtle -turtledemo -types -unicodedata -unittest -unittest.mock -urllib -urllib.error -urllib.parse -urllib.request -urllib.response -urllib.robotparser -uu -uuid -venv -warnings -wave -weakref -webbrowser -winreg -winsound -wsgiref -wsgiref.handlers -wsgiref.headers -wsgiref.simple_server -wsgiref.util -wsgiref.validate -xdrlib -xml -xml.dom -xml.dom.minidom -xml.dom.pulldom -xml.etree.ElementTree -xml.parsers.expat -xml.parsers.expat.errors -xml.parsers.expat.model -xml.sax -xml.sax.handler -xml.sax.saxutils -xml.sax.xmlreader -xmlrpc -xmlrpc.client -xmlrpc.server +yp zipapp zipfile zipimport diff --git a/pipenv/vendor/plette/__init__.py b/pipenv/vendor/plette/__init__.py index 5daf460c2d..ba03cef562 100644 --- a/pipenv/vendor/plette/__init__.py +++ b/pipenv/vendor/plette/__init__.py @@ -3,7 +3,7 @@ "Lockfile", "Pipfile", ] -__version__ = '0.2.3.dev0' +__version__ = '0.2.4.dev0' from .lockfiles import Lockfile from .pipfiles import Pipfile diff --git a/pipenv/vendor/plette/models/base.py b/pipenv/vendor/plette/models/base.py index fad0d09e2d..72cf372e72 100644 --- a/pipenv/vendor/plette/models/base.py +++ b/pipenv/vendor/plette/models/base.py @@ -8,13 +8,24 @@ class ValidationError(ValueError): def __init__(self, value, validator): super(ValidationError, self).__init__(value) self.validator = validator + self.value = value + + def __str__(self): + return '{}\n{}'.format( + self.value, + '\n'.join( + '{}: {}'.format(k, e) + for k, errors in self.validator.errors.items() + for e in errors + ) + ) VALIDATORS = {} def validate(cls, data): - if not cerberus: # Skip validation if Cerberus is not available. + if not cerberus: # Skip validation if Cerberus is not available. return schema = cls.__SCHEMA__ key = id(schema) @@ -22,7 +33,7 @@ def validate(cls, data): v = VALIDATORS[key] except KeyError: v = VALIDATORS[key] = cerberus.Validator(schema, allow_unknown=True) - if v.validate(dict(data), normalize=False): + if v.validate(data, normalize=False): return raise ValidationError(data, v) @@ -33,6 +44,7 @@ class DataView(object): Validates the input mapping on creation. A subclass is expected to provide a `__SCHEMA__` class attribute specifying a validator schema. """ + def __init__(self, data): self.validate(data) self._data = data @@ -42,9 +54,11 @@ def __repr__(self): def __eq__(self, other): if not isinstance(other, type(self)): - raise TypeError("cannot compare {0!r} with {1!r}".format( - type(self).__name__, type(other).__name__, - )) + raise TypeError( + "cannot compare {0!r} with {1!r}".format( + type(self).__name__, type(other).__name__ + ) + ) return self._data == other._data def __getitem__(self, key): @@ -78,6 +92,7 @@ class DataViewCollection(DataView): You should not instantiate an instance from this class, but from one of its subclasses instead. """ + item_class = None def __repr__(self): @@ -103,6 +118,7 @@ class DataViewMapping(DataViewCollection): The keys are primitive values, while values are instances of `item_class`. 
""" + @classmethod def validate(cls, data): for d in data.values(): @@ -126,6 +142,7 @@ class DataViewSequence(DataViewCollection): Each entry is an instance of `item_class`. """ + @classmethod def validate(cls, data): for d in data: diff --git a/pipenv/vendor/pyparsing.py b/pipenv/vendor/pyparsing.py index ab804d530a..9a2dd7bf36 100644 --- a/pipenv/vendor/pyparsing.py +++ b/pipenv/vendor/pyparsing.py @@ -1,4 +1,4 @@ -#-*- coding: utf-8 -*- +# -*- coding: utf-8 -*- # module pyparsing.py # # Copyright (c) 2003-2019 Paul T. McGuire @@ -87,14 +87,16 @@ more complex ones - associate names with your parsed results using :class:`ParserElement.setResultsName` + - access the parsed data, which is returned as a :class:`ParseResults` + object - find some helpful expression short-cuts like :class:`delimitedList` and :class:`oneOf` - find more useful common expressions in the :class:`pyparsing_common` namespace class """ -__version__ = "2.3.1" -__versionTime__ = "09 Jan 2019 23:26 UTC" +__version__ = "2.4.5" +__versionTime__ = "09 Nov 2019 23:03 UTC" __author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" import string @@ -109,6 +111,9 @@ import traceback import types from datetime import datetime +from operator import itemgetter +import itertools +from functools import wraps try: # Python 3 @@ -124,11 +129,11 @@ try: # Python 3 from collections.abc import Iterable - from collections.abc import MutableMapping + from collections.abc import MutableMapping, Mapping except ImportError: # Python 2.7 from collections import Iterable - from collections import MutableMapping + from collections import MutableMapping, Mapping try: from collections import OrderedDict as _OrderedDict @@ -143,29 +148,72 @@ except ImportError: class SimpleNamespace: pass - -#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) - -__all__ = [ -'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', -'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', -'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', -'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', -'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', -'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', -'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', -'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', -'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', -'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', -'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', -'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', -'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', -'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', -'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', -'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', -'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', -] +# version compatibility configuration +__compat__ = 
SimpleNamespace() +__compat__.__doc__ = """ + A cross-version compatibility configuration for pyparsing features that will be + released in a future version. By setting values in this configuration to True, + those features can be enabled in prior versions for compatibility development + and testing. + + - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping + of results names when an And expression is nested within an Or or MatchFirst; set to + True to enable bugfix released in pyparsing 2.3.0, or False to preserve + pre-2.3.0 handling of named results +""" +__compat__.collect_all_And_tokens = True + +__diag__ = SimpleNamespace() +__diag__.__doc__ = """ +Diagnostic configuration (all default to False) + - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results + name is defined on a MatchFirst or Or expression with one or more And subexpressions + (only warns if __compat__.collect_all_And_tokens is False) + - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results + name is defined on a containing expression with ungrouped subexpressions that also + have results names + - warn_name_set_on_empty_Forward - flag to enable warnings whan a Forward is defined + with a results name, but has no contents defined + - warn_on_multiple_string_args_to_oneof - flag to enable warnings whan oneOf is + incorrectly called with multiple str arguments + - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent + calls to ParserElement.setName() +""" +__diag__.warn_multiple_tokens_in_named_alternation = False +__diag__.warn_ungrouped_named_tokens_in_collection = False +__diag__.warn_name_set_on_empty_Forward = False +__diag__.warn_on_multiple_string_args_to_oneof = False +__diag__.enable_debug_on_named_expressions = False + +def _enable_all_warnings(): + __diag__.warn_multiple_tokens_in_named_alternation = True + __diag__.warn_ungrouped_named_tokens_in_collection = True + __diag__.warn_name_set_on_empty_Forward = True + __diag__.warn_on_multiple_string_args_to_oneof = True +__diag__.enable_all_warnings = _enable_all_warnings + + +__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__', + 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', + 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', + 'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', + 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', + 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', + 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', + 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', + 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', + 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', + 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', + 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', + 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', + 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', + 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', + 'replaceWith', 
'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', + 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', + 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation', 'locatedExpr', 'withClass', + 'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', + 'conditionAsParseAction', 're', + ] system_version = tuple(sys.version_info)[:3] PY_3 = system_version[0] == 3 @@ -190,7 +238,7 @@ def _ustr(obj): < returns the unicode object | encodes it with the default encoding | ... >. """ - if isinstance(obj,unicode): + if isinstance(obj, unicode): return obj try: @@ -208,9 +256,10 @@ def _ustr(obj): # build list of single arg builtins, tolerant of Python version, that can be used as parse actions singleArgBuiltins = [] import __builtin__ + for fname in "sum len sorted reversed list tuple set any all min max".split(): try: - singleArgBuiltins.append(getattr(__builtin__,fname)) + singleArgBuiltins.append(getattr(__builtin__, fname)) except AttributeError: continue @@ -221,23 +270,36 @@ def _xml_escape(data): # ampersand must be replaced first from_symbols = '&><"\'' - to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) - for from_,to_ in zip(from_symbols, to_symbols): + to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split()) + for from_, to_ in zip(from_symbols, to_symbols): data = data.replace(from_, to_) return data -alphas = string.ascii_uppercase + string.ascii_lowercase -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) +alphas = string.ascii_uppercase + string.ascii_lowercase +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +_bslash = chr(92) printables = "".join(c for c in string.printable if c not in string.whitespace) + +def conditionAsParseAction(fn, message=None, fatal=False): + msg = message if message is not None else "failed user-defined condition" + exc_type = ParseFatalException if fatal else ParseException + fn = _trim_arity(fn) + + @wraps(fn) + def pa(s, l, t): + if not bool(fn(s, l, t)): + raise exc_type(s, l, msg) + + return pa + class ParseBaseException(Exception): """base exception class for all parsing runtime exceptions""" # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible - def __init__( self, pstr, loc=0, msg=None, elem=None ): + def __init__(self, pstr, loc=0, msg=None, elem=None): self.loc = loc if msg is None: self.msg = pstr @@ -256,27 +318,34 @@ def _from_exception(cls, pe): """ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - def __getattr__( self, aname ): + def __getattr__(self, aname): """supported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text """ - if( aname == "lineno" ): - return lineno( self.loc, self.pstr ) - elif( aname in ("col", "column") ): - return col( self.loc, self.pstr ) - elif( aname == "line" ): - return line( self.loc, self.pstr ) + if aname == "lineno": + return lineno(self.loc, self.pstr) + elif aname in ("col", "column"): + return col(self.loc, self.pstr) + elif aname == "line": + return line(self.loc, self.pstr) else: raise AttributeError(aname) - def __str__( self ): - return "%s (at char %d), (line:%d, col:%d)" % \ - ( self.msg, self.loc, self.lineno, self.column ) - def __repr__( self ): + def __str__(self): + if self.pstr: + if self.loc >= len(self.pstr): + 
foundstr = ', found end of text' + else: + foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\') + else: + foundstr = '' + return ("%s%s (at char %d), (line:%d, col:%d)" % + (self.msg, foundstr, self.loc, self.lineno, self.column)) + def __repr__(self): return _ustr(self) - def markInputline( self, markerString = ">!<" ): + def markInputline(self, markerString=">!<"): """Extracts the exception line from the input string, and marks the location of the exception with a special symbol. """ @@ -350,7 +419,7 @@ def explain(exc, depth=16): callers = inspect.getinnerframes(exc.__traceback__, context=depth) seen = set() for i, ff in enumerate(callers[-depth:]): - frm = ff.frame + frm = ff[0] f_self = frm.f_locals.get('self', None) if isinstance(f_self, ParserElement): @@ -412,21 +481,21 @@ class RecursiveGrammarException(Exception): """exception thrown by :class:`ParserElement.validate` if the grammar could be improperly recursive """ - def __init__( self, parseElementList ): + def __init__(self, parseElementList): self.parseElementTrace = parseElementList - def __str__( self ): + def __str__(self): return "RecursiveGrammarException: %s" % self.parseElementTrace class _ParseResultsWithOffset(object): - def __init__(self,p1,p2): - self.tup = (p1,p2) - def __getitem__(self,i): + def __init__(self, p1, p2): + self.tup = (p1, p2) + def __getitem__(self, i): return self.tup[i] def __repr__(self): return repr(self.tup[0]) - def setOffset(self,i): - self.tup = (self.tup[0],i) + def setOffset(self, i): + self.tup = (self.tup[0], i) class ParseResults(object): """Structured parse results, to provide multiple means of access to @@ -471,7 +540,7 @@ def test(s, fn=repr): - month: 12 - year: 1999 """ - def __new__(cls, toklist=None, name=None, asList=True, modal=True ): + def __new__(cls, toklist=None, name=None, asList=True, modal=True): if isinstance(toklist, cls): return toklist retobj = object.__new__(cls) @@ -480,7 +549,7 @@ def __new__(cls, toklist=None, name=None, asList=True, modal=True ): # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible - def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): + def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance): if self.__doinit: self.__doinit = False self.__name = None @@ -501,85 +570,93 @@ def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance if name is not None and name: if not modal: self.__accumNames[name] = 0 - if isinstance(name,int): - name = _ustr(name) # will always return a str, but use _ustr for consistency + if isinstance(name, int): + name = _ustr(name) # will always return a str, but use _ustr for consistency self.__name = name - if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): - if isinstance(toklist,basestring): - toklist = [ toklist ] + if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])): + if isinstance(toklist, basestring): + toklist = [toklist] if asList: - if isinstance(toklist,ParseResults): + if isinstance(toklist, ParseResults): self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0) else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) self[name].__name = name else: try: self[name] = toklist[0] - except (KeyError,TypeError,IndexError): + except (KeyError, TypeError, 
IndexError): self[name] = toklist - def __getitem__( self, i ): - if isinstance( i, (int,slice) ): + def __getitem__(self, i): + if isinstance(i, (int, slice)): return self.__toklist[i] else: if i not in self.__accumNames: return self.__tokdict[i][-1][0] else: - return ParseResults([ v[0] for v in self.__tokdict[i] ]) + return ParseResults([v[0] for v in self.__tokdict[i]]) - def __setitem__( self, k, v, isinstance=isinstance ): - if isinstance(v,_ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] + def __setitem__(self, k, v, isinstance=isinstance): + if isinstance(v, _ParseResultsWithOffset): + self.__tokdict[k] = self.__tokdict.get(k, list()) + [v] sub = v[0] - elif isinstance(k,(int,slice)): + elif isinstance(k, (int, slice)): self.__toklist[k] = v sub = v else: - self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] + self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)] sub = v - if isinstance(sub,ParseResults): + if isinstance(sub, ParseResults): sub.__parent = wkref(self) - def __delitem__( self, i ): - if isinstance(i,(int,slice)): - mylen = len( self.__toklist ) + def __delitem__(self, i): + if isinstance(i, (int, slice)): + mylen = len(self.__toklist) del self.__toklist[i] # convert int to slice if isinstance(i, int): if i < 0: i += mylen - i = slice(i, i+1) + i = slice(i, i + 1) # get removed indices removed = list(range(*i.indices(mylen))) removed.reverse() # fixup indices in token dictionary - for name,occurrences in self.__tokdict.items(): + for name, occurrences in self.__tokdict.items(): for j in removed: for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) else: del self.__tokdict[i] - def __contains__( self, k ): + def __contains__(self, k): return k in self.__tokdict - def __len__( self ): return len( self.__toklist ) - def __bool__(self): return ( not not self.__toklist ) + def __len__(self): + return len(self.__toklist) + + def __bool__(self): + return (not not self.__toklist) __nonzero__ = __bool__ - def __iter__( self ): return iter( self.__toklist ) - def __reversed__( self ): return iter( self.__toklist[::-1] ) - def _iterkeys( self ): + + def __iter__(self): + return iter(self.__toklist) + + def __reversed__(self): + return iter(self.__toklist[::-1]) + + def _iterkeys(self): if hasattr(self.__tokdict, "iterkeys"): return self.__tokdict.iterkeys() else: return iter(self.__tokdict) - def _itervalues( self ): + def _itervalues(self): return (self[k] for k in self._iterkeys()) - def _iteritems( self ): + def _iteritems(self): return ((k, self[k]) for k in self._iterkeys()) if PY_3: @@ -602,24 +679,24 @@ def _iteritems( self ): iteritems = _iteritems """Returns an iterator of all named result key-value tuples (Python 2.x only).""" - def keys( self ): + def keys(self): """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.iterkeys()) - def values( self ): + def values(self): """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.itervalues()) - def items( self ): + def items(self): """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" return list(self.iteritems()) - def haskeys( self ): + def haskeys(self): """Since keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined results names.""" 
return bool(self.__tokdict) - def pop( self, *args, **kwargs): + def pop(self, *args, **kwargs): """ Removes and returns item at specified index (default= ``last``). Supports both ``list`` and ``dict`` semantics for ``pop()``. If @@ -658,14 +735,14 @@ def remove_LABEL(tokens): """ if not args: args = [-1] - for k,v in kwargs.items(): + for k, v in kwargs.items(): if k == 'default': args = (args[0], v) else: raise TypeError("pop() got an unexpected keyword argument '%s'" % k) - if (isinstance(args[0], int) or - len(args) == 1 or - args[0] in self): + if (isinstance(args[0], int) + or len(args) == 1 + or args[0] in self): index = args[0] ret = self[index] del self[index] @@ -697,7 +774,7 @@ def get(self, key, defaultValue=None): else: return defaultValue - def insert( self, index, insStr ): + def insert(self, index, insStr): """ Inserts new element at location index in the list of parsed tokens. @@ -714,11 +791,11 @@ def insert_locn(locn, tokens): """ self.__toklist.insert(index, insStr) # fixup indices in token dictionary - for name,occurrences in self.__tokdict.items(): + for name, occurrences in self.__tokdict.items(): for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - def append( self, item ): + def append(self, item): """ Add single element to end of ParseResults list of elements. @@ -733,7 +810,7 @@ def append_sum(tokens): """ self.__toklist.append(item) - def extend( self, itemseq ): + def extend(self, itemseq): """ Add sequence of elements to end of ParseResults list of elements. @@ -748,78 +825,70 @@ def make_palindrome(tokens): print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' """ if isinstance(itemseq, ParseResults): - self += itemseq + self.__iadd__(itemseq) else: self.__toklist.extend(itemseq) - def clear( self ): + def clear(self): """ Clear all elements and results names. 
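# [editor's aside: illustrative sketch, not part of the patch] pop() and get()
# honor both list and dict semantics, as the docstrings above describe
# (assuming a plain `pyparsing` import for the vendored copy):
from pyparsing import Word, nums

result = (Word(nums) * 3).parseString("1 2 3")
assert result.pop() == "3"                    # list semantics: default pops the last item
assert result.asList() == ["1", "2"]
assert result.get("missing", "n/a") == "n/a"  # dict semantics: default for an absent name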
""" del self.__toklist[:] self.__tokdict.clear() - def __getattr__( self, name ): + def __getattr__(self, name): try: return self[name] except KeyError: return "" - if name in self.__tokdict: - if name not in self.__accumNames: - return self.__tokdict[name][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[name] ]) - else: - return "" - - def __add__( self, other ): + def __add__(self, other): ret = self.copy() ret += other return ret - def __iadd__( self, other ): + def __iadd__(self, other): if other.__tokdict: offset = len(self.__toklist) - addoffset = lambda a: offset if a<0 else a+offset + addoffset = lambda a: offset if a < 0 else a + offset otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) - for (k,vlist) in otheritems for v in vlist] - for k,v in otherdictitems: + otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) + for k, vlist in otheritems for v in vlist] + for k, v in otherdictitems: self[k] = v - if isinstance(v[0],ParseResults): + if isinstance(v[0], ParseResults): v[0].__parent = wkref(self) self.__toklist += other.__toklist - self.__accumNames.update( other.__accumNames ) + self.__accumNames.update(other.__accumNames) return self def __radd__(self, other): - if isinstance(other,int) and other == 0: + if isinstance(other, int) and other == 0: # useful for merging many ParseResults using sum() builtin return self.copy() else: # this may raise a TypeError - so be it return other + self - def __repr__( self ): - return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) + def __repr__(self): + return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict)) - def __str__( self ): + def __str__(self): return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' - def _asStringList( self, sep='' ): + def _asStringList(self, sep=''): out = [] for item in self.__toklist: if out and sep: out.append(sep) - if isinstance( item, ParseResults ): + if isinstance(item, ParseResults): out += item._asStringList() else: - out.append( _ustr(item) ) + out.append(_ustr(item)) return out - def asList( self ): + def asList(self): """ Returns the parse results as a nested list of matching tokens, all converted to strings. @@ -834,9 +903,9 @@ def asList( self ): result_list = result.asList() print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] """ - return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] + return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist] - def asDict( self ): + def asDict(self): """ Returns the named parse results as a nested dictionary. @@ -870,27 +939,27 @@ def toItem(obj): else: return obj - return dict((k,toItem(v)) for k,v in item_fn()) + return dict((k, toItem(v)) for k, v in item_fn()) - def copy( self ): + def copy(self): """ Returns a new copy of a :class:`ParseResults` object. """ - ret = ParseResults( self.__toklist ) + ret = ParseResults(self.__toklist) ret.__tokdict = dict(self.__tokdict.items()) ret.__parent = self.__parent - ret.__accumNames.update( self.__accumNames ) + ret.__accumNames.update(self.__accumNames) ret.__name = self.__name return ret - def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): + def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True): """ (Deprecated) Returns the parse results as XML. 
Tags are created for tokens and lists that have defined results names. """ nl = "\n" out = [] - namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() - for v in vlist) + namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items() + for v in vlist) nextLevelIndent = indent + " " # collapse out indents if formatting is not desired @@ -912,20 +981,20 @@ def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): else: selfTag = "ITEM" - out += [ nl, indent, "<", selfTag, ">" ] + out += [nl, indent, "<", selfTag, ">"] - for i,res in enumerate(self.__toklist): - if isinstance(res,ParseResults): + for i, res in enumerate(self.__toklist): + if isinstance(res, ParseResults): if i in namedItems: - out += [ res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] + out += [res.asXML(namedItems[i], + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] else: - out += [ res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] + out += [res.asXML(None, + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] else: # individual token, see if there is a name for it resTag = None @@ -937,16 +1006,16 @@ def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): else: resTag = "ITEM" xmlBodyText = _xml_escape(_ustr(res)) - out += [ nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "</", resTag, ">" ] + out += [nl, nextLevelIndent, "<", resTag, ">", + xmlBodyText, + "</", resTag, ">"] - out += [ nl, indent, "</", selfTag, ">" ] + out += [nl, indent, "</", selfTag, ">"] return "".join(out) - def __lookup(self,sub): - for k,vlist in self.__tokdict.items(): - for v,loc in vlist: + def __lookup(self, sub): + for k, vlist in self.__tokdict.items(): + for v, loc in vlist: if sub is v: return k return None @@ -984,14 +1053,14 @@ def getName(self): return par.__lookup(self) else: return None - elif (len(self) == 1 and - len(self.__tokdict) == 1 and - next(iter(self.__tokdict.values()))[0][1] in (0,-1)): + elif (len(self) == 1 + and len(self.__tokdict) == 1 + and next(iter(self.__tokdict.values()))[0][1] in (0, -1)): return next(iter(self.__tokdict.keys())) else: return None - def dump(self, indent='', depth=0, full=True): + def dump(self, indent='', full=True, include_list=True, _depth=0): """ Diagnostic method for listing out the contents of a :class:`ParseResults`. 
Accepts an optional ``indent`` argument so @@ -1014,28 +1083,45 @@ def dump(self, indent='', depth=0, full=True): """ out = [] NL = '\n' - out.append( indent+_ustr(self.asList()) ) + if include_list: + out.append(indent + _ustr(self.asList())) + else: + out.append('') + if full: if self.haskeys(): - items = sorted((str(k), v) for k,v in self.items()) - for k,v in items: + items = sorted((str(k), v) for k, v in self.items()) + for k, v in items: if out: out.append(NL) - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): + out.append("%s%s- %s: " % (indent, (' ' * _depth), k)) + if isinstance(v, ParseResults): if v: - out.append( v.dump(indent,depth+1) ) + out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1)) else: out.append(_ustr(v)) else: out.append(repr(v)) - elif any(isinstance(vv,ParseResults) for vv in self): + elif any(isinstance(vv, ParseResults) for vv in self): v = self - for i,vv in enumerate(v): - if isinstance(vv,ParseResults): - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) + for i, vv in enumerate(v): + if isinstance(vv, ParseResults): + out.append("\n%s%s[%d]:\n%s%s%s" % (indent, + (' ' * (_depth)), + i, + indent, + (' ' * (_depth + 1)), + vv.dump(indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1))) else: - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) + out.append("\n%s%s[%d]:\n%s%s%s" % (indent, + (' ' * (_depth)), + i, + indent, + (' ' * (_depth + 1)), + _ustr(vv))) return "".join(out) @@ -1068,18 +1154,15 @@ def pprint(self, *args, **kwargs): # add support for pickle protocol def __getstate__(self): - return ( self.__toklist, - ( self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name ) ) + return (self.__toklist, + (self.__tokdict.copy(), + self.__parent is not None and self.__parent() or None, + self.__accumNames, + self.__name)) - def __setstate__(self,state): + def __setstate__(self, state): self.__toklist = state[0] - (self.__tokdict, - par, - inAccumNames, - self.__name) = state[1] + self.__tokdict, par, inAccumNames, self.__name = state[1] self.__accumNames = {} self.__accumNames.update(inAccumNames) if par is not None: @@ -1091,11 +1174,39 @@ def __getnewargs__(self): return self.__toklist, self.__name, self.__asList, self.__modal def __dir__(self): - return (dir(type(self)) + list(self.keys())) + return dir(type(self)) + list(self.keys()) + + @classmethod + def from_dict(cls, other, name=None): + """ + Helper classmethod to construct a ParseResults from a dict, preserving the + name-value relations as results names. If an optional 'name' argument is + given, a nested ParseResults will be returned + """ + def is_iterable(obj): + try: + iter(obj) + except Exception: + return False + else: + if PY_3: + return not isinstance(obj, (str, bytes)) + else: + return not isinstance(obj, basestring) + + ret = cls([]) + for k, v in other.items(): + if isinstance(v, Mapping): + ret += cls.from_dict(v, name=k) + else: + ret += cls([v], name=k, asList=is_iterable(v)) + if name is not None: + ret = cls([ret], name=name) + return ret MutableMapping.register(ParseResults) -def col (loc,strg): +def col (loc, strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. 
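# [editor's aside: illustrative sketch, not part of the patch] The new
# from_dict() classmethod above builds a ParseResults whose results names mirror
# the dict keys; nested mappings become nested ParseResults (plain `pyparsing`
# import assumed):
from pyparsing import ParseResults

pr = ParseResults.from_dict({"name": "pyparsing", "deps": {"count": 0}})
assert pr["name"] == "pyparsing"
assert pr["deps"]["count"] == 0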
@@ -1107,9 +1218,9 @@ def col (loc,strg): location, and line and column positions within the parsed string. """ s = strg - return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) + return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) -def lineno(loc,strg): +def lineno(loc, strg): """Returns current line number within a string, counting newlines as line separators. The first line is number 1. @@ -1119,26 +1230,26 @@ def lineno(loc,strg): suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ - return strg.count("\n",0,loc) + 1 + return strg.count("\n", 0, loc) + 1 -def line( loc, strg ): +def line(loc, strg): """Returns the line of text containing loc within a string, counting newlines as line separators. """ lastCR = strg.rfind("\n", 0, loc) nextCR = strg.find("\n", loc) if nextCR >= 0: - return strg[lastCR+1:nextCR] + return strg[lastCR + 1:nextCR] else: - return strg[lastCR+1:] + return strg[lastCR + 1:] -def _defaultStartDebugAction( instring, loc, expr ): - print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))) +def _defaultStartDebugAction(instring, loc, expr): + print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring)))) -def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): - print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) +def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks): + print("Matched " + _ustr(expr) + " -> " + str(toks.asList())) -def _defaultExceptionDebugAction( instring, loc, expr, exc ): - print ("Exception raised:" + _ustr(exc)) +def _defaultExceptionDebugAction(instring, loc, expr, exc): + print("Exception raised:" + _ustr(exc)) def nullDebugAction(*args): """'Do-nothing' debug action, to suppress debugging output during parsing.""" @@ -1169,16 +1280,16 @@ def nullDebugAction(*args): 'decorator to trim function calls to match the arity of the target' def _trim_arity(func, maxargs=2): if func in singleArgBuiltins: - return lambda s,l,t: func(t) + return lambda s, l, t: func(t) limit = [0] foundArity = [False] # traceback return data structure changed in Py3.5 - normalize back to plain tuples - if system_version[:2] >= (3,5): + if system_version[:2] >= (3, 5): def extract_stack(limit=0): # special handling for Python 3.5.0 - extra deep call stack by 1 - offset = -3 if system_version == (3,5,0) else -2 - frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] + offset = -3 if system_version == (3, 5, 0) else -2 + frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset] return [frame_summary[:2]] def extract_tb(tb, limit=0): frames = traceback.extract_tb(tb, limit=limit) @@ -1195,7 +1306,7 @@ def extract_tb(tb, limit=0): # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
this_line = extract_stack(limit=2)[-1] - pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) + pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF) def wrapper(*args): while 1: @@ -1213,7 +1324,10 @@ def wrapper(*args): if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: raise finally: - del tb + try: + del tb + except NameError: + pass if limit[0] <= maxargs: limit[0] += 1 @@ -1231,13 +1345,14 @@ def wrapper(*args): return wrapper + class ParserElement(object): """Abstract base level parser element class.""" DEFAULT_WHITE_CHARS = " \n\t\r" verbose_stacktrace = False @staticmethod - def setDefaultWhitespaceChars( chars ): + def setDefaultWhitespaceChars(chars): r""" Overrides the default whitespace chars @@ -1274,10 +1389,10 @@ def inlineLiteralsUsing(cls): """ ParserElement._literalStringClass = cls - def __init__( self, savelist=False ): + def __init__(self, savelist=False): self.parseAction = list() self.failAction = None - #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall + # ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall self.strRepr = None self.resultsName = None self.saveAsList = savelist @@ -1292,12 +1407,12 @@ def __init__( self, savelist=False ): self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index self.errmsg = "" self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = ( None, None, None ) #custom debug actions + self.debugActions = (None, None, None) # custom debug actions self.re = None self.callPreparse = True # used to avoid redundant calls to preParse self.callDuringTry = False - def copy( self ): + def copy(self): """ Make a copy of this :class:`ParserElement`. Useful for defining different parse actions for the same parsing pattern, using copies of @@ -1306,8 +1421,8 @@ def copy( self ): Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") - integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K") + integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) @@ -1317,16 +1432,16 @@ def copy( self ): Equivalent form of ``expr.copy()`` is just ``expr()``:: - integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") """ - cpy = copy.copy( self ) + cpy = copy.copy(self) cpy.parseAction = self.parseAction[:] cpy.ignoreExprs = self.ignoreExprs[:] if self.copyDefaultWhiteChars: cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS return cpy - def setName( self, name ): + def setName(self, name): """ Define name for this expression, makes debugging and exception messages clearer. 
@@ -1337,11 +1452,11 @@ def setName( self, name ): """ self.name = name self.errmsg = "Expected " + self.name - if hasattr(self,"exception"): - self.exception.msg = self.errmsg + if __diag__.enable_debug_on_named_expressions: + self.setDebug() return self - def setResultsName( self, name, listAllMatches=False ): + def setResultsName(self, name, listAllMatches=False): """ Define name for referencing matching tokens as a nested attribute of the returned parse results. @@ -1362,15 +1477,18 @@ def setResultsName( self, name, listAllMatches=False ): # equivalent form: date_str = integer("year") + '/' + integer("month") + '/' + integer("day") """ + return self._setResultsName(name, listAllMatches) + + def _setResultsName(self, name, listAllMatches=False): newself = self.copy() if name.endswith("*"): name = name[:-1] - listAllMatches=True + listAllMatches = True newself.resultsName = name newself.modalResults = not listAllMatches return newself - def setBreak(self,breakFlag = True): + def setBreak(self, breakFlag=True): """Method to invoke the Python pdb debugger when this element is about to be parsed. Set ``breakFlag`` to True to enable, False to disable. @@ -1379,20 +1497,21 @@ def setBreak(self,breakFlag = True): _parseMethod = self._parse def breaker(instring, loc, doActions=True, callPreParse=True): import pdb + # this call to pdb.set_trace() is intentional, not a checkin error pdb.set_trace() - return _parseMethod( instring, loc, doActions, callPreParse ) + return _parseMethod(instring, loc, doActions, callPreParse) breaker._originalParseMethod = _parseMethod self._parse = breaker else: - if hasattr(self._parse,"_originalParseMethod"): + if hasattr(self._parse, "_originalParseMethod"): self._parse = self._parse._originalParseMethod return self - def setParseAction( self, *fns, **kwargs ): + def setParseAction(self, *fns, **kwargs): """ Define one or more actions to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as ``fn(s,loc,toks)`` , - ``fn(loc,toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` , + ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - s = the original string being parsed (see note below) - loc = the location of the matching substring @@ -1402,8 +1521,11 @@ def setParseAction( self, *fns, **kwargs ): value from fn, and the modified list of tokens will replace the original. Otherwise, fn does not need to return any value. + If None is passed as the parse action, all previously added parse actions for this + expression are cleared. + Optional keyword arguments: - - callDuringTry = (default= ``False`` ) indicate if parse action should be run during lookaheads and alternate testing + - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. 
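# [editor's aside: illustrative sketch, not part of the patch] Parse actions may
# take 0-3 arguments (_trim_arity above adapts the call), and this release adds
# setParseAction(None) to clear previously set actions (plain `pyparsing`
# import assumed):
from pyparsing import Word, nums

integer = Word(nums)
integer.setParseAction(lambda t: int(t[0]))   # short (toks,) form
assert integer.parseString("42")[0] == 42
integer.setParseAction(None)                  # clear all parse actions again
assert integer.parseString("42")[0] == "42"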
See :class:`parseString for more @@ -1425,11 +1547,16 @@ def setParseAction( self, *fns, **kwargs ): # note that integer fields are now ints, not strings date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] """ - self.parseAction = list(map(_trim_arity, list(fns))) - self.callDuringTry = kwargs.get("callDuringTry", False) + if list(fns) == [None,]: + self.parseAction = [] + else: + if not all(callable(fn) for fn in fns): + raise TypeError("parse actions must be callable") + self.parseAction = list(map(_trim_arity, list(fns))) + self.callDuringTry = kwargs.get("callDuringTry", False) return self - def addParseAction( self, *fns, **kwargs ): + def addParseAction(self, *fns, **kwargs): """ Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`. @@ -1457,21 +1584,17 @@ def addCondition(self, *fns, **kwargs): result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) """ - msg = kwargs.get("message", "failed user-defined condition") - exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException for fn in fns: - fn = _trim_arity(fn) - def pa(s,l,t): - if not bool(fn(s,l,t)): - raise exc_type(s,l,msg) - self.parseAction.append(pa) + self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'), + fatal=kwargs.get('fatal', False))) + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) return self - def setFailAction( self, fn ): + def setFailAction(self, fn): """Define action to perform if parsing fails at this expression. Fail acton fn is a callable function that takes the arguments - ``fn(s,loc,expr,err)`` where: + ``fn(s, loc, expr, err)`` where: - s = string being parsed - loc = location where expression match was attempted and failed - expr = the parse expression that failed @@ -1481,22 +1604,22 @@ def setFailAction( self, fn ): self.failAction = fn return self - def _skipIgnorables( self, instring, loc ): + def _skipIgnorables(self, instring, loc): exprsFound = True while exprsFound: exprsFound = False for e in self.ignoreExprs: try: while 1: - loc,dummy = e._parse( instring, loc ) + loc, dummy = e._parse(instring, loc) exprsFound = True except ParseException: pass return loc - def preParse( self, instring, loc ): + def preParse(self, instring, loc): if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) + loc = self._skipIgnorables(instring, loc) if self.skipWhitespace: wt = self.whiteChars @@ -1506,101 +1629,105 @@ def preParse( self, instring, loc ): return loc - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): return loc, [] - def postParse( self, instring, loc, tokenlist ): + def postParse(self, instring, loc, tokenlist): return tokenlist - #~ @profile - def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): - debugging = ( self.debug ) #and doActions ) + # ~ @profile + def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True): + TRY, MATCH, FAIL = 0, 1, 2 + debugging = (self.debug) # and doActions) if debugging or self.failAction: - #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - if (self.debugActions[0] ): - self.debugActions[0]( instring, loc, self ) - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc + # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, 
instring), col(loc, instring))) + if self.debugActions[TRY]: + self.debugActions[TRY](instring, loc, self) try: - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - except ParseBaseException as err: - #~ print ("Exception raised:", err) - if self.debugActions[2]: - self.debugActions[2]( instring, tokensStart, self, err ) + if callPreParse and self.callPreparse: + preloc = self.preParse(instring, loc) + else: + preloc = loc + tokensStart = preloc + if self.mayIndexError or preloc >= len(instring): + try: + loc, tokens = self.parseImpl(instring, preloc, doActions) + except IndexError: + raise ParseException(instring, len(instring), self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, preloc, doActions) + except Exception as err: + # ~ print ("Exception raised:", err) + if self.debugActions[FAIL]: + self.debugActions[FAIL](instring, tokensStart, self, err) if self.failAction: - self.failAction( instring, tokensStart, self, err ) + self.failAction(instring, tokensStart, self, err) raise else: if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) + preloc = self.preParse(instring, loc) else: preloc = loc tokensStart = preloc if self.mayIndexError or preloc >= len(instring): try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) + loc, tokens = self.parseImpl(instring, preloc, doActions) except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) + raise ParseException(instring, len(instring), self.errmsg, self) else: - loc,tokens = self.parseImpl( instring, preloc, doActions ) + loc, tokens = self.parseImpl(instring, preloc, doActions) - tokens = self.postParse( instring, loc, tokens ) + tokens = self.postParse(instring, loc, tokens) - retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) + retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults) if self.parseAction and (doActions or self.callDuringTry): if debugging: try: for fn in self.parseAction: try: - tokens = fn( instring, tokensStart, retTokens ) + tokens = fn(instring, tokensStart, retTokens) except IndexError as parse_action_exc: exc = ParseException("exception raised in parse action") exc.__cause__ = parse_action_exc raise exc if tokens is not None and tokens is not retTokens: - retTokens = ParseResults( tokens, + retTokens = ParseResults(tokens, self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - except ParseBaseException as err: - #~ print "Exception raised in user parse action:", err - if (self.debugActions[2] ): - self.debugActions[2]( instring, tokensStart, self, err ) + asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults) + except Exception as err: + # ~ print "Exception raised in user parse action:", err + if self.debugActions[FAIL]: + self.debugActions[FAIL](instring, tokensStart, self, err) raise else: for fn in self.parseAction: try: - tokens = fn( instring, tokensStart, retTokens ) + tokens = fn(instring, tokensStart, retTokens) except IndexError as parse_action_exc: exc = ParseException("exception raised in parse action") exc.__cause__ = parse_action_exc raise exc if tokens is not None and tokens is not retTokens: - retTokens = ParseResults( tokens, + retTokens = ParseResults(tokens, self.resultsName, - asList=self.saveAsList and 
isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) + asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults) if debugging: - #~ print ("Matched",self,"->",retTokens.asList()) - if (self.debugActions[1] ): - self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) + # ~ print ("Matched", self, "->", retTokens.asList()) + if self.debugActions[MATCH]: + self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens) return loc, retTokens - def tryParse( self, instring, loc ): + def tryParse(self, instring, loc): try: - return self._parse( instring, loc, doActions=False )[0] + return self._parse(instring, loc, doActions=False)[0] except ParseFatalException: - raise ParseException( instring, loc, self.errmsg, self) + raise ParseException(instring, loc, self.errmsg, self) def canParseNext(self, instring, loc): try: @@ -1697,7 +1824,7 @@ def cache_len(self): # this method gets repeatedly called during backtracking with the same arguments - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): + def _parseCache(self, instring, loc, doActions=True, callPreParse=True): HIT, MISS = 0, 1 lookup = (self, instring, loc, callPreParse, doActions) with ParserElement.packrat_cache_lock: @@ -1718,7 +1845,7 @@ def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): ParserElement.packrat_cache_stats[HIT] += 1 if isinstance(value, Exception): raise value - return (value[0], value[1].copy()) + return value[0], value[1].copy() _parse = _parseNoCache @@ -1763,12 +1890,16 @@ def enablePackrat(cache_size_limit=128): ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) ParserElement._parse = ParserElement._parseCache - def parseString( self, instring, parseAll=False ): + def parseString(self, instring, parseAll=False): """ Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built. + Returns the parsed data as a :class:`ParseResults` object, which may be + accessed as a list, or as a dict or object with attributes if the given parser + includes results names. + If you want the grammar to require that the entire input string be successfully parsed, then set ``parseAll`` to True (equivalent to ending the grammar with ``StringEnd()``). 
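# [editor's aside: illustrative sketch, not part of the patch] enablePackrat()
# above opts in to memoized re-parsing, and parseAll=True makes parseString()
# require the whole input to match (plain `pyparsing` import assumed):
import pyparsing as pp

pp.ParserElement.enablePackrat()              # class-wide packrat cache
greet = pp.Word(pp.alphas) + "," + pp.Word(pp.alphas) + "!"
print(greet.parseString("Hello, World!", parseAll=True))
# -> ['Hello', ',', 'World', '!']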
@@ -1782,7 +1913,7 @@ def parseString( self, instring, parseAll=False ): - calling ``parseWithTabs`` on your grammar before calling ``parseString`` (see :class:`parseWithTabs`) - - define your parse action using the full ``(s,loc,toks)`` signature, and + - define your parse action using the full ``(s, loc, toks)`` signature, and reference the input string using the parse action's ``s`` argument - explictly expand the tabs in your input string before calling ``parseString`` @@ -1795,17 +1926,17 @@ def parseString( self, instring, parseAll=False ): ParserElement.resetCache() if not self.streamlined: self.streamline() - #~ self.saveAsList = True + # ~ self.saveAsList = True for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = instring.expandtabs() try: - loc, tokens = self._parse( instring, 0 ) + loc, tokens = self._parse(instring, 0) if parseAll: - loc = self.preParse( instring, loc ) + loc = self.preParse(instring, loc) se = Empty() + StringEnd() - se._parse( instring, loc ) + se._parse(instring, loc) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise @@ -1815,7 +1946,7 @@ def parseString( self, instring, parseAll=False ): else: return tokens - def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): + def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): """ Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional @@ -1830,7 +1961,7 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) - for tokens,start,end in Word(alphas).scanString(source): + for tokens, start, end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) @@ -1862,16 +1993,16 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): try: while loc <= instrlen and matches < maxMatches: try: - preloc = preparseFn( instring, loc ) - nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) + preloc = preparseFn(instring, loc) + nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) except ParseException: - loc = preloc+1 + loc = preloc + 1 else: if nextLoc > loc: matches += 1 yield tokens, preloc, nextLoc if overlap: - nextloc = preparseFn( instring, loc ) + nextloc = preparseFn(instring, loc) if nextloc > loc: loc = nextLoc else: @@ -1879,7 +2010,7 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): else: loc = nextLoc else: - loc = preloc+1 + loc = preloc + 1 except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise @@ -1887,7 +2018,7 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc - def transformString( self, instring ): + def transformString(self, instring): """ Extension to :class:`scanString`, to modify matching text with modified tokens that may be returned from a parse action. 
To use ``transformString``, define a grammar and @@ -1913,19 +2044,19 @@ def transformString( self, instring ): # keep string locs straight between transformString and scanString self.keepTabs = True try: - for t,s,e in self.scanString( instring ): - out.append( instring[lastE:s] ) + for t, s, e in self.scanString(instring): + out.append(instring[lastE:s]) if t: - if isinstance(t,ParseResults): + if isinstance(t, ParseResults): out += t.asList() - elif isinstance(t,list): + elif isinstance(t, list): out += t else: out.append(t) lastE = e out.append(instring[lastE:]) out = [o for o in out if o] - return "".join(map(_ustr,_flatten(out))) + return "".join(map(_ustr, _flatten(out))) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise @@ -1933,7 +2064,7 @@ def transformString( self, instring ): # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc - def searchString( self, instring, maxMatches=_MAX_INT ): + def searchString(self, instring, maxMatches=_MAX_INT): """ Another extension to :class:`scanString`, simplifying the access to the tokens found to match the given parse expression. May be called with optional @@ -1955,7 +2086,7 @@ def searchString( self, instring, maxMatches=_MAX_INT ): ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] """ try: - return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) + return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)]) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise @@ -1981,14 +2112,14 @@ def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): """ splits = 0 last = 0 - for t,s,e in self.scanString(instring, maxMatches=maxsplit): + for t, s, e in self.scanString(instring, maxMatches=maxsplit): yield instring[last:s] if includeSeparators: yield t[0] last = e yield instring[last:] - def __add__(self, other ): + def __add__(self, other): """ Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement converts them to :class:`Literal`s by default. @@ -2002,24 +2133,42 @@ def __add__(self, other ): prints:: Hello, World! -> ['Hello', ',', 'World', '!'] + + ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. + + Literal('start') + ... + Literal('end') + + is equivalent to: + + Literal('start') + SkipTo('end')("_skipped*") + Literal('end') + + Note that the skipped text is returned with '_skipped' as a results name, + and to support having multiple skips in the same parser, the value returned is + a list of all skipped text. 
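# [editor's aside: illustrative sketch, not part of the patch] The new `...`
# shorthand documented above expands to a SkipTo and collects the skipped text
# under the '_skipped' results name (plain `pyparsing` import assumed; exact
# whitespace in the skipped text may vary):
from pyparsing import Literal

expr = Literal("start") + ... + Literal("end")
result = expr.parseString("start some filler end")
print(result["_skipped"])                     # roughly: ['some filler ']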
""" - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if other is Ellipsis: + return _PendingSkip(self) + + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None - return And( [ self, other ] ) + return And([self, other]) - def __radd__(self, other ): + def __radd__(self, other): """ Implementation of + operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if other is Ellipsis: + return SkipTo(self)("_skipped*") + self + + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other + self @@ -2027,64 +2176,70 @@ def __sub__(self, other): """ Implementation of - operator, returns :class:`And` with error stop """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return self + And._ErrorStop() + other - def __rsub__(self, other ): + def __rsub__(self, other): """ Implementation of - operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other - self - def __mul__(self,other): + def __mul__(self, other): """ Implementation of * operator, allows use of ``expr * 3`` in place of ``expr + expr + expr``. Expressions may also me multiplied by a 2-integer - tuple, similar to ``{min,max}`` multipliers in regular expressions. Tuples + tuple, similar to ``{min, max}`` multipliers in regular expressions. 
Tuples may also include ``None`` as in: - - ``expr*(n,None)`` or ``expr*(n,)`` is equivalent + - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent to ``expr*n + ZeroOrMore(expr)`` (read as "at least n instances of ``expr``") - - ``expr*(None,n)`` is equivalent to ``expr*(0,n)`` + - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` (read as "0 to n instances of ``expr``") - - ``expr*(None,None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1,None)`` is equivalent to ``OneOrMore(expr)`` + - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - Note that ``expr*(None,n)`` does not raise an exception if + Note that ``expr*(None, n)`` does not raise an exception if more than n exprs exist in the input stream; that is, - ``expr*(None,n)`` does not enforce a maximum number of expr + ``expr*(None, n)`` does not enforce a maximum number of expr occurrences. If this behavior is desired, then write - ``expr*(None,n) + ~expr`` + ``expr*(None, n) + ~expr`` """ - if isinstance(other,int): - minElements, optElements = other,0 - elif isinstance(other,tuple): + if other is Ellipsis: + other = (0, None) + elif isinstance(other, tuple) and other[:1] == (Ellipsis,): + other = ((0, ) + other[1:] + (None,))[:2] + + if isinstance(other, int): + minElements, optElements = other, 0 + elif isinstance(other, tuple): + other = tuple(o if o is not Ellipsis else None for o in other) other = (other + (None, None))[:2] if other[0] is None: other = (0, other[1]) - if isinstance(other[0],int) and other[1] is None: + if isinstance(other[0], int) and other[1] is None: if other[0] == 0: return ZeroOrMore(self) if other[0] == 1: return OneOrMore(self) else: - return self*other[0] + ZeroOrMore(self) - elif isinstance(other[0],int) and isinstance(other[1],int): + return self * other[0] + ZeroOrMore(self) + elif isinstance(other[0], int) and isinstance(other[1], int): minElements, optElements = other optElements -= minElements else: - raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) + raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1])) else: raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) @@ -2093,108 +2248,152 @@ def __mul__(self,other): if optElements < 0: raise ValueError("second tuple value must be greater or equal to first tuple value") if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0,0)") + raise ValueError("cannot multiply ParserElement by 0 or (0, 0)") - if (optElements): + if optElements: def makeOptionalList(n): - if n>1: - return Optional(self + makeOptionalList(n-1)) + if n > 1: + return Optional(self + makeOptionalList(n - 1)) else: return Optional(self) if minElements: if minElements == 1: ret = self + makeOptionalList(optElements) else: - ret = And([self]*minElements) + makeOptionalList(optElements) + ret = And([self] * minElements) + makeOptionalList(optElements) else: ret = makeOptionalList(optElements) else: if minElements == 1: ret = self else: - ret = And([self]*minElements) + ret = And([self] * minElements) return ret def __rmul__(self, other): return self.__mul__(other) - def __or__(self, other ): + def __or__(self, other): """ Implementation of | operator - returns :class:`MatchFirst` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if other is 
Ellipsis: + return _PendingSkip(self, must_skip=True) + + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None - return MatchFirst( [ self, other ] ) + return MatchFirst([self, other]) - def __ror__(self, other ): + def __ror__(self, other): """ Implementation of | operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other | self - def __xor__(self, other ): + def __xor__(self, other): """ Implementation of ^ operator - returns :class:`Or` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None - return Or( [ self, other ] ) + return Or([self, other]) - def __rxor__(self, other ): + def __rxor__(self, other): """ Implementation of ^ operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other ^ self - def __and__(self, other ): + def __and__(self, other): """ Implementation of & operator - returns :class:`Each` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None - return Each( [ self, other ] ) + return Each([self, other]) - def __rand__(self, other ): + def __rand__(self, other): """ Implementation of & operator when left operand is not a :class:`ParserElement` """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) return None return other & self - def __invert__( self ): + def __invert__(self): """ Implementation of ~ operator - returns :class:`NotAny` """ - return NotAny( self ) + return NotAny(self) + + def __iter__(self): + # must implement __iter__ to 
override legacy use of sequential access to __getitem__ to + # iterate over a sequence + raise TypeError('%r object is not iterable' % self.__class__.__name__) + + def __getitem__(self, key): + """ + use ``[]`` indexing notation as a short form for expression repetition: + - ``expr[n]`` is equivalent to ``expr*n`` + - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` + - ``expr[n, ...]`` or ``expr[n,]`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` + - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` + ``None`` may be used in place of ``...``. + + Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception + if more than ``n`` ``expr``s exist in the input stream. If this behavior is + desired, then write ``expr[..., n] + ~expr``. + """ + + # convert single arg keys to tuples + try: + if isinstance(key, str): + key = (key,) + iter(key) + except TypeError: + key = (key, key) + + if len(key) > 2: + warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5], + '... [{0}]'.format(len(key)) + if len(key) > 5 else '')) + + # clip to 2 elements + ret = self * tuple(key[:2]) + return ret def __call__(self, name=None): """ @@ -2208,22 +2407,22 @@ def __call__(self, name=None): Example:: # these are equivalent - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno") + userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") """ if name is not None: - return self.setResultsName(name) + return self._setResultsName(name) else: return self.copy() - def suppress( self ): + def suppress(self): """ Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from cluttering up returned output. """ - return Suppress( self ) + return Suppress(self) - def leaveWhitespace( self ): + def leaveWhitespace(self): """ Disables the skipping of whitespace before matching the characters in the :class:`ParserElement`'s defined pattern. This is normally only used internally by @@ -2232,7 +2431,7 @@ def leaveWhitespace( self ): self.skipWhitespace = False return self - def setWhitespaceChars( self, chars ): + def setWhitespaceChars(self, chars): """ Overrides the default whitespace chars """ @@ -2241,7 +2440,7 @@ def setWhitespaceChars( self, chars ): self.copyDefaultWhiteChars = False return self - def parseWithTabs( self ): + def parseWithTabs(self): """ Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string. 
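# [editor's aside: illustrative sketch, not part of the patch] __getitem__ above
# makes []-indexing a short form of the * repetition operators (plain
# `pyparsing` import assumed):
from pyparsing import Word, alphas, nums

words = Word(alphas)[1, ...]                  # same as OneOrMore(Word(alphas))
assert words.parseString("a bb ccc").asList() == ["a", "bb", "ccc"]
pair = Word(nums)[2]                          # exactly two occurrences
assert pair.parseString("1 2").asList() == ["1", "2"]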
Must be called before ``parseString`` when the input grammar contains elements that @@ -2250,7 +2449,7 @@ def parseWithTabs( self ): self.keepTabs = True return self - def ignore( self, other ): + def ignore(self, other): """ Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other @@ -2267,14 +2466,14 @@ def ignore( self, other ): if isinstance(other, basestring): other = Suppress(other) - if isinstance( other, Suppress ): + if isinstance(other, Suppress): if other not in self.ignoreExprs: self.ignoreExprs.append(other) else: - self.ignoreExprs.append( Suppress( other.copy() ) ) + self.ignoreExprs.append(Suppress(other.copy())) return self - def setDebugActions( self, startAction, successAction, exceptionAction ): + def setDebugActions(self, startAction, successAction, exceptionAction): """ Enable display of debugging messages while doing pattern matching. """ @@ -2284,7 +2483,7 @@ def setDebugActions( self, startAction, successAction, exceptionAction ): self.debug = True return self - def setDebug( self, flag=True ): + def setDebug(self, flag=True): """ Enable display of debugging messages while doing pattern matching. Set ``flag`` to True to enable, False to disable. @@ -2322,32 +2521,32 @@ def setDebug( self, flag=True ): name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``. """ if flag: - self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) + self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction) else: self.debug = False return self - def __str__( self ): + def __str__(self): return self.name - def __repr__( self ): + def __repr__(self): return _ustr(self) - def streamline( self ): + def streamline(self): self.streamlined = True self.strRepr = None return self - def checkRecursion( self, parseElementList ): + def checkRecursion(self, parseElementList): pass - def validate( self, validateTrace=[] ): + def validate(self, validateTrace=None): """ Check defined expressions for valid structure, check for infinite recursive definitions. """ - self.checkRecursion( [] ) + self.checkRecursion([]) - def parseFile( self, file_or_filename, parseAll=False ): + def parseFile(self, file_or_filename, parseAll=False): """ Execute the parse expression on the given file or filename. 
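# [editor's aside: illustrative sketch, not part of the patch] parseFile()
# accepts a filename or an open file object, and per the reworked __eq__ just
# below, comparing a ParserElement to a string delegates to matches() (plain
# `pyparsing` import assumed):
from pyparsing import Word, nums

assert Word(nums).matches("123")
assert Word(nums) == "123"                    # same check, via __eq__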
If a filename is specified (instead of a file object), @@ -2367,24 +2566,25 @@ def parseFile( self, file_or_filename, parseAll=False ): # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc - def __eq__(self,other): - if isinstance(other, ParserElement): - return self is other or vars(self) == vars(other) + def __eq__(self, other): + if self is other: + return True elif isinstance(other, basestring): return self.matches(other) - else: - return super(ParserElement,self)==other + elif isinstance(other, ParserElement): + return vars(self) == vars(other) + return False - def __ne__(self,other): + def __ne__(self, other): return not (self == other) def __hash__(self): - return hash(id(self)) + return id(self) - def __req__(self,other): + def __req__(self, other): return self == other - def __rne__(self,other): + def __rne__(self, other): return not (self == other) def matches(self, testString, parseAll=True): @@ -2408,7 +2608,8 @@ def matches(self, testString, parseAll=True): return False def runTests(self, tests, parseAll=True, comment='#', - fullDump=True, printResults=True, failureTests=False, postParse=None): + fullDump=True, printResults=True, failureTests=False, postParse=None, + file=None): """ Execute the parse expression on a series of test strings, showing each test, the parsed results or where the parse failed. Quick and easy way to @@ -2425,6 +2626,8 @@ def runTests(self, tests, parseAll=True, comment='#', - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing - postParse - (default= ``None``) optional callback for successful parse results; called as `fn(test_string, parse_results)` and returns a string to be added to the test output + - file - (default=``None``) optional file-like object to which test output will be written; + if None, will default to ``sys.stdout`` Returns: a (success, results) tuple, where success indicates that all tests succeeded (or failed if ``failureTests`` is True), and the results contain a list of lines of each @@ -2504,9 +2707,15 @@ def runTests(self, tests, parseAll=True, comment='#', tests = list(map(str.strip, tests.rstrip().splitlines())) if isinstance(comment, basestring): comment = Literal(comment) + if file is None: + file = sys.stdout + print_ = file.write + allResults = [] comments = [] success = True + NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString) + BOM = u'\ufeff' for t in tests: if comment is not None and comment.matches(t, False) or comments and not t: comments.append(t) @@ -2517,24 +2726,15 @@ def runTests(self, tests, parseAll=True, comment='#', comments = [] try: # convert newline marks to actual newlines, and strip leading BOM if present - t = t.replace(r'\n','\n').lstrip('\ufeff') + t = NL.transformString(t.lstrip(BOM)) result = self.parseString(t, parseAll=parseAll) - out.append(result.dump(full=fullDump)) - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - out.append(str(pp_value)) - except Exception as e: - out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) except ParseBaseException as pe: fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" if '\n' in t: out.append(line(pe.loc, t)) - out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) + out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal) else: - out.append(' '*pe.loc + '^' + fatal) + out.append(' ' * pe.loc + '^' + fatal) out.append("FAIL: " + str(pe)) 
success = success and failureTests result = pe @@ -2542,30 +2742,80 @@ def runTests(self, tests, parseAll=True, comment='#', out.append("FAIL-EXCEPTION: " + str(exc)) success = success and failureTests result = exc + else: + success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + if isinstance(pp_value, ParseResults): + out.append(pp_value.dump()) + else: + out.append(str(pp_value)) + else: + out.append(result.dump()) + except Exception as e: + out.append(result.dump(full=fullDump)) + out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) + else: + out.append(result.dump(full=fullDump)) if printResults: if fullDump: out.append('') - print('\n'.join(out)) + print_('\n'.join(out)) allResults.append((t, result)) return success, allResults +class _PendingSkip(ParserElement): + # internal placeholder class to hold a place were '...' is added to a parser element, + # once another ParserElement is added, this placeholder will be replaced with a SkipTo + def __init__(self, expr, must_skip=False): + super(_PendingSkip, self).__init__() + self.strRepr = str(expr + Empty()).replace('Empty', '...') + self.name = self.strRepr + self.anchor = expr + self.must_skip = must_skip + + def __add__(self, other): + skipper = SkipTo(other).setName("...")("_skipped*") + if self.must_skip: + def must_skip(t): + if not t._skipped or t._skipped.asList() == ['']: + del t[0] + t.pop("_skipped", None) + def show_skip(t): + if t._skipped.asList()[-1:] == ['']: + skipped = t.pop('_skipped') + t['_skipped'] = 'missing <' + repr(self.anchor) + '>' + return (self.anchor + skipper().addParseAction(must_skip) + | skipper().addParseAction(show_skip)) + other + + return self.anchor + skipper + other + + def __repr__(self): + return self.strRepr + + def parseImpl(self, *args): + raise Exception("use of `...` expression without following SkipTo target expression") + + class Token(ParserElement): """Abstract :class:`ParserElement` subclass, for defining atomic matching patterns. """ - def __init__( self ): - super(Token,self).__init__( savelist=False ) + def __init__(self): + super(Token, self).__init__(savelist=False) class Empty(Token): """An empty token, will always match. """ - def __init__( self ): - super(Empty,self).__init__() + def __init__(self): + super(Empty, self).__init__() self.name = "Empty" self.mayReturnEmpty = True self.mayIndexError = False @@ -2574,14 +2824,14 @@ def __init__( self ): class NoMatch(Token): """A token that will never match. """ - def __init__( self ): - super(NoMatch,self).__init__() + def __init__(self): + super(NoMatch, self).__init__() self.name = "NoMatch" self.mayReturnEmpty = True self.mayIndexError = False self.errmsg = "Unmatchable token" - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): raise ParseException(instring, loc, self.errmsg, self) @@ -2599,8 +2849,8 @@ class Literal(Token): For keyword matching (force word break before and after the matched string), use :class:`Keyword` or :class:`CaselessKeyword`. 
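# [editor's aside: illustrative sketch, not part of the patch] Literal matches
# anywhere in the input, while Keyword (next) requires word boundaries, per the
# docstring above (plain `pyparsing` import assumed):
from pyparsing import Keyword, Literal

assert Literal("if").searchString("ifdef is here").asList() == [["if"]]
assert Keyword("if").searchString("ifdef is here").asList() == []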
""" - def __init__( self, matchString ): - super(Literal,self).__init__() + def __init__(self, matchString): + super(Literal, self).__init__() self.match = matchString self.matchLen = len(matchString) try: @@ -2614,15 +2864,22 @@ def __init__( self, matchString ): self.mayReturnEmpty = False self.mayIndexError = False - # Performance tuning: this routine gets called a *lot* - # if this is a single character match string and the first character matches, - # short-circuit as quickly as possible, and avoid calling startswith - #~ @profile - def parseImpl( self, instring, loc, doActions=True ): - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) ): - return loc+self.matchLen, self.match + # Performance tuning: modify __class__ to select + # a parseImpl optimized for single-character check + if self.matchLen == 1 and type(self) is Literal: + self.__class__ = _SingleCharLiteral + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + +class _SingleCharLiteral(Literal): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar: + return loc + 1, self.match raise ParseException(instring, loc, self.errmsg, self) + _L = Literal ParserElement._literalStringClass = Literal @@ -2651,10 +2908,10 @@ class Keyword(Token): For case-insensitive matching, use :class:`CaselessKeyword`. """ - DEFAULT_KEYWORD_CHARS = alphanums+"_$" + DEFAULT_KEYWORD_CHARS = alphanums + "_$" - def __init__( self, matchString, identChars=None, caseless=False ): - super(Keyword,self).__init__() + def __init__(self, matchString, identChars=None, caseless=False): + super(Keyword, self).__init__() if identChars is None: identChars = Keyword.DEFAULT_KEYWORD_CHARS self.match = matchString @@ -2663,7 +2920,7 @@ def __init__( self, matchString, identChars=None, caseless=False ): self.firstMatchChar = matchString[0] except IndexError: warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) self.name = '"%s"' % self.match self.errmsg = "Expected " + self.name self.mayReturnEmpty = False @@ -2674,27 +2931,32 @@ def __init__( self, matchString, identChars=None, caseless=False ): identChars = identChars.upper() self.identChars = set(identChars) - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if self.caseless: - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and - (loc == 0 or instring[loc-1].upper() not in self.identChars) ): - return loc+self.matchLen, self.match + if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch) + and (loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen].upper() not in self.identChars) + and (loc == 0 + or instring[loc - 1].upper() not in self.identChars)): + return loc + self.matchLen, self.match + else: - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and - (loc == 0 or instring[loc-1] not in self.identChars) ): - return loc+self.matchLen, self.match + if instring[loc] == self.firstMatchChar: + if 
((self.matchLen == 1 or instring.startswith(self.match, loc)) + and (loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen] not in self.identChars) + and (loc == 0 or instring[loc - 1] not in self.identChars)): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) def copy(self): - c = super(Keyword,self).copy() + c = super(Keyword, self).copy() c.identChars = Keyword.DEFAULT_KEYWORD_CHARS return c @staticmethod - def setDefaultKeywordChars( chars ): + def setDefaultKeywordChars(chars): """Overrides the default Keyword chars """ Keyword.DEFAULT_KEYWORD_CHARS = chars @@ -2710,16 +2972,16 @@ class CaselessLiteral(Literal): (Contrast with example for :class:`CaselessKeyword`.) """ - def __init__( self, matchString ): - super(CaselessLiteral,self).__init__( matchString.upper() ) + def __init__(self, matchString): + super(CaselessLiteral, self).__init__(matchString.upper()) # Preserve the defining literal. self.returnString = matchString self.name = "'%s'" % self.returnString self.errmsg = "Expected " + self.name - def parseImpl( self, instring, loc, doActions=True ): - if instring[ loc:loc+self.matchLen ].upper() == self.match: - return loc+self.matchLen, self.returnString + def parseImpl(self, instring, loc, doActions=True): + if instring[loc:loc + self.matchLen].upper() == self.match: + return loc + self.matchLen, self.returnString raise ParseException(instring, loc, self.errmsg, self) class CaselessKeyword(Keyword): @@ -2732,8 +2994,8 @@ class CaselessKeyword(Keyword): (Contrast with example for :class:`CaselessLiteral`.) """ - def __init__( self, matchString, identChars=None ): - super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) + def __init__(self, matchString, identChars=None): + super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True) class CloseMatch(Token): """A variation on :class:`Literal` which matches "close" matches, @@ -2769,7 +3031,7 @@ class CloseMatch(Token): patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) """ def __init__(self, match_string, maxMismatches=1): - super(CloseMatch,self).__init__() + super(CloseMatch, self).__init__() self.name = match_string self.match_string = match_string self.maxMismatches = maxMismatches @@ -2777,7 +3039,7 @@ def __init__(self, match_string, maxMismatches=1): self.mayIndexError = False self.mayReturnEmpty = False - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): start = loc instrlen = len(instring) maxloc = start + len(self.match_string) @@ -2788,8 +3050,8 @@ def parseImpl( self, instring, loc, doActions=True ): mismatches = [] maxMismatches = self.maxMismatches - for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): - src,mat = s_m + for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)): + src, mat = s_m if src != mat: mismatches.append(match_stringloc) if len(mismatches) > maxMismatches: @@ -2797,7 +3059,7 @@ def parseImpl( self, instring, loc, doActions=True ): else: loc = match_stringloc + 1 results = ParseResults([instring[start:loc]]) - results['original'] = self.match_string + results['original'] = match_string results['mismatches'] = mismatches return loc, results @@ -2849,7 +3111,7 @@ class Word(Token): capital_word = Word(alphas.upper(), alphas.lower()) # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, 
alphanums+'-') + hostname = Word(alphas, alphanums + '-') # roman numeral (not a strict parser, accepts invalid mix of characters) roman = Word("IVXLCDM") @@ -2857,15 +3119,16 @@ class Word(Token): # any string of non-whitespace characters, except for ',' csv_value = Word(printables, excludeChars=",") """ - def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): - super(Word,self).__init__() + def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None): + super(Word, self).__init__() if excludeChars: + excludeChars = set(excludeChars) initChars = ''.join(c for c in initChars if c not in excludeChars) if bodyChars: bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) self.initCharsOrig = initChars self.initChars = set(initChars) - if bodyChars : + if bodyChars: self.bodyCharsOrig = bodyChars self.bodyChars = set(bodyChars) else: @@ -2893,34 +3156,28 @@ def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword= self.mayIndexError = False self.asKeyword = asKeyword - if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): + if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0): if self.bodyCharsOrig == self.initCharsOrig: self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) elif len(self.initCharsOrig) == 1: - self.reString = "%s[%s]*" % \ - (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) + self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) else: - self.reString = "[%s][%s]*" % \ - (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) + self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) if self.asKeyword: - self.reString = r"\b"+self.reString+r"\b" + self.reString = r"\b" + self.reString + r"\b" + try: - self.re = re.compile( self.reString ) + self.re = re.compile(self.reString) except Exception: self.re = None + else: + self.re_match = self.re.match + self.__class__ = _WordRegex - def parseImpl( self, instring, loc, doActions=True ): - if self.re: - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - if not(instring[ loc ] in self.initChars): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.initChars: raise ParseException(instring, loc, self.errmsg, self) start = loc @@ -2928,17 +3185,18 @@ def parseImpl( self, instring, loc, doActions=True ): instrlen = len(instring) bodychars = self.bodyChars maxloc = start + self.maxLen - maxloc = min( maxloc, instrlen ) + maxloc = min(maxloc, instrlen) while loc < maxloc and instring[loc] in bodychars: loc += 1 throwException = False if loc - start < self.minLen: throwException = True - if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: + elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: throwException = True - if self.asKeyword: - if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars): + elif self.asKeyword: + if (start > 0 and instring[start - 1] in bodychars + or loc < instrlen and instring[loc] in bodychars): throwException = True if throwException: @@ -2946,38 +3204,49 @@ def parseImpl( self, instring, 
loc, doActions=True ): return loc, instring[start:loc] - def __str__( self ): + def __str__(self): try: - return super(Word,self).__str__() + return super(Word, self).__str__() except Exception: pass - if self.strRepr is None: def charsAsStr(s): - if len(s)>4: - return s[:4]+"..." + if len(s) > 4: + return s[:4] + "..." else: return s - if ( self.initCharsOrig != self.bodyCharsOrig ): - self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) + if self.initCharsOrig != self.bodyCharsOrig: + self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig)) else: self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) return self.strRepr +class _WordRegex(Word): + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) -class Char(Word): + loc = result.end() + return loc, result.group() + + +class Char(_WordRegex): """A short-cut class for defining ``Word(characters, exact=1)``, when defining a match of any single character in a string of characters. """ - def __init__(self, charset): - super(Char, self).__init__(charset, exact=1) - self.reString = "[%s]" % _escapeRegexRangeChars(self.initCharsOrig) - self.re = re.compile( self.reString ) + def __init__(self, charset, asKeyword=False, excludeChars=None): + super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars) + self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars)) + if asKeyword: + self.reString = r"\b%s\b" % self.reString + self.re = re.compile(self.reString) + self.re_match = self.re.match class Regex(Token): @@ -2987,26 +3256,35 @@ class Regex(Token): If the given regex contains named groups (defined using ``(?P<name>...)``), these will be preserved as named parse results. + If instead of the Python stdlib re module you wish to use a different RE module + (such as the `regex` module), you can replace it by either building your + Regex object with a compiled RE that was compiled using regex: + Example:: realnum = Regex(r"[+-]?\d+\.\d*") date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + + # use regex module instead of stdlib re module to construct a Regex using + # a compiled regular expression + import regex + parser = pp.Regex(regex.compile(r'[0-9]')) + """ - compiledREtype = type(re.compile("[A-Z]")) - def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False): + def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False): """The parameters ``pattern`` and ``flags`` are passed to the ``re.compile()`` function as-is. See the Python `re module <https://docs.python.org/3/library/re.html>`_ module for an explanation of the acceptable patterns and flags. 
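# [Editor's sketch, not part of the patch: per the Regex docstring above,
#  named groups in the pattern surface as named parse results
#  (upstream pyparsing 2.4.5 assumed):]
import pyparsing as pp

date = pp.Regex(r"(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)")
result = date.parseString("2019-12-20")
print(result["year"], result["month"], result["day"])   # -> 2019 12 20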
""" - super(Regex,self).__init__() + super(Regex, self).__init__() if isinstance(pattern, basestring): if not pattern: warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) self.pattern = pattern self.flags = flags @@ -3016,17 +3294,18 @@ def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False): self.reString = self.pattern except sre_constants.error: warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) raise - elif isinstance(pattern, Regex.compiledREtype): + elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'): self.re = pattern - self.pattern = \ - self.reString = str(pattern) + self.pattern = self.reString = pattern.pattern self.flags = flags else: - raise ValueError("Regex may only be constructed with a string or a compiled RE object") + raise TypeError("Regex may only be constructed with a string or a compiled RE object") + + self.re_match = self.re.match self.name = _ustr(self) self.errmsg = "Expected " + self.name @@ -3034,28 +3313,45 @@ def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False): self.mayReturnEmpty = True self.asGroupList = asGroupList self.asMatch = asMatch + if self.asGroupList: + self.parseImpl = self.parseImplAsGroupList + if self.asMatch: + self.parseImpl = self.parseImplAsMatch - def parseImpl( self, instring, loc, doActions=True ): - result = self.re.match(instring,loc) + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) if not result: raise ParseException(instring, loc, self.errmsg, self) loc = result.end() - if self.asMatch: - ret = result - elif self.asGroupList: - ret = result.groups() - else: - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc,ret - - def __str__( self ): + ret = ParseResults(result.group()) + d = result.groupdict() + if d: + for k, v in d.items(): + ret[k] = v + return loc, ret + + def parseImplAsGroupList(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.groups() + return loc, ret + + def parseImplAsMatch(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result + return loc, ret + + def __str__(self): try: - return super(Regex,self).__str__() + return super(Regex, self).__str__() except Exception: pass @@ -3065,7 +3361,7 @@ def __str__( self ): return self.strRepr def sub(self, repl): - """ + r""" Return Regex with an attached parse action to transform the parsed result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. 
@@ -3077,12 +3373,12 @@ def sub(self, repl): """ if self.asGroupList: warnings.warn("cannot use sub() with Regex(asGroupList=True)", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch and callable(repl): warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch: @@ -3102,20 +3398,20 @@ class QuotedString(Token): - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash - (default= ``None`` ) + (default= ``None``) - escQuote - special quote sequence to escape an embedded quote string (such as SQL's ``""`` to escape an embedded ``"``) - (default= ``None`` ) + (default= ``None``) - multiline - boolean indicating whether quotes can span - multiple lines (default= ``False`` ) + multiple lines (default= ``False``) - unquoteResults - boolean indicating whether the matched text - should be unquoted (default= ``True`` ) + should be unquoted (default= ``True``) - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default= ``None`` => same as quoteChar) - convertWhitespaceEscapes - convert escaped whitespace (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True`` ) + (default= ``True``) Example:: @@ -3132,13 +3428,14 @@ class QuotedString(Token): [['This is the "quote"']] [['This is the quote with "embedded" quotes']] """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): - super(QuotedString,self).__init__() + def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, + unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): + super(QuotedString, self).__init__() # remove white space from quote chars - wont work anyway quoteChar = quoteChar.strip() if not quoteChar: - warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) + warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) raise SyntaxError() if endQuoteChar is None: @@ -3146,7 +3443,7 @@ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unq else: endQuoteChar = endQuoteChar.strip() if not endQuoteChar: - warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) + warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) raise SyntaxError() self.quoteChar = quoteChar @@ -3161,35 +3458,34 @@ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unq if multiline: self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) + self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar), + _escapeRegexRangeChars(self.endQuoteChar[0]), + (escChar is not None and _escapeRegexRangeChars(escChar) or '')) else: self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) + self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar), + _escapeRegexRangeChars(self.endQuoteChar[0]), + (escChar is not None and _escapeRegexRangeChars(escChar) or '')) if 
len(self.endQuoteChar) > 1: self.pattern += ( '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar)-1,0,-1)) + ')' - ) + _escapeRegexRangeChars(self.endQuoteChar[i])) + for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')') + if escQuote: self.pattern += (r'|(?:%s)' % re.escape(escQuote)) if escChar: self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar)+"(.)" + self.escCharReplacePattern = re.escape(self.escChar) + "(.)" self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) try: self.re = re.compile(self.pattern, self.flags) self.reString = self.pattern + self.re_match = self.re.match except sre_constants.error: warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) raise self.name = _ustr(self) @@ -3197,8 +3493,8 @@ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unq self.mayIndexError = False self.mayReturnEmpty = True - def parseImpl( self, instring, loc, doActions=True ): - result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None + def parseImpl(self, instring, loc, doActions=True): + result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None if not result: raise ParseException(instring, loc, self.errmsg, self) @@ -3208,18 +3504,18 @@ def parseImpl( self, instring, loc, doActions=True ): if self.unquoteResults: # strip off quotes - ret = ret[self.quoteCharLen:-self.endQuoteCharLen] + ret = ret[self.quoteCharLen: -self.endQuoteCharLen] - if isinstance(ret,basestring): + if isinstance(ret, basestring): # replace escaped whitespace if '\\' in ret and self.convertWhitespaceEscapes: ws_map = { - r'\t' : '\t', - r'\n' : '\n', - r'\f' : '\f', - r'\r' : '\r', + r'\t': '\t', + r'\n': '\n', + r'\f': '\f', + r'\r': '\r', } - for wslit,wschar in ws_map.items(): + for wslit, wschar in ws_map.items(): ret = ret.replace(wslit, wschar) # replace escaped characters @@ -3232,9 +3528,9 @@ def parseImpl( self, instring, loc, doActions=True ): return loc, ret - def __str__( self ): + def __str__(self): try: - return super(QuotedString,self).__str__() + return super(QuotedString, self).__str__() except Exception: pass @@ -3264,15 +3560,14 @@ class CharsNotIn(Token): ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] """ - def __init__( self, notChars, min=1, max=0, exact=0 ): - super(CharsNotIn,self).__init__() + def __init__(self, notChars, min=1, max=0, exact=0): + super(CharsNotIn, self).__init__() self.skipWhitespace = False self.notChars = notChars if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use " + - "Optional(CharsNotIn()) if zero-length char group is permitted") + raise ValueError("cannot specify a minimum length < 1; use " + "Optional(CharsNotIn()) if zero-length char group is permitted") self.minLen = min @@ -3287,19 +3582,18 @@ def __init__( self, notChars, min=1, max=0, exact=0 ): self.name = _ustr(self) self.errmsg = "Expected " + self.name - self.mayReturnEmpty = ( self.minLen == 0 ) + self.mayReturnEmpty = (self.minLen == 0) self.mayIndexError = False - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if instring[loc] in self.notChars: raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 notchars = self.notChars - maxlen = min( start+self.maxLen, len(instring) ) - while loc < 
maxlen and \ - (instring[loc] not in notchars): + maxlen = min(start + self.maxLen, len(instring)) + while loc < maxlen and instring[loc] not in notchars: loc += 1 if loc - start < self.minLen: @@ -3307,7 +3601,7 @@ def parseImpl( self, instring, loc, doActions=True ): return loc, instring[start:loc] - def __str__( self ): + def __str__(self): try: return super(CharsNotIn, self).__str__() except Exception: @@ -3356,10 +3650,10 @@ class White(Token): 'u\3000': '<IDEOGRAPHIC_SPACE>', } def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White,self).__init__() + super(White, self).__init__() self.matchWhite = ws - self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) - #~ self.leaveWhitespace() + self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite)) + # ~ self.leaveWhitespace() self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) self.mayReturnEmpty = True self.errmsg = "Expected " + self.name @@ -3375,13 +3669,13 @@ def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): self.maxLen = exact self.minLen = exact - def parseImpl( self, instring, loc, doActions=True ): - if not(instring[ loc ] in self.matchWhite): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.matchWhite: raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 maxloc = start + self.maxLen - maxloc = min( maxloc, len(instring) ) + maxloc = min(maxloc, len(instring)) while loc < maxloc and instring[loc] in self.matchWhite: loc += 1 @@ -3392,9 +3686,9 @@ def parseImpl( self, instring, loc, doActions=True ): class _PositionToken(Token): - def __init__( self ): - super(_PositionToken,self).__init__() - self.name=self.__class__.__name__ + def __init__(self): + super(_PositionToken, self).__init__() + self.name = self.__class__.__name__ self.mayReturnEmpty = True self.mayIndexError = False @@ -3402,30 +3696,30 @@ class GoToColumn(_PositionToken): """Token to advance to a specific column of input text; useful for tabular report scraping. 
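# [Editor's sketch, not part of the patch: White, shown above, turns
#  whitespace -- which pyparsing normally skips -- into a matchable token
#  (upstream pyparsing 2.4.5 assumed):]
import pyparsing as pp

field = pp.Word(pp.alphas)
row = field + pp.White("\t").suppress() + field   # require a literal TAB
print(row.parseString("abc\tdef"))                # -> ['abc', 'def']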
""" - def __init__( self, colno ): - super(GoToColumn,self).__init__() + def __init__(self, colno): + super(GoToColumn, self).__init__() self.col = colno - def preParse( self, instring, loc ): - if col(loc,instring) != self.col: + def preParse(self, instring, loc): + if col(loc, instring) != self.col: instrlen = len(instring) if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : + loc = self._skipIgnorables(instring, loc) + while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col: loc += 1 return loc - def parseImpl( self, instring, loc, doActions=True ): - thiscol = col( loc, instring ) + def parseImpl(self, instring, loc, doActions=True): + thiscol = col(loc, instring) if thiscol > self.col: - raise ParseException( instring, loc, "Text not in expected column", self ) + raise ParseException(instring, loc, "Text not in expected column", self) newloc = loc + self.col - thiscol - ret = instring[ loc: newloc ] + ret = instring[loc: newloc] return newloc, ret class LineStart(_PositionToken): - """Matches if current position is at the beginning of a line within + r"""Matches if current position is at the beginning of a line within the parse string Example:: @@ -3446,11 +3740,11 @@ class LineStart(_PositionToken): ['AAA', ' and this line'] """ - def __init__( self ): - super(LineStart,self).__init__() + def __init__(self): + super(LineStart, self).__init__() self.errmsg = "Expected start of line" - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if col(loc, instring) == 1: return loc, [] raise ParseException(instring, loc, self.errmsg, self) @@ -3459,19 +3753,19 @@ class LineEnd(_PositionToken): """Matches if current position is at the end of a line within the parse string """ - def __init__( self ): - super(LineEnd,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) + def __init__(self): + super(LineEnd, self).__init__() + self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", "")) self.errmsg = "Expected end of line" - def parseImpl( self, instring, loc, doActions=True ): - if loc<len(instring): + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): if instring[loc] == "\n": - return loc+1, "\n" + return loc + 1, "\n" else: raise ParseException(instring, loc, self.errmsg, self) elif loc == len(instring): - return loc+1, [] + return loc + 1, [] else: raise ParseException(instring, loc, self.errmsg, self) @@ -3479,29 +3773,29 @@ class StringStart(_PositionToken): """Matches if current position is at the beginning of the parse string """ - def __init__( self ): - super(StringStart,self).__init__() + def __init__(self): + super(StringStart, self).__init__() self.errmsg = "Expected start of text" - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if loc != 0: # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse( instring, 0 ): + if loc != self.preParse(instring, 0): raise ParseException(instring, loc, self.errmsg, self) return loc, [] class StringEnd(_PositionToken): """Matches if current position is at the end of the parse string """ - def __init__( self ): - super(StringEnd,self).__init__() + def __init__(self): + super(StringEnd, self).__init__() self.errmsg = "Expected end of text" - def parseImpl( self, instring, loc, 
doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if loc < len(instring): raise ParseException(instring, loc, self.errmsg, self) elif loc == len(instring): - return loc+1, [] + return loc + 1, [] elif loc > len(instring): return loc, [] else: @@ -3516,15 +3810,15 @@ class WordStart(_PositionToken): the beginning of the string being parsed, or at the beginning of a line. """ - def __init__(self, wordChars = printables): - super(WordStart,self).__init__() + def __init__(self, wordChars=printables): + super(WordStart, self).__init__() self.wordChars = set(wordChars) self.errmsg = "Not at the start of a word" - def parseImpl(self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if loc != 0: - if (instring[loc-1] in self.wordChars or - instring[loc] not in self.wordChars): + if (instring[loc - 1] in self.wordChars + or instring[loc] not in self.wordChars): raise ParseException(instring, loc, self.errmsg, self) return loc, [] @@ -3536,17 +3830,17 @@ class WordEnd(_PositionToken): will also match at the end of the string being parsed, or at the end of a line. """ - def __init__(self, wordChars = printables): - super(WordEnd,self).__init__() + def __init__(self, wordChars=printables): + super(WordEnd, self).__init__() self.wordChars = set(wordChars) self.skipWhitespace = False self.errmsg = "Not at the end of a word" - def parseImpl(self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): instrlen = len(instring) - if instrlen>0 and loc<instrlen: + if instrlen > 0 and loc < instrlen: if (instring[loc] in self.wordChars or - instring[loc-1] not in self.wordChars): + instring[loc - 1] not in self.wordChars): raise ParseException(instring, loc, self.errmsg, self) return loc, [] @@ -3555,90 +3849,89 @@ class ParseExpression(ParserElement): """Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
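# [Editor's sketch, not part of the patch: WordEnd, shown above, keeps an
#  expression from matching inside a larger run of word characters
#  (upstream pyparsing 2.4.5 assumed):]
import pyparsing as pp

number = pp.Word(pp.nums) + pp.WordEnd()
print(number.parseString("123 abc"))   # -> ['123']
# number.parseString("123abc") would raise ParseException, since "123"
# is immediately followed by more word characters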
""" - def __init__( self, exprs, savelist = False ): - super(ParseExpression,self).__init__(savelist) - if isinstance( exprs, _generatorType ): + def __init__(self, exprs, savelist=False): + super(ParseExpression, self).__init__(savelist) + if isinstance(exprs, _generatorType): exprs = list(exprs) - if isinstance( exprs, basestring ): - self.exprs = [ ParserElement._literalStringClass( exprs ) ] - elif isinstance( exprs, Iterable ): + if isinstance(exprs, basestring): + self.exprs = [self._literalStringClass(exprs)] + elif isinstance(exprs, ParserElement): + self.exprs = [exprs] + elif isinstance(exprs, Iterable): exprs = list(exprs) # if sequence of strings provided, wrap with Literal - if all(isinstance(expr, basestring) for expr in exprs): - exprs = map(ParserElement._literalStringClass, exprs) + if any(isinstance(expr, basestring) for expr in exprs): + exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs) self.exprs = list(exprs) else: try: - self.exprs = list( exprs ) + self.exprs = list(exprs) except TypeError: - self.exprs = [ exprs ] + self.exprs = [exprs] self.callPreparse = False - def __getitem__( self, i ): - return self.exprs[i] - - def append( self, other ): - self.exprs.append( other ) + def append(self, other): + self.exprs.append(other) self.strRepr = None return self - def leaveWhitespace( self ): + def leaveWhitespace(self): """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on all contained expressions.""" self.skipWhitespace = False - self.exprs = [ e.copy() for e in self.exprs ] + self.exprs = [e.copy() for e in self.exprs] for e in self.exprs: e.leaveWhitespace() return self - def ignore( self, other ): - if isinstance( other, Suppress ): + def ignore(self, other): + if isinstance(other, Suppress): if other not in self.ignoreExprs: - super( ParseExpression, self).ignore( other ) + super(ParseExpression, self).ignore(other) for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) + e.ignore(self.ignoreExprs[-1]) else: - super( ParseExpression, self).ignore( other ) + super(ParseExpression, self).ignore(other) for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) + e.ignore(self.ignoreExprs[-1]) return self - def __str__( self ): + def __str__(self): try: - return super(ParseExpression,self).__str__() + return super(ParseExpression, self).__str__() except Exception: pass if self.strRepr is None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) + self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs)) return self.strRepr - def streamline( self ): - super(ParseExpression,self).streamline() + def streamline(self): + super(ParseExpression, self).streamline() for e in self.exprs: e.streamline() - # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) + # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d) # but only if there are no parse actions or resultsNames on the nested And's # (likewise for Or's and MatchFirst's) - if ( len(self.exprs) == 2 ): + if len(self.exprs) == 2: other = self.exprs[0] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): - self.exprs = other.exprs[:] + [ self.exprs[1] ] + if (isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug): + self.exprs = other.exprs[:] + [self.exprs[1]] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty 
self.mayIndexError |= other.mayIndexError other = self.exprs[-1] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): + if (isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug): self.exprs = self.exprs[:-1] + other.exprs[:] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty @@ -3648,21 +3941,31 @@ def streamline( self ): return self - def setResultsName( self, name, listAllMatches=False ): - ret = super(ParseExpression,self).setResultsName(name,listAllMatches) - return ret - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] + def validate(self, validateTrace=None): + tmp = (validateTrace if validateTrace is not None else [])[:] + [self] for e in self.exprs: e.validate(tmp) - self.checkRecursion( [] ) + self.checkRecursion([]) def copy(self): - ret = super(ParseExpression,self).copy() + ret = super(ParseExpression, self).copy() ret.exprs = [e.copy() for e in self.exprs] return ret + def _setResultsName(self, name, listAllMatches=False): + if __diag__.warn_ungrouped_named_tokens_in_collection: + for e in self.exprs: + if isinstance(e, ParserElement) and e.resultsName: + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName), + stacklevel=3) + + return super(ParseExpression, self)._setResultsName(name, listAllMatches) + + class And(ParseExpression): """ Requires all given :class:`ParseExpression` s to be found in the given order. @@ -3676,33 +3979,58 @@ class And(ParseExpression): integer = Word(nums) name_expr = OneOrMore(Word(alphas)) - expr = And([integer("id"),name_expr("name"),integer("age")]) + expr = And([integer("id"), name_expr("name"), integer("age")]) # more easily written as: expr = integer("id") + name_expr("name") + integer("age") """ class _ErrorStop(Empty): def __init__(self, *args, **kwargs): - super(And._ErrorStop,self).__init__(*args, **kwargs) + super(And._ErrorStop, self).__init__(*args, **kwargs) self.name = '-' self.leaveWhitespace() - def __init__( self, exprs, savelist = True ): - super(And,self).__init__(exprs, savelist) + def __init__(self, exprs, savelist=True): + if exprs and Ellipsis in exprs: + tmp = [] + for i, expr in enumerate(exprs): + if expr is Ellipsis: + if i < len(exprs) - 1: + skipto_arg = (Empty() + exprs[i + 1]).exprs[-1] + tmp.append(SkipTo(skipto_arg)("_skipped*")) + else: + raise Exception("cannot construct And with sequence ending in ...") + else: + tmp.append(expr) + exprs[:] = tmp + super(And, self).__init__(exprs, savelist) self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.setWhitespaceChars( self.exprs[0].whiteChars ) + self.setWhitespaceChars(self.exprs[0].whiteChars) self.skipWhitespace = self.exprs[0].skipWhitespace self.callPreparse = True def streamline(self): + # collapse any _PendingSkip's + if self.exprs: + if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1]): + for i, e in enumerate(self.exprs[:-1]): + if e is None: + continue + if (isinstance(e, ParseExpression) + and e.exprs and isinstance(e.exprs[-1], _PendingSkip)): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = None + self.exprs = [e for e in self.exprs if e is not None] + super(And, self).streamline() self.mayReturnEmpty = 
all(e.mayReturnEmpty for e in self.exprs) return self - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): # pass False as last arg to _parse for first element, since we already # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) + loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False) errorStop = False for e in self.exprs[1:]: if isinstance(e, And._ErrorStop): @@ -3710,7 +4038,7 @@ def parseImpl( self, instring, loc, doActions=True ): continue if errorStop: try: - loc, exprtokens = e._parse( instring, loc, doActions ) + loc, exprtokens = e._parse(instring, loc, doActions) except ParseSyntaxException: raise except ParseBaseException as pe: @@ -3719,25 +4047,25 @@ def parseImpl( self, instring, loc, doActions=True ): except IndexError: raise ParseSyntaxException(instring, len(instring), self.errmsg, self) else: - loc, exprtokens = e._parse( instring, loc, doActions ) + loc, exprtokens = e._parse(instring, loc, doActions) if exprtokens or exprtokens.haskeys(): resultlist += exprtokens return loc, resultlist - def __iadd__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #And( [ self, other ] ) + def __iadd__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + return self.append(other) # And([self, other]) - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] for e in self.exprs: - e.checkRecursion( subRecCheckList ) + e.checkRecursion(subRecCheckList) if not e.mayReturnEmpty: break - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -3763,8 +4091,8 @@ class Or(ParseExpression): [['123'], ['3.1416'], ['789']] """ - def __init__( self, exprs, savelist = False ): - super(Or,self).__init__(exprs, savelist) + def __init__(self, exprs, savelist=False): + super(Or, self).__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) else: @@ -3772,16 +4100,17 @@ def __init__( self, exprs, savelist = False ): def streamline(self): super(Or, self).streamline() - self.saveAsList = any(e.saveAsList for e in self.exprs) + if __compat__.collect_all_And_tokens: + self.saveAsList = any(e.saveAsList for e in self.exprs) return self - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): maxExcLoc = -1 maxException = None matches = [] for e in self.exprs: try: - loc2 = e.tryParse( instring, loc ) + loc2 = e.tryParse(instring, loc) except ParseException as err: err.__traceback__ = None if err.loc > maxExcLoc: @@ -3789,22 +4118,45 @@ def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) + maxException = ParseException(instring, len(instring), e.errmsg, self) maxExcLoc = len(instring) else: # save match among all matches, to retry longest to shortest matches.append((loc2, e)) if matches: - matches.sort(key=lambda x: -x[0]) - for _,e in matches: + # re-evaluate all matches in descending order of length of match, in case attached actions + # might 
change whether or how much they match of the input. + matches.sort(key=itemgetter(0), reverse=True) + + if not doActions: + # no further conditions or parse actions to change the selection of + # alternative, so the first match will be the best match + best_expr = matches[0][1] + return best_expr._parse(instring, loc, doActions) + + longest = -1, None + for loc1, expr1 in matches: + if loc1 <= longest[0]: + # already have a longer match than this one will deliver, we are done + return longest + try: - return e._parse( instring, loc, doActions ) + loc2, toks = expr1._parse(instring, loc, doActions) except ParseException as err: err.__traceback__ = None if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc + else: + if loc2 >= loc1: + return loc2, toks + # didn't match as much as before + elif loc2 > longest[0]: + longest = loc2, toks + + if longest != (-1, None): + return longest if maxException is not None: maxException.msg = self.errmsg @@ -3813,13 +4165,13 @@ def parseImpl( self, instring, loc, doActions=True ): raise ParseException(instring, loc, "no defined alternatives to match", self) - def __ixor__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #Or( [ self, other ] ) + def __ixor__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + return self.append(other) # Or([self, other]) - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -3827,10 +4179,22 @@ def __str__( self ): return self.strRepr - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] for e in self.exprs: - e.checkRecursion( subRecCheckList ) + e.checkRecursion(subRecCheckList) + + def _setResultsName(self, name, listAllMatches=False): + if (not __compat__.collect_all_And_tokens + and __diag__.warn_multiple_tokens_in_named_alternation): + if any(isinstance(e, And) for e in self.exprs): + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "may only return a single token for an And alternative, " + "in future will return the full list of tokens".format( + "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), + stacklevel=3) + + return super(Or, self)._setResultsName(name, listAllMatches) class MatchFirst(ParseExpression): @@ -3850,25 +4214,25 @@ class MatchFirst(ParseExpression): number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] """ - def __init__( self, exprs, savelist = False ): - super(MatchFirst,self).__init__(exprs, savelist) + def __init__(self, exprs, savelist=False): + super(MatchFirst, self).__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - # self.saveAsList = any(e.saveAsList for e in self.exprs) else: self.mayReturnEmpty = True def streamline(self): super(MatchFirst, self).streamline() - self.saveAsList = any(e.saveAsList for e in self.exprs) + if __compat__.collect_all_And_tokens: + self.saveAsList = any(e.saveAsList for e in self.exprs) return self - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): maxExcLoc = -1 maxException = None for e in self.exprs: try: - ret = e._parse( instring, loc, doActions ) + ret = e._parse(instring, loc, doActions) return ret except ParseException as err: if err.loc > maxExcLoc: @@ -3876,7 +4240,7 @@ def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) + maxException = ParseException(instring, len(instring), e.errmsg, self) maxExcLoc = len(instring) # only got here if no expression matched, raise exception for match that made it the furthest @@ -3887,13 +4251,13 @@ def parseImpl( self, instring, loc, doActions=True ): else: raise ParseException(instring, loc, "no defined alternatives to match", self) - def __ior__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #MatchFirst( [ self, other ] ) + def __ior__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + return self.append(other) # MatchFirst([self, other]) - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -3901,10 +4265,22 @@ def __str__( self ): return self.strRepr - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] for e in self.exprs: - e.checkRecursion( subRecCheckList ) + e.checkRecursion(subRecCheckList) + + def _setResultsName(self, name, listAllMatches=False): + if (not __compat__.collect_all_And_tokens + and __diag__.warn_multiple_tokens_in_named_alternation): + if any(isinstance(e, And) for e in self.exprs): + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "may only return a single token for an And alternative, " + "in future will return the full list of tokens".format( + "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), + stacklevel=3) + + return super(MatchFirst, self)._setResultsName(name, listAllMatches) class Each(ParseExpression): @@ -3964,8 +4340,8 @@ class Each(ParseExpression): - shape: TRIANGLE - size: 20 """ - def __init__( self, exprs, savelist = True ): - super(Each,self).__init__(exprs, savelist) + def __init__(self, exprs, savelist=True): + super(Each, self).__init__(exprs, savelist) self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) self.skipWhitespace = True self.initExprGroups = True @@ -3976,15 +4352,15 @@ def streamline(self): self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) return self - def 
parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if self.initExprGroups: - self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) - opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] - opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)] + self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional)) + opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)] + opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, Optional)] self.optionals = opt1 + opt2 - self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] - self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] - self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] + self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)] + self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)] + self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))] self.required += self.multirequired self.initExprGroups = False tmpLoc = loc @@ -3998,11 +4374,11 @@ def parseImpl( self, instring, loc, doActions=True ): failed = [] for e in tmpExprs: try: - tmpLoc = e.tryParse( instring, tmpLoc ) + tmpLoc = e.tryParse(instring, tmpLoc) except ParseException: failed.append(e) else: - matchOrder.append(self.opt1map.get(id(e),e)) + matchOrder.append(self.opt1map.get(id(e), e)) if e in tmpReqd: tmpReqd.remove(e) elif e in tmpOpt: @@ -4012,21 +4388,21 @@ def parseImpl( self, instring, loc, doActions=True ): if tmpReqd: missing = ", ".join(_ustr(e) for e in tmpReqd) - raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) + raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing) # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] + matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt] resultlist = [] for e in matchOrder: - loc,results = e._parse(instring,loc,doActions) + loc, results = e._parse(instring, loc, doActions) resultlist.append(results) finalResults = sum(resultlist, ParseResults([])) return loc, finalResults - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4034,86 +4410,88 @@ def __str__( self ): return self.strRepr - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] for e in self.exprs: - e.checkRecursion( subRecCheckList ) + e.checkRecursion(subRecCheckList) class ParseElementEnhance(ParserElement): """Abstract subclass of :class:`ParserElement`, for combining and post-processing parsed tokens. 
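# [Editor's sketch, not part of the patch: Each, whose parseImpl appears
#  above, matches all of its expressions regardless of input order -- it is
#  what the '&' operator builds (upstream pyparsing 2.4.5 assumed):]
import pyparsing as pp

color = (pp.Keyword("RED") | pp.Keyword("BLUE"))("color")
size = pp.Word(pp.nums)("size")
desc = color & size
print(desc.parseString("12 RED"))   # -> ['12', 'RED']; order doesn't matter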
""" - def __init__( self, expr, savelist=False ): - super(ParseElementEnhance,self).__init__(savelist) - if isinstance( expr, basestring ): - if issubclass(ParserElement._literalStringClass, Token): - expr = ParserElement._literalStringClass(expr) + def __init__(self, expr, savelist=False): + super(ParseElementEnhance, self).__init__(savelist) + if isinstance(expr, basestring): + if issubclass(self._literalStringClass, Token): + expr = self._literalStringClass(expr) else: - expr = ParserElement._literalStringClass(Literal(expr)) + expr = self._literalStringClass(Literal(expr)) self.expr = expr self.strRepr = None if expr is not None: self.mayIndexError = expr.mayIndexError self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars( expr.whiteChars ) + self.setWhitespaceChars(expr.whiteChars) self.skipWhitespace = expr.skipWhitespace self.saveAsList = expr.saveAsList self.callPreparse = expr.callPreparse self.ignoreExprs.extend(expr.ignoreExprs) - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if self.expr is not None: - return self.expr._parse( instring, loc, doActions, callPreParse=False ) + return self.expr._parse(instring, loc, doActions, callPreParse=False) else: - raise ParseException("",loc,self.errmsg,self) + raise ParseException("", loc, self.errmsg, self) - def leaveWhitespace( self ): + def leaveWhitespace(self): self.skipWhitespace = False self.expr = self.expr.copy() if self.expr is not None: self.expr.leaveWhitespace() return self - def ignore( self, other ): - if isinstance( other, Suppress ): + def ignore(self, other): + if isinstance(other, Suppress): if other not in self.ignoreExprs: - super( ParseElementEnhance, self).ignore( other ) + super(ParseElementEnhance, self).ignore(other) if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) + self.expr.ignore(self.ignoreExprs[-1]) else: - super( ParseElementEnhance, self).ignore( other ) + super(ParseElementEnhance, self).ignore(other) if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) + self.expr.ignore(self.ignoreExprs[-1]) return self - def streamline( self ): - super(ParseElementEnhance,self).streamline() + def streamline(self): + super(ParseElementEnhance, self).streamline() if self.expr is not None: self.expr.streamline() return self - def checkRecursion( self, parseElementList ): + def checkRecursion(self, parseElementList): if self in parseElementList: - raise RecursiveGrammarException( parseElementList+[self] ) - subRecCheckList = parseElementList[:] + [ self ] + raise RecursiveGrammarException(parseElementList + [self]) + subRecCheckList = parseElementList[:] + [self] if self.expr is not None: - self.expr.checkRecursion( subRecCheckList ) + self.expr.checkRecursion(subRecCheckList) - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] + def validate(self, validateTrace=None): + if validateTrace is None: + validateTrace = [] + tmp = validateTrace[:] + [self] if self.expr is not None: self.expr.validate(tmp) - self.checkRecursion( [] ) + self.checkRecursion([]) - def __str__( self ): + def __str__(self): try: - return super(ParseElementEnhance,self).__str__() + return super(ParseElementEnhance, self).__str__() except Exception: pass if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) + self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr)) return self.strRepr @@ -4139,13 +4517,16 @@ class 
FollowedBy(ParseElementEnhance): [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] """ - def __init__( self, expr ): - super(FollowedBy,self).__init__(expr) + def __init__(self, expr): + super(FollowedBy, self).__init__(expr) self.mayReturnEmpty = True - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): + # by using self._expr.parse and deleting the contents of the returned ParseResults list + # we keep any named results that were defined in the FollowedBy expression _, ret = self.expr._parse(instring, loc, doActions=doActions) del ret[:] + return loc, ret @@ -4198,6 +4579,7 @@ def __init__(self, expr, retreat=None): self.retreat = retreat self.errmsg = "not preceded by " + str(expr) self.skipWhitespace = False + self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) def parseImpl(self, instring, loc=0, doActions=True): if self.exact: @@ -4208,19 +4590,18 @@ def parseImpl(self, instring, loc=0, doActions=True): else: # retreat specified a maximum lookbehind window, iterate test_expr = self.expr + StringEnd() - instring_slice = instring[:loc] + instring_slice = instring[max(0, loc - self.retreat):loc] last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat+1)): + for offset in range(1, min(loc, self.retreat + 1)+1): try: - _, ret = test_expr._parse(instring_slice, loc-offset) + # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) + _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset) except ParseBaseException as pbe: last_expr = pbe else: break else: raise last_expr - # return empty list of tokens, but preserve any defined results names - del ret[:] return loc, ret @@ -4247,20 +4628,20 @@ class NotAny(ParseElementEnhance): # integers that are followed by "." 
are actually floats integer = Word(nums) + ~Char(".") """ - def __init__( self, expr ): - super(NotAny,self).__init__(expr) - #~ self.leaveWhitespace() + def __init__(self, expr): + super(NotAny, self).__init__(expr) + # ~ self.leaveWhitespace() self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, "+_ustr(self.expr) + self.errmsg = "Found unwanted token, " + _ustr(self.expr) - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): if self.expr.canParseNext(instring, loc): raise ParseException(instring, loc, self.errmsg, self) return loc, [] - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4269,15 +4650,21 @@ def __str__( self ): return self.strRepr class _MultipleMatch(ParseElementEnhance): - def __init__( self, expr, stopOn=None): + def __init__(self, expr, stopOn=None): super(_MultipleMatch, self).__init__(expr) self.saveAsList = True ender = stopOn if isinstance(ender, basestring): - ender = ParserElement._literalStringClass(ender) + ender = self._literalStringClass(ender) + self.stopOn(ender) + + def stopOn(self, ender): + if isinstance(ender, basestring): + ender = self._literalStringClass(ender) self.not_ender = ~ender if ender is not None else None + return self - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): self_expr_parse = self.expr._parse self_skip_ignorables = self._skipIgnorables check_ender = self.not_ender is not None @@ -4288,24 +4675,38 @@ def parseImpl( self, instring, loc, doActions=True ): # if so, fail) if check_ender: try_not_ender(instring, loc) - loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) + loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False) try: hasIgnoreExprs = (not not self.ignoreExprs) while 1: if check_ender: try_not_ender(instring, loc) if hasIgnoreExprs: - preloc = self_skip_ignorables( instring, loc ) + preloc = self_skip_ignorables(instring, loc) else: preloc = loc - loc, tmptokens = self_expr_parse( instring, preloc, doActions ) + loc, tmptokens = self_expr_parse(instring, preloc, doActions) if tmptokens or tmptokens.haskeys(): tokens += tmptokens - except (ParseException,IndexError): + except (ParseException, IndexError): pass return loc, tokens + def _setResultsName(self, name, listAllMatches=False): + if __diag__.warn_ungrouped_named_tokens_in_collection: + for e in [self.expr] + getattr(self.expr, 'exprs', []): + if isinstance(e, ParserElement) and e.resultsName: + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName), + stacklevel=3) + + return super(_MultipleMatch, self)._setResultsName(name, listAllMatches) + + class OneOrMore(_MultipleMatch): """Repetition of one or more of the given expression. 
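# [Editor's sketch, not part of the patch: the stopOn handling refactored
#  into _MultipleMatch.stopOn() above ends a repetition as soon as the ender
#  expression would match (upstream pyparsing 2.4.5 assumed):]
import pyparsing as pp

words = pp.OneOrMore(pp.Word(pp.alphas), stopOn=pp.Keyword("end"))
print(words.parseString("one two three end", parseAll=False))
# -> ['one', 'two', 'three']; "end" is left for a following expression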
@@ -4332,8 +4733,8 @@ class OneOrMore(_MultipleMatch): (attr_expr * (1,)).parseString(text).pprint() """ - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4352,18 +4753,18 @@ class ZeroOrMore(_MultipleMatch): Example: similar to :class:`OneOrMore` """ - def __init__( self, expr, stopOn=None): - super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) + def __init__(self, expr, stopOn=None): + super(ZeroOrMore, self).__init__(expr, stopOn=stopOn) self.mayReturnEmpty = True - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): try: return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) - except (ParseException,IndexError): + except (ParseException, IndexError): return loc, [] - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4371,6 +4772,7 @@ def __str__( self ): return self.strRepr + class _NullToken(object): def __bool__(self): return False @@ -4378,7 +4780,6 @@ def __bool__(self): def __str__(self): return "" -_optionalNotMatched = _NullToken() class Optional(ParseElementEnhance): """Optional matching of the given expression. @@ -4416,28 +4817,30 @@ class Optional(ParseElementEnhance): ^ FAIL: Expected end of text (at char 5), (line:1, col:6) """ - def __init__( self, expr, default=_optionalNotMatched ): - super(Optional,self).__init__( expr, savelist=False ) + __optionalNotMatched = _NullToken() + + def __init__(self, expr, default=__optionalNotMatched): + super(Optional, self).__init__(expr, savelist=False) self.saveAsList = self.expr.saveAsList self.defaultValue = default self.mayReturnEmpty = True - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - except (ParseException,IndexError): - if self.defaultValue is not _optionalNotMatched: + loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) + except (ParseException, IndexError): + if self.defaultValue is not self.__optionalNotMatched: if self.expr.resultsName: - tokens = ParseResults([ self.defaultValue ]) + tokens = ParseResults([self.defaultValue]) tokens[self.expr.resultsName] = self.defaultValue else: - tokens = [ self.defaultValue ] + tokens = [self.defaultValue] else: tokens = [] return loc, tokens - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name if self.strRepr is None: @@ -4503,20 +4906,20 @@ class SkipTo(ParseElementEnhance): - issue_num: 79 - sev: Minor """ - def __init__( self, other, include=False, ignore=None, failOn=None ): - super( SkipTo, self ).__init__( other ) + def __init__(self, other, include=False, ignore=None, failOn=None): + super(SkipTo, self).__init__(other) self.ignoreExpr = ignore self.mayReturnEmpty = True self.mayIndexError = False self.includeMatch = include self.saveAsList = False if isinstance(failOn, basestring): - self.failOn = ParserElement._literalStringClass(failOn) + self.failOn = self._literalStringClass(failOn) else: self.failOn = failOn - self.errmsg = "No match found for "+_ustr(self.expr) + self.errmsg = "No match found for " + _ustr(self.expr) - def parseImpl( self, instring, loc, doActions=True ): + def parseImpl(self, instring, loc, doActions=True): startloc = loc instrlen = len(instring) expr = self.expr @@ 
-4558,7 +4961,7 @@ def parseImpl( self, instring, loc, doActions=True ): skipresult = ParseResults(skiptext) if self.includeMatch: - loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) + loc, mat = expr_parse(instring, loc, doActions, callPreParse=False) skipresult += mat return loc, skipresult @@ -4590,17 +4993,17 @@ class Forward(ParseElementEnhance): See :class:`ParseResults.pprint` for an example of a recursive parser created using ``Forward``. """ - def __init__( self, other=None ): - super(Forward,self).__init__( other, savelist=False ) + def __init__(self, other=None): + super(Forward, self).__init__(other, savelist=False) - def __lshift__( self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass(other) + def __lshift__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) self.expr = other self.strRepr = None self.mayIndexError = self.expr.mayIndexError self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars( self.expr.whiteChars ) + self.setWhitespaceChars(self.expr.whiteChars) self.skipWhitespace = self.expr.skipWhitespace self.saveAsList = self.expr.saveAsList self.ignoreExprs.extend(self.expr.ignoreExprs) @@ -4609,59 +5012,72 @@ def __lshift__( self, other ): def __ilshift__(self, other): return self << other - def leaveWhitespace( self ): + def leaveWhitespace(self): self.skipWhitespace = False return self - def streamline( self ): + def streamline(self): if not self.streamlined: self.streamlined = True if self.expr is not None: self.expr.streamline() return self - def validate( self, validateTrace=[] ): + def validate(self, validateTrace=None): + if validateTrace is None: + validateTrace = [] + if self not in validateTrace: - tmp = validateTrace[:]+[self] + tmp = validateTrace[:] + [self] if self.expr is not None: self.expr.validate(tmp) self.checkRecursion([]) - def __str__( self ): - if hasattr(self,"name"): + def __str__(self): + if hasattr(self, "name"): return self.name - return self.__class__.__name__ + ": ..." + if self.strRepr is not None: + return self.strRepr + + # Avoid infinite recursion by setting a temporary strRepr + self.strRepr = ": ..." - # stubbed out for now - creates awful memory and perf issues - self._revertClass = self.__class__ - self.__class__ = _ForwardNoRecurse + # Use the string representation of main expression. + retString = '...' try: if self.expr is not None: - retString = _ustr(self.expr) + retString = _ustr(self.expr)[:1000] else: retString = "None" finally: - self.__class__ = self._revertClass - return self.__class__.__name__ + ": " + retString + self.strRepr = self.__class__.__name__ + ": " + retString + return self.strRepr def copy(self): if self.expr is not None: - return super(Forward,self).copy() + return super(Forward, self).copy() else: ret = Forward() ret <<= self return ret -class _ForwardNoRecurse(Forward): - def __str__( self ): - return "..." + def _setResultsName(self, name, listAllMatches=False): + if __diag__.warn_name_set_on_empty_Forward: + if self.expr is None: + warnings.warn("{0}: setting results name {0!r} on {1} expression " + "that has no contained expression".format("warn_name_set_on_empty_Forward", + name, + type(self).__name__), + stacklevel=3) + + return super(Forward, self)._setResultsName(name, listAllMatches) class TokenConverter(ParseElementEnhance): """ Abstract subclass of :class:`ParseExpression`, for converting parsed results. 
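
The ``Forward.__str__`` rewrite above replaces the old class-swapping workaround (the deleted ``_ForwardNoRecurse``) with a temporary ``strRepr`` of ``": ..."``, so printing a self-referential grammar terminates, with the rendered expression capped at 1000 characters. A small recursive grammar that exercises that path (a sketch, not taken from the patch)::

    from pyparsing import Forward, Group, Suppress, Word, nums

    LPAR, RPAR = map(Suppress, "()")
    value = Forward()
    # value refers back to itself through the parenthesized alternative
    value <<= Word(nums) | Group(LPAR + value + RPAR)

    print(value)                        # recursion guard keeps this finite
    print(value.parseString("((42))"))  # -> [[['42']]]
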
""" - def __init__( self, expr, savelist=False ): - super(TokenConverter,self).__init__( expr )#, savelist ) + def __init__(self, expr, savelist=False): + super(TokenConverter, self).__init__(expr) # , savelist) self.saveAsList = False class Combine(TokenConverter): @@ -4682,8 +5098,8 @@ class Combine(TokenConverter): # no match when there are internal spaces print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) """ - def __init__( self, expr, joinString="", adjacent=True ): - super(Combine,self).__init__( expr ) + def __init__(self, expr, joinString="", adjacent=True): + super(Combine, self).__init__(expr) # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself if adjacent: self.leaveWhitespace() @@ -4692,20 +5108,20 @@ def __init__( self, expr, joinString="", adjacent=True ): self.joinString = joinString self.callPreparse = True - def ignore( self, other ): + def ignore(self, other): if self.adjacent: ParserElement.ignore(self, other) else: - super( Combine, self).ignore( other ) + super(Combine, self).ignore(other) return self - def postParse( self, instring, loc, tokenlist ): + def postParse(self, instring, loc, tokenlist): retToks = tokenlist.copy() del retToks[:] - retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults) + retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults) if self.resultsName and retToks.haskeys(): - return [ retToks ] + return [retToks] else: return retToks @@ -4719,17 +5135,17 @@ class Group(TokenConverter): num = Word(nums) term = ident | num func = ident + Optional(delimitedList(term)) - print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] + print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100'] func = ident + Group(Optional(delimitedList(term))) - print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] + print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']] """ - def __init__( self, expr ): - super(Group,self).__init__( expr ) - self.saveAsList = expr.saveAsList + def __init__(self, expr): + super(Group, self).__init__(expr) + self.saveAsList = True - def postParse( self, instring, loc, tokenlist ): - return [ tokenlist ] + def postParse(self, instring, loc, tokenlist): + return [tokenlist] class Dict(TokenConverter): """Converter to return a repetitive expression as a list, but also @@ -4770,31 +5186,31 @@ class Dict(TokenConverter): See more examples at :class:`ParseResults` of accessing fields by results name. 
""" - def __init__( self, expr ): - super(Dict,self).__init__( expr ) + def __init__(self, expr): + super(Dict, self).__init__(expr) self.saveAsList = True - def postParse( self, instring, loc, tokenlist ): - for i,tok in enumerate(tokenlist): + def postParse(self, instring, loc, tokenlist): + for i, tok in enumerate(tokenlist): if len(tok) == 0: continue ikey = tok[0] - if isinstance(ikey,int): + if isinstance(ikey, int): ikey = _ustr(tok[0]).strip() - if len(tok)==1: - tokenlist[ikey] = _ParseResultsWithOffset("",i) - elif len(tok)==2 and not isinstance(tok[1],ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) + if len(tok) == 1: + tokenlist[ikey] = _ParseResultsWithOffset("", i) + elif len(tok) == 2 and not isinstance(tok[1], ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) else: - dictvalue = tok.copy() #ParseResults(i) + dictvalue = tok.copy() # ParseResults(i) del dictvalue[0] - if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) + if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) if self.resultsName: - return [ tokenlist ] + return [tokenlist] else: return tokenlist @@ -4821,10 +5237,10 @@ class Suppress(TokenConverter): (See also :class:`delimitedList`.) """ - def postParse( self, instring, loc, tokenlist ): + def postParse(self, instring, loc, tokenlist): return [] - def suppress( self ): + def suppress(self): return self @@ -4834,12 +5250,12 @@ class OnlyOnce(object): def __init__(self, methodCall): self.callable = _trim_arity(methodCall) self.called = False - def __call__(self,s,l,t): + def __call__(self, s, l, t): if not self.called: - results = self.callable(s,l,t) + results = self.callable(s, l, t) self.called = True return results - raise ParseException(s,l,"") + raise ParseException(s, l, "") def reset(self): self.called = False @@ -4871,16 +5287,16 @@ def remove_duplicate_chars(tokens): f = _trim_arity(f) def z(*paArgs): thisFunc = f.__name__ - s,l,t = paArgs[-3:] - if len(paArgs)>3: + s, l, t = paArgs[-3:] + if len(paArgs) > 3: thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) + sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t)) try: ret = f(*paArgs) except Exception as exc: - sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) + sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc)) raise - sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) + sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret)) return ret try: z.__name__ = f.__name__ @@ -4891,7 +5307,7 @@ def z(*paArgs): # # global helpers # -def delimitedList( expr, delim=",", combine=False ): +def delimitedList(expr, delim=",", combine=False): """Helper to define a delimited list of expressions - the delimiter defaults to ','. 
By default, the list elements and delimiters can have intervening whitespace, and comments, but this can be @@ -4906,13 +5322,13 @@ def delimitedList( expr, delim=",", combine=False ): delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] """ - dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." + dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..." if combine: - return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName) + return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName) else: - return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) + return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName) -def countedArray( expr, intExpr=None ): +def countedArray(expr, intExpr=None): """Helper to define a counted list of expressions. This helper defines a pattern of the form:: @@ -4936,22 +5352,22 @@ def countedArray( expr, intExpr=None ): countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] """ arrayExpr = Forward() - def countFieldParseAction(s,l,t): + def countFieldParseAction(s, l, t): n = t[0] - arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) + arrayExpr << (n and Group(And([expr] * n)) or Group(empty)) return [] if intExpr is None: - intExpr = Word(nums).setParseAction(lambda t:int(t[0])) + intExpr = Word(nums).setParseAction(lambda t: int(t[0])) else: intExpr = intExpr.copy() intExpr.setName("arrayLen") intExpr.addParseAction(countFieldParseAction, callDuringTry=True) - return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') + return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...') def _flatten(L): ret = [] for i in L: - if isinstance(i,list): + if isinstance(i, list): ret.extend(_flatten(i)) else: ret.append(i) @@ -4973,7 +5389,7 @@ def matchPreviousLiteral(expr): enabled. 
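
As the reflowed ``delimitedList`` docstring above says, delimiters are suppressed from the results by default; pass ``combine=True`` to keep them and join the whole match into a single token. Both calls below come straight from that docstring::

    from pyparsing import Word, alphas, hexnums, delimitedList

    print(delimitedList(Word(alphas)).parseString("aa,bb,cc"))
    # -> ['aa', 'bb', 'cc']

    # combine=True: delimiters retained and the match returned as one string
    mac = delimitedList(Word(hexnums), delim=':', combine=True)
    print(mac.parseString("AA:BB:CC:DD:EE"))
    # -> ['AA:BB:CC:DD:EE']
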
""" rep = Forward() - def copyTokenToRepeater(s,l,t): + def copyTokenToRepeater(s, l, t): if t: if len(t) == 1: rep << t[0] @@ -5005,26 +5421,26 @@ def matchPreviousExpr(expr): rep = Forward() e2 = expr.copy() rep <<= e2 - def copyTokenToRepeater(s,l,t): + def copyTokenToRepeater(s, l, t): matchTokens = _flatten(t.asList()) - def mustMatchTheseTokens(s,l,t): + def mustMatchTheseTokens(s, l, t): theseTokens = _flatten(t.asList()) - if theseTokens != matchTokens: - raise ParseException("",0,"") - rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) + if theseTokens != matchTokens: + raise ParseException('', 0, '') + rep.setParseAction(mustMatchTheseTokens, callDuringTry=True) expr.addParseAction(copyTokenToRepeater, callDuringTry=True) rep.setName('(prev) ' + _ustr(expr)) return rep def _escapeRegexRangeChars(s): - #~ escape these chars: ^-] + # ~ escape these chars: ^-] for c in r"\^-]": - s = s.replace(c,_bslash+c) - s = s.replace("\n",r"\n") - s = s.replace("\t",r"\t") + s = s.replace(c, _bslash + c) + s = s.replace("\n", r"\n") + s = s.replace("\t", r"\t") return _ustr(s) -def oneOf( strs, caseless=False, useRegex=True ): +def oneOf(strs, caseless=False, useRegex=True, asKeyword=False): """Helper to quickly define a set of alternative Literals, and makes sure to do longest-first testing when there is a conflict, regardless of the input order, but returns @@ -5038,8 +5454,10 @@ def oneOf( strs, caseless=False, useRegex=True ): caseless - useRegex - (default= ``True``) - as an optimization, will generate a Regex object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True``, or if + a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if creating a :class:`Regex` raises an exception) + - asKeyword - (default=``False``) - enforce Keyword-style matching on the + generated expressions Example:: @@ -5054,57 +5472,62 @@ def oneOf( strs, caseless=False, useRegex=True ): [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] """ + if isinstance(caseless, basestring): + warnings.warn("More than one string argument passed to oneOf, pass " + "choices as a list or space-delimited string", stacklevel=2) + if caseless: - isequal = ( lambda a,b: a.upper() == b.upper() ) - masks = ( lambda a,b: b.upper().startswith(a.upper()) ) - parseElementClass = CaselessLiteral + isequal = (lambda a, b: a.upper() == b.upper()) + masks = (lambda a, b: b.upper().startswith(a.upper())) + parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral else: - isequal = ( lambda a,b: a == b ) - masks = ( lambda a,b: b.startswith(a) ) - parseElementClass = Literal + isequal = (lambda a, b: a == b) + masks = (lambda a, b: b.startswith(a)) + parseElementClass = Keyword if asKeyword else Literal symbols = [] - if isinstance(strs,basestring): + if isinstance(strs, basestring): symbols = strs.split() elif isinstance(strs, Iterable): symbols = list(strs) else: warnings.warn("Invalid argument to oneOf, expected string or iterable", - SyntaxWarning, stacklevel=2) + SyntaxWarning, stacklevel=2) if not symbols: return NoMatch() - i = 0 - while i < len(symbols)-1: - cur = symbols[i] - for j,other in enumerate(symbols[i+1:]): - if ( isequal(other, cur) ): - del symbols[i+j+1] - break - elif ( masks(cur, other) ): - del symbols[i+j+1] - symbols.insert(i,other) - cur = other - break - else: - i += 1 + if not asKeyword: + # if not producing keywords, need to reorder to take care to avoid masking + # longer choices with shorter ones + i = 0 + while i < 
len(symbols) - 1: + cur = symbols[i] + for j, other in enumerate(symbols[i + 1:]): + if isequal(other, cur): + del symbols[i + j + 1] + break + elif masks(cur, other): + del symbols[i + j + 1] + symbols.insert(i, other) + break + else: + i += 1 - if not caseless and useRegex: - #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) + if not (caseless or asKeyword) and useRegex: + # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols])) try: - if len(symbols)==len("".join(symbols)): - return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) + if len(symbols) == len("".join(symbols)): + return Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols)) else: - return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) + return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols)) except Exception: warnings.warn("Exception creating Regex for oneOf, building MatchFirst", SyntaxWarning, stacklevel=2) - # last resort, just use MatchFirst return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) -def dictOf( key, value ): +def dictOf(key, value): """Helper to easily and clearly define a dictionary by specifying the respective patterns for the key and value. Takes care of defining the :class:`Dict`, :class:`ZeroOrMore`, and @@ -5162,8 +5585,8 @@ def originalTextFor(expr, asString=True): Example:: src = "this is test <b> bold <i>text</i> </b> normal text " - for tag in ("b","i"): - opener,closer = makeHTMLTags(tag) + for tag in ("b", "i"): + opener, closer = makeHTMLTags(tag) patt = originalTextFor(opener + SkipTo(closer) + closer) print(patt.searchString(src)[0]) @@ -5172,14 +5595,14 @@ def originalTextFor(expr, asString=True): ['<b> bold <i>text</i> </b>'] ['<i>text</i>'] """ - locMarker = Empty().setParseAction(lambda s,loc,t: loc) + locMarker = Empty().setParseAction(lambda s, loc, t: loc) endlocMarker = locMarker.copy() endlocMarker.callPreparse = False matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") if asString: - extractText = lambda s,l,t: s[t._original_start:t._original_end] + extractText = lambda s, l, t: s[t._original_start: t._original_end] else: - def extractText(s,l,t): + def extractText(s, l, t): t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] matchExpr.setParseAction(extractText) matchExpr.ignoreExprs = expr.ignoreExprs @@ -5189,7 +5612,7 @@ def ungroup(expr): """Helper to undo pyparsing's default grouping of And expressions, even if all but one are non-empty. 
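
The ``oneOf`` rewrite above adds an ``asKeyword`` flag: keyword-style alternatives skip both the longest-first reordering and the ``Regex`` optimization, and instead build a ``MatchFirst`` of ``Keyword``/``CaselessKeyword`` objects. A sketch of the difference, with behavior inferred from the new code path::

    from pyparsing import oneOf

    comparison_op = oneOf("< = > <= >= !=")  # regex path, longest-first
    print(comparison_op.parseString("<="))   # -> ['<=']

    kw = oneOf("if elif else", asKeyword=True)
    # Keyword matching does not fire inside a longer identifier,
    # so "elsewhere" yields no spurious "else"
    print(kw.searchString("else elsewhere"))  # -> [['else']]
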
""" - return TokenConverter(expr).setParseAction(lambda t:t[0]) + return TokenConverter(expr).addParseAction(lambda t: t[0]) def locatedExpr(expr): """Helper to decorate a returned token with its starting and ending @@ -5216,7 +5639,7 @@ def locatedExpr(expr): [[8, 'lksdjjf', 15]] [[18, 'lkkjj', 23]] """ - locator = Empty().setParseAction(lambda s,l,t: l) + locator = Empty().setParseAction(lambda s, l, t: l) return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) @@ -5227,12 +5650,12 @@ def locatedExpr(expr): stringStart = StringStart().setName("stringStart") stringEnd = StringEnd().setName("stringEnd") -_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) -_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) -_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) +_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1]) +_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16))) +_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8))) _singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) _charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" +_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]" def srange(s): r"""Helper to easily define string ranges for use in Word @@ -5260,7 +5683,7 @@ def srange(s): - any combination of the above (``'aeiouy'``, ``'a-zA-Z0-9_$'``, etc.) """ - _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) + _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) try: return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) except Exception: @@ -5270,9 +5693,9 @@ def matchOnlyAtCol(n): """Helper method for defining parse actions that require matching at a specific column in the input text. """ - def verifyCol(strg,locn,toks): - if col(locn,strg) != n: - raise ParseException(strg,locn,"matched token not at column %d" % n) + def verifyCol(strg, locn, toks): + if col(locn, strg) != n: + raise ParseException(strg, locn, "matched token not at column %d" % n) return verifyCol def replaceWith(replStr): @@ -5288,9 +5711,9 @@ def replaceWith(replStr): OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] """ - return lambda s,l,t: [replStr] + return lambda s, l, t: [replStr] -def removeQuotes(s,l,t): +def removeQuotes(s, l, t): """Helper parse action for removing quotation marks from parsed quoted strings. @@ -5341,7 +5764,7 @@ def tokenMap(func, *args): now is the winter of our discontent made glorious summer by this sun of york ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] """ - def pa(s,l,t): + def pa(s, l, t): return [func(tokn, *args) for tokn in t] try: @@ -5361,33 +5784,41 @@ def pa(s,l,t): """(Deprecated) Helper parse action to convert tokens to lower case. 
Deprecated in favor of :class:`pyparsing_common.downcaseTokens`""" -def _makeTags(tagStr, xml): +def _makeTags(tagStr, xml, + suppress_LT=Suppress("<"), + suppress_GT=Suppress(">")): """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr,basestring): + if isinstance(tagStr, basestring): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name - tagAttrName = Word(alphas,alphanums+"_-:") - if (xml): - tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") + tagAttrName = Word(alphas, alphanums + "_-:") + if xml: + tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes) + openTag = (suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') + + suppress_GT) else: - printablesLessRAbrack = "".join(c for c in printables if c not in ">") - tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ - Optional( Suppress("=") + tagAttrValue ) ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - closeTag = Combine(_L("</") + tagStr + ">") - - openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) - closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) + tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">") + openTag = (suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens) + + Optional(Suppress("=") + tagAttrValue)))) + + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') + + suppress_GT) + closeTag = Combine(_L("</") + tagStr + ">", adjacent=False) + + openTag.setName("<%s>" % resname) + # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels + openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy())) + closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname) openTag.tag = resname closeTag.tag = resname + openTag.tag_body = SkipTo(closeTag()) return openTag, closeTag def makeHTMLTags(tagStr): @@ -5400,7 +5831,7 @@ def makeHTMLTags(tagStr): text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' # makeHTMLTags returns pyparsing expressions for the opening and # closing tags as a 2-tuple - a,a_end = makeHTMLTags("A") + a, a_end = makeHTMLTags("A") link_expr = a + SkipTo(a_end)("link_text") + a_end for link in link_expr.searchString(text): @@ -5412,7 +5843,7 @@ def makeHTMLTags(tagStr): pyparsing -> https://github.com/pyparsing/pyparsing/wiki """ - return _makeTags( tagStr, False ) + return _makeTags(tagStr, False) def makeXMLTags(tagStr): """Helper to construct opening and closing tag expressions for XML, @@ -5420,9 +5851,9 @@ def makeXMLTags(tagStr): 
Example: similar to :class:`makeHTMLTags` """ - return _makeTags( tagStr, True ) + return _makeTags(tagStr, True) -def withAttribute(*args,**attrDict): +def withAttribute(*args, **attrDict): """Helper to create a validating parse action to be used with start tags created with :class:`makeXMLTags` or :class:`makeHTMLTags`. Use ``withAttribute`` to qualify @@ -5435,7 +5866,7 @@ def withAttribute(*args,**attrDict): - keyword arguments, as in ``(align="right")``, or - as an explicit dict with ``**`` operator, when an attribute name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align","right"))`` + - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. @@ -5482,13 +5913,13 @@ def withAttribute(*args,**attrDict): attrs = args[:] else: attrs = attrDict.items() - attrs = [(k,v) for k,v in attrs] - def pa(s,l,tokens): - for attrName,attrValue in attrs: + attrs = [(k, v) for k, v in attrs] + def pa(s, l, tokens): + for attrName, attrValue in attrs: if attrName not in tokens: - raise ParseException(s,l,"no matching attribute " + attrName) + raise ParseException(s, l, "no matching attribute " + attrName) if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % + raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" % (attrName, tokens[attrName], attrValue)) return pa withAttribute.ANY_VALUE = object() @@ -5529,13 +5960,13 @@ def withClass(classname, namespace=''): 1,3 2,3 1,1 """ classattr = "%s:class" % namespace if namespace else "class" - return withAttribute(**{classattr : classname}) + return withAttribute(**{classattr: classname}) opAssoc = SimpleNamespace() opAssoc.LEFT = object() opAssoc.RIGHT = object() -def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): +def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')): """Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. 
Parse actions can also be @@ -5613,9 +6044,9 @@ def parseImpl(self, instring, loc, doActions=True): return loc, [] ret = Forward() - lastExpr = baseExpr | ( lpar + ret + rpar ) - for i,operDef in enumerate(opList): - opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] + lastExpr = baseExpr | (lpar + ret + rpar) + for i, operDef in enumerate(opList): + opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4] termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: @@ -5625,15 +6056,15 @@ def parseImpl(self, instring, loc, doActions=True): thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) + matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr)) elif arity == 2: if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) + matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr)) else: - matchExpr = _FB(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) + matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr)) elif arity == 3: - matchExpr = _FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ - Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) + matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + + Group(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: @@ -5641,15 +6072,15 @@ def parseImpl(self, instring, loc, doActions=True): # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) + matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) elif arity == 2: if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) + matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr)) else: - matchExpr = _FB(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) + matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr)) elif arity == 3: - matchExpr = _FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ - Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) + matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") else: @@ -5659,7 +6090,7 @@ def parseImpl(self, instring, loc, doActions=True): matchExpr.setParseAction(*pa) else: matchExpr.setParseAction(pa) - thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) + thisExpr <<= (matchExpr.setName(termName) | lastExpr) lastExpr = thisExpr ret <<= lastExpr return ret @@ -5668,10 +6099,10 @@ def parseImpl(self, instring, loc, doActions=True): """(Deprecated) Former name of :class:`infixNotation`, will be dropped in a future release.""" -dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") -sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") -quotedString = 
Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes") +quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' + | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes") unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): @@ -5707,7 +6138,7 @@ def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.cop ident = Word(alphas+'_', alphanums+'_') number = pyparsing_common.number arg = Group(decl_data_type + ident) - LPAR,RPAR = map(Suppress, "()") + LPAR, RPAR = map(Suppress, "()") code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) @@ -5742,33 +6173,40 @@ def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.cop if opener == closer: raise ValueError("opening and closing strings cannot be the same") if content is None: - if isinstance(opener,basestring) and isinstance(closer,basestring): - if len(opener) == 1 and len(closer)==1: + if isinstance(opener, basestring) and isinstance(closer, basestring): + if len(opener) == 1 and len(closer) == 1: if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) + content = (Combine(OneOrMore(~ignoreExpr + + CharsNotIn(opener + + closer + + ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).setParseAction(lambda t: t[0].strip())) else: - content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t:t[0].strip())) + content = (empty.copy() + CharsNotIn(opener + + closer + + ParserElement.DEFAULT_WHITE_CHARS + ).setParseAction(lambda t: t[0].strip())) else: if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - ~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) + content = (Combine(OneOrMore(~ignoreExpr + + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) + ).setParseAction(lambda t: t[0].strip())) else: - content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) + content = (Combine(OneOrMore(~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) + ).setParseAction(lambda t: t[0].strip())) else: raise ValueError("opening and closing arguments must be strings if no content expression is given") ret = Forward() if ignoreExpr is not None: - ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) + ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)) else: - ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) - ret.setName('nested %s%s expression' % (opener,closer)) + 
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) + ret.setName('nested %s%s expression' % (opener, closer)) return ret def indentedBlock(blockStatementExpr, indentStack, indent=True): @@ -5783,7 +6221,7 @@ def indentedBlock(blockStatementExpr, indentStack, indent=True): (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond - the the current level; set to False for block of left-most + the current level; set to False for block of left-most statements (default= ``True``) A valid block must contain at least one ``blockStatement``. @@ -5816,15 +6254,15 @@ def eggs(z): stmt = Forward() identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":") func_body = indentedBlock(stmt, indentStack) - funcDef = Group( funcDecl + func_body ) + funcDef = Group(funcDecl + func_body) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) - stmt << ( funcDef | assignment | identifier ) + stmt << (funcDef | assignment | identifier) module_body = OneOrMore(stmt) @@ -5852,47 +6290,56 @@ def eggs(z): ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] """ - def checkPeerIndent(s,l,t): + backup_stack = indentStack[:] + + def reset_stack(): + indentStack[:] = backup_stack + + def checkPeerIndent(s, l, t): if l >= len(s): return - curCol = col(l,s) + curCol = col(l, s) if curCol != indentStack[-1]: if curCol > indentStack[-1]: - raise ParseFatalException(s,l,"illegal nesting") - raise ParseException(s,l,"not a peer entry") + raise ParseException(s, l, "illegal nesting") + raise ParseException(s, l, "not a peer entry") - def checkSubIndent(s,l,t): - curCol = col(l,s) + def checkSubIndent(s, l, t): + curCol = col(l, s) if curCol > indentStack[-1]: - indentStack.append( curCol ) + indentStack.append(curCol) else: - raise ParseException(s,l,"not a subentry") + raise ParseException(s, l, "not a subentry") - def checkUnindent(s,l,t): + def checkUnindent(s, l, t): if l >= len(s): return - curCol = col(l,s) - if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): - raise ParseException(s,l,"not an unindent") - indentStack.pop() + curCol = col(l, s) + if not(indentStack and curCol in indentStack): + raise ParseException(s, l, "not an unindent") + if curCol < indentStack[-1]: + indentStack.pop() - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) + NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress(), stopOn=StringEnd()) INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') PEER = Empty().setParseAction(checkPeerIndent).setName('') UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') if indent: - smExpr = Group( Optional(NL) + - #~ FollowedBy(blockStatementExpr) + - INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) + smExpr = Group(Optional(NL) + + INDENT + + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd()) + + UNDENT) else: - smExpr = Group( Optional(NL) + - (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) + smExpr = Group(Optional(NL) + + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), 
stopOn=StringEnd()) + + UNDENT) + smExpr.setFailAction(lambda a, b, c, d: reset_stack()) blockStatementExpr.ignore(_bslash + LineEnd()) return smExpr.setName('indented block') alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) -_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\'')) commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") def replaceHTMLEntity(t): """Helper parser action to replace common HTML entities with their special characters""" @@ -5909,7 +6356,7 @@ def replaceHTMLEntity(t): dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") "Comment of the form ``// ... (to end of line)``" -cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment") "Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" javaStyleComment = cppStyleComment @@ -5918,10 +6365,10 @@ def replaceHTMLEntity(t): pythonStyleComment = Regex(r"#.*").setName("Python style comment") "Comment of the form ``# ... (to end of line)``" -_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + - Optional( Word(" \t") + - ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") -commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + + Optional(Word(" \t") + + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem") +commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList") """(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. 
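
The ``indentedBlock`` changes above snapshot the indent stack and restore it through a fail action, demote "illegal nesting" from a fatal error to an ordinary ``ParseException``, and stop the ``NL`` and statement repetitions at end of string. The diff's own sample grammar, trimmed down, shows the helper in use::

    from pyparsing import (Forward, Group, OneOrMore, Optional, Word,
                           alphanums, alphas, delimitedList, indentedBlock,
                           nums)

    indentStack = [1]
    stmt = Forward()
    identifier = Word(alphas, alphanums)
    funcDecl = ("def" + identifier
                + Group("(" + Optional(delimitedList(identifier)) + ")") + ":")
    funcDef = Group(funcDecl + indentedBlock(stmt, indentStack))

    rvalue = Forward()
    funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
    rvalue << (funcCall | identifier | Word(nums))
    assignment = Group(identifier + "=" + rvalue)
    stmt << (funcDef | assignment | identifier)

    source = "def spam(x, y):\n    x = 1\n    y\n"
    print(OneOrMore(stmt).parseString(source))
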
@@ -6087,7 +6534,7 @@ class pyparsing_common: integer = Word(nums).setName("integer").setParseAction(convertToInteger) """expression that parses an unsigned integer, returns an int""" - hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16)) """expression that parses a hexadecimal integer, returns an int""" signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) @@ -6101,10 +6548,10 @@ class pyparsing_common: """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" mixed_integer.addParseAction(sum) - real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) + real = Regex(r'[+-]?(:?\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat) """expression that parses a floating point number and returns a float""" - sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + sci_real = Regex(r'[+-]?(:?\d+(:?[eE][+-]?\d+)|(:?\d+\.\d*|\.\d+)(:?[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) """expression that parses a floating point number with optional scientific notation and returns a float""" @@ -6115,15 +6562,18 @@ class pyparsing_common: fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) """any int or real number, returned as float""" - identifier = Word(alphas+'_', alphanums+'_').setName("identifier") + identifier = Word(alphas + '_', alphanums + '_').setName("identifier") """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") "IPv4 address (``0.0.0.0 - 255.255.255.255``)" _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) + + "::" + + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) + ).setName("short IPv6 address") _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") @@ -6150,7 +6600,7 @@ def convertToDate(fmt="%Y-%m-%d"): [datetime.date(1999, 12, 31)] """ - def cvt_fn(s,l,t): + def cvt_fn(s, l, t): try: return datetime.strptime(t[0], fmt).date() except ValueError as ve: @@ -6175,7 +6625,7 @@ def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] """ - def cvt_fn(s,l,t): + def cvt_fn(s, l, t): try: return datetime.strptime(t[0], fmt) except ValueError as ve: @@ -6200,7 +6650,7 @@ def stripHTMLTags(s, l, tokens): # strip HTML links from normal text text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' - td,td_end = 
makeHTMLTags("TD") + td, td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end print(table_text.parseString(text).body) @@ -6210,9 +6660,13 @@ def stripHTMLTags(s, l, tokens): """ return pyparsing_common._html_stripper.transformString(tokens[0]) - _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') - + Optional( White(" \t") ) ) ).streamline().setName("commaItem") - comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") + _commasepitem = Combine(OneOrMore(~Literal(",") + + ~LineEnd() + + Word(printables, excludeChars=',') + + Optional(White(" \t")))).streamline().setName("commaItem") + comma_separated_list = delimitedList(Optional(quotedString.copy() + | _commasepitem, default='') + ).setName("comma separated list") """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) @@ -6231,7 +6685,8 @@ def __init__(self, fn): def __get__(self, obj, cls): if cls is None: cls = type(obj) - if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) for superclass in cls.__mro__[1:]): + if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) + for superclass in cls.__mro__[1:]): cls._intern = {} attrname = self.fn.__name__ if attrname not in cls._intern: @@ -6262,7 +6717,7 @@ def _get_chars_for_ranges(cls): if cc is unicode_set: break for rr in cc._ranges: - ret.extend(range(rr[0], rr[-1]+1)) + ret.extend(range(rr[0], rr[-1] + 1)) return [unichr(c) for c in sorted(set(ret))] @_lazyclassproperty @@ -6318,27 +6773,27 @@ class Cyrillic(unicode_set): class Chinese(unicode_set): "Unicode set for Chinese Unicode Character Range" - _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f), ] + _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),] class Japanese(unicode_set): "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges = [ ] + _ranges = [] class Kanji(unicode_set): "Unicode set for Kanji Unicode Character Range" - _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f), ] + _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),] class Hiragana(unicode_set): "Unicode set for Hiragana Unicode Character Range" - _ranges = [(0x3040, 0x309f), ] + _ranges = [(0x3040, 0x309f),] class Katakana(unicode_set): "Unicode set for Katakana Unicode Character Range" - _ranges = [(0x30a0, 0x30ff), ] + _ranges = [(0x30a0, 0x30ff),] class Korean(unicode_set): "Unicode set for Korean Unicode Character Range" - _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f), ] + _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),] class CJK(Chinese, Japanese, Korean): "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" @@ -6346,15 +6801,15 @@ class CJK(Chinese, Japanese, Korean): class Thai(unicode_set): "Unicode set for Thai Unicode Character Range" - _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b), ] + _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),] class Arabic(unicode_set): "Unicode set for Arabic Unicode Character Range" - _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f), ] + _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),] class Hebrew(unicode_set): "Unicode set 
for Hebrew Unicode Character Range" - _ranges = [(0x0590, 0x05ff), ] + _ranges = [(0x0590, 0x05ff),] class Devanagari(unicode_set): "Unicode set for Devanagari Unicode Character Range" @@ -6366,18 +6821,18 @@ class Devanagari(unicode_set): # define ranges in language character sets if PY_3: - setattr(pyparsing_unicode, "العربية", pyparsing_unicode.Arabic) - setattr(pyparsing_unicode, "中文", pyparsing_unicode.Chinese) - setattr(pyparsing_unicode, "кириллица", pyparsing_unicode.Cyrillic) - setattr(pyparsing_unicode, "Ελληνικά", pyparsing_unicode.Greek) - setattr(pyparsing_unicode, "עִברִית", pyparsing_unicode.Hebrew) - setattr(pyparsing_unicode, "日本語", pyparsing_unicode.Japanese) - setattr(pyparsing_unicode.Japanese, "漢字", pyparsing_unicode.Japanese.Kanji) - setattr(pyparsing_unicode.Japanese, "カタカナ", pyparsing_unicode.Japanese.Katakana) - setattr(pyparsing_unicode.Japanese, "ひらがな", pyparsing_unicode.Japanese.Hiragana) - setattr(pyparsing_unicode, "한국어", pyparsing_unicode.Korean) - setattr(pyparsing_unicode, "ไทย", pyparsing_unicode.Thai) - setattr(pyparsing_unicode, "देवनागरी", pyparsing_unicode.Devanagari) + setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic) + setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese) + setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic) + setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek) + setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew) + setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese) + setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji) + setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana) + setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana) + setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean) + setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai) + setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari) if __name__ == "__main__": diff --git a/pipenv/vendor/pytoml/LICENSE b/pipenv/vendor/pythonfinder/LICENSE similarity index 76% rename from pipenv/vendor/pytoml/LICENSE rename to pipenv/vendor/pythonfinder/LICENSE index 9739fc67c6..c7ac395fb9 100644 --- a/pipenv/vendor/pytoml/LICENSE +++ b/pipenv/vendor/pythonfinder/LICENSE @@ -1,16 +1,21 @@ -No-notice MIT License +MIT License + +Copyright (c) 2016 Steve Dower Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so. +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
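
The ``pyparsing_unicode`` hunks above normalize the per-script ``_ranges`` tuples and add explicit ``u""`` prefixes to the native-language alias strings (the ``setattr`` block itself only runs under ``PY_3``). Each ``unicode_set`` expands its ranges into character strings through the lazy class properties shown earlier in the diff; assuming the usual ``alphas`` property on those sets, usage looks like this::

    # -*- coding: utf-8 -*-
    from pyparsing import Word, pyparsing_unicode as ppu

    # the character class is generated from the Greek block's _ranges
    greek_word = Word(ppu.Greek.alphas)
    print(greek_word.parseString(u"Ελληνικά"))  # -> ['Ελληνικά']

    # the aliased attribute names refer to the same set classes (Python 3)
    assert getattr(ppu, u"Ελληνικά") is ppu.Greek
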
diff --git a/pipenv/vendor/pythonfinder/_vendor/pep514tools/environment.py b/pipenv/vendor/pythonfinder/_vendor/pep514tools/environment.py index e201d0b59e..f4104a453b 100644 --- a/pipenv/vendor/pythonfinder/_vendor/pep514tools/environment.py +++ b/pipenv/vendor/pythonfinder/_vendor/pep514tools/environment.py @@ -16,7 +16,7 @@ _PYTHONCORE_COMPATIBILITY_TAGS = { '2.0', '2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7', '3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', - '3.8' + '3.8', '3.9' } _IS_64BIT_OS = None diff --git a/pipenv/vendor/pythonfinder/models/mixins.py b/pipenv/vendor/pythonfinder/models/mixins.py index a3637e1261..4c473b1817 100644 --- a/pipenv/vendor/pythonfinder/models/mixins.py +++ b/pipenv/vendor/pythonfinder/models/mixins.py @@ -7,7 +7,6 @@ import attr import six -from cached_property import cached_property from vistir.compat import fs_str from ..environment import MYPY_RUNNING @@ -174,7 +173,6 @@ def is_dir(self): # type: () -> None self._is_dir = None - # @cached_property @property def is_executable(self): # type: () -> bool @@ -195,7 +193,6 @@ def is_executable(self): # type: () -> None self._is_executable = None - # @cached_property @property def is_python(self): # type: () -> bool @@ -240,7 +237,6 @@ def get_py_version(self): return py_version return None - # @cached_property @property def py_version(self): # type: () -> Optional[PythonVersion] diff --git a/pipenv/vendor/pythonfinder/models/path.py b/pipenv/vendor/pythonfinder/models/path.py index 80d5ac5982..f46677e98c 100644 --- a/pipenv/vendor/pythonfinder/models/path.py +++ b/pipenv/vendor/pythonfinder/models/path.py @@ -1,7 +1,6 @@ # -*- coding=utf-8 -*- from __future__ import absolute_import, print_function -import copy import operator import os import sys @@ -31,7 +30,6 @@ expand_paths, filter_pythons, is_in_path, - looks_like_python, normalize_path, optional_instance_of, parse_asdf_version_order, @@ -41,7 +39,6 @@ unnest, ) from .mixins import BaseFinder, BasePath -from .python import PythonVersion if MYPY_RUNNING: from typing import ( @@ -58,7 +55,7 @@ Any, TypeVar, ) - from .python import PythonFinder + from .python import PythonFinder, PythonVersion from .windows import WindowsFinder FinderType = TypeVar("FinderType", BaseFinder, PythonFinder, WindowsFinder) @@ -130,7 +127,7 @@ def __del__(self): self._python_executables = {} self._executables = [] self.python_version_dict = defaultdict(list) - self.version_dict = defaultdict(list) + self._version_dict = defaultdict(list) self.path_order = [] self.pyenv_finder = None self.asdf_finder = None @@ -193,7 +190,7 @@ def version_dict(self): if entry not in self._version_dict[version] and entry.is_python: self._version_dict[version].append(entry) for p, entry in self.python_executables.items(): - version = entry.as_python + version = entry.as_python # type: PythonVersion if not version: continue if not isinstance(version, tuple): @@ -331,7 +328,8 @@ def _setup_asdf(self): # we are in a virtualenv without global pyenv on the path, so we should # not write pyenv to the path here return self - root_paths = [p for p in asdf_finder.roots] + # * These are the root paths for the finder + _ = [p for p in asdf_finder.roots] new_instance = self._slice_in_paths(asdf_index, [asdf_finder.root]) paths = self.paths.copy() paths[asdf_finder.root] = asdf_finder @@ -393,8 +391,8 @@ def _setup_pyenv(self): # we are in a virtualenv without global pyenv on the path, so we should # not write pyenv to the path here return self - - root_paths = [p for p in pyenv_finder.roots] + # 
* These are the root paths for the finder + _ = [p for p in pyenv_finder.roots] new_instance = self._slice_in_paths(pyenv_index, [pyenv_finder.root]) paths = new_instance.paths.copy() paths[pyenv_finder.root] = pyenv_finder @@ -649,7 +647,7 @@ def create( if global_search: if "PATH" in os.environ: paths = os.environ["PATH"].split(os.pathsep) - path_order = [] + path_order = [] # type: List[str] if path: path_order = [path] path_instance = ensure_path(path) @@ -691,15 +689,19 @@ class PathEntry(BasePath): is_root = attr.ib(default=True, type=bool, cmp=False) def __lt__(self, other): + # type: (BasePath) -> bool return self.path.as_posix() < other.path.as_posix() def __lte__(self, other): + # type: (BasePath) -> bool return self.path.as_posix() <= other.path.as_posix() def __gt__(self, other): + # type: (BasePath) -> bool return self.path.as_posix() > other.path.as_posix() def __gte__(self, other): + # type: (BasePath) -> bool return self.path.as_posix() >= other.path.as_posix() def __del__(self): @@ -742,7 +744,6 @@ def _gen_children(self): yield (child.as_posix(), entry) return - # @cached_property @property def children(self): # type: () -> Dict[str, PathEntry] diff --git a/pipenv/vendor/pythonfinder/models/python.py b/pipenv/vendor/pythonfinder/models/python.py index 427eb694ff..3c859980c7 100644 --- a/pipenv/vendor/pythonfinder/models/python.py +++ b/pipenv/vendor/pythonfinder/models/python.py @@ -1,7 +1,6 @@ # -*- coding=utf-8 -*- from __future__ import absolute_import, print_function -import copy import logging import operator import platform @@ -46,9 +45,14 @@ Type, TypeVar, Iterator, + overload, ) from .path import PathEntry from .._vendor.pep514tools.environment import Environment +else: + + def overload(f): + return f logger = logging.getLogger(__name__) @@ -231,6 +235,7 @@ def get_pythons(self): # type: () -> DefaultDict[str, PathEntry] return self.pythons + @overload @classmethod def create(cls, root, sort_function, version_glob_path=None, ignore_unsupported=True): # type: (str, Callable, Optional[str], bool) -> PythonFinder @@ -501,7 +506,7 @@ def update_metadata(self, metadata): for key in metadata: try: - current_value = getattr(self, key) + _ = getattr(self, key) except AttributeError: continue else: @@ -677,7 +682,7 @@ def add_entry(self, entry): # type: (...) 
-> None version = entry.as_python # type: PythonVersion if version: - entries = self.versions[version.version_tuple] + _ = self.versions[version.version_tuple] paths = {p.path for p in self.versions.get(version.version_tuple, [])} if entry.path not in paths: self.versions[version.version_tuple].append(entry) diff --git a/pipenv/vendor/pytoml/__init__.py b/pipenv/vendor/pytoml/__init__.py deleted file mode 100644 index 8ed060ff52..0000000000 --- a/pipenv/vendor/pytoml/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .core import TomlError -from .parser import load, loads -from .test import translate_to_test -from .writer import dump, dumps \ No newline at end of file diff --git a/pipenv/vendor/pytoml/core.py b/pipenv/vendor/pytoml/core.py deleted file mode 100644 index c182734e1c..0000000000 --- a/pipenv/vendor/pytoml/core.py +++ /dev/null @@ -1,13 +0,0 @@ -class TomlError(RuntimeError): - def __init__(self, message, line, col, filename): - RuntimeError.__init__(self, message, line, col, filename) - self.message = message - self.line = line - self.col = col - self.filename = filename - - def __str__(self): - return '{}({}, {}): {}'.format(self.filename, self.line, self.col, self.message) - - def __repr__(self): - return 'TomlError({!r}, {!r}, {!r}, {!r})'.format(self.message, self.line, self.col, self.filename) diff --git a/pipenv/vendor/pytoml/parser.py b/pipenv/vendor/pytoml/parser.py deleted file mode 100644 index 3493aa644c..0000000000 --- a/pipenv/vendor/pytoml/parser.py +++ /dev/null @@ -1,341 +0,0 @@ -import string, re, sys, datetime -from .core import TomlError -from .utils import rfc3339_re, parse_rfc3339_re - -if sys.version_info[0] == 2: - _chr = unichr -else: - _chr = chr - -def load(fin, translate=lambda t, x, v: v, object_pairs_hook=dict): - return loads(fin.read(), translate=translate, object_pairs_hook=object_pairs_hook, filename=getattr(fin, 'name', repr(fin))) - -def loads(s, filename='<string>', translate=lambda t, x, v: v, object_pairs_hook=dict): - if isinstance(s, bytes): - s = s.decode('utf-8') - - s = s.replace('\r\n', '\n') - - root = object_pairs_hook() - tables = object_pairs_hook() - scope = root - - src = _Source(s, filename=filename) - ast = _p_toml(src, object_pairs_hook=object_pairs_hook) - - def error(msg): - raise TomlError(msg, pos[0], pos[1], filename) - - def process_value(v, object_pairs_hook): - kind, text, value, pos = v - if kind == 'str' and value.startswith('\n'): - value = value[1:] - if kind == 'array': - if value and any(k != value[0][0] for k, t, v, p in value[1:]): - error('array-type-mismatch') - value = [process_value(item, object_pairs_hook=object_pairs_hook) for item in value] - elif kind == 'table': - value = object_pairs_hook([(k, process_value(value[k], object_pairs_hook=object_pairs_hook)) for k in value]) - return translate(kind, text, value) - - for kind, value, pos in ast: - if kind == 'kv': - k, v = value - if k in scope: - error('duplicate_keys. 
Key "{0}" was used more than once.'.format(k)) - scope[k] = process_value(v, object_pairs_hook=object_pairs_hook) - else: - is_table_array = (kind == 'table_array') - cur = tables - for name in value[:-1]: - if isinstance(cur.get(name), list): - d, cur = cur[name][-1] - else: - d, cur = cur.setdefault(name, (None, object_pairs_hook())) - - scope = object_pairs_hook() - name = value[-1] - if name not in cur: - if is_table_array: - cur[name] = [(scope, object_pairs_hook())] - else: - cur[name] = (scope, object_pairs_hook()) - elif isinstance(cur[name], list): - if not is_table_array: - error('table_type_mismatch') - cur[name].append((scope, object_pairs_hook())) - else: - if is_table_array: - error('table_type_mismatch') - old_scope, next_table = cur[name] - if old_scope is not None: - error('duplicate_tables') - cur[name] = (scope, next_table) - - def merge_tables(scope, tables): - if scope is None: - scope = object_pairs_hook() - for k in tables: - if k in scope: - error('key_table_conflict') - v = tables[k] - if isinstance(v, list): - scope[k] = [merge_tables(sc, tbl) for sc, tbl in v] - else: - scope[k] = merge_tables(v[0], v[1]) - return scope - - return merge_tables(root, tables) - -class _Source: - def __init__(self, s, filename=None): - self.s = s - self._pos = (1, 1) - self._last = None - self._filename = filename - self.backtrack_stack = [] - - def last(self): - return self._last - - def pos(self): - return self._pos - - def fail(self): - return self._expect(None) - - def consume_dot(self): - if self.s: - self._last = self.s[0] - self.s = self[1:] - self._advance(self._last) - return self._last - return None - - def expect_dot(self): - return self._expect(self.consume_dot()) - - def consume_eof(self): - if not self.s: - self._last = '' - return True - return False - - def expect_eof(self): - return self._expect(self.consume_eof()) - - def consume(self, s): - if self.s.startswith(s): - self.s = self.s[len(s):] - self._last = s - self._advance(s) - return True - return False - - def expect(self, s): - return self._expect(self.consume(s)) - - def consume_re(self, re): - m = re.match(self.s) - if m: - self.s = self.s[len(m.group(0)):] - self._last = m - self._advance(m.group(0)) - return m - return None - - def expect_re(self, re): - return self._expect(self.consume_re(re)) - - def __enter__(self): - self.backtrack_stack.append((self.s, self._pos)) - - def __exit__(self, type, value, traceback): - if type is None: - self.backtrack_stack.pop() - else: - self.s, self._pos = self.backtrack_stack.pop() - return type == TomlError - - def commit(self): - self.backtrack_stack[-1] = (self.s, self._pos) - - def _expect(self, r): - if not r: - raise TomlError('msg', self._pos[0], self._pos[1], self._filename) - return r - - def _advance(self, s): - suffix_pos = s.rfind('\n') - if suffix_pos == -1: - self._pos = (self._pos[0], self._pos[1] + len(s)) - else: - self._pos = (self._pos[0] + s.count('\n'), len(s) - suffix_pos) - -_ews_re = re.compile(r'(?:[ \t]|#[^\n]*\n|#[^\n]*\Z|\n)*') -def _p_ews(s): - s.expect_re(_ews_re) - -_ws_re = re.compile(r'[ \t]*') -def _p_ws(s): - s.expect_re(_ws_re) - -_escapes = { 'b': '\b', 'n': '\n', 'r': '\r', 't': '\t', '"': '"', - '\\': '\\', 'f': '\f' } - -_basicstr_re = re.compile(r'[^"\\\000-\037]*') -_short_uni_re = re.compile(r'u([0-9a-fA-F]{4})') -_long_uni_re = re.compile(r'U([0-9a-fA-F]{8})') -_escapes_re = re.compile(r'[btnfr\"\\]') -_newline_esc_re = re.compile('\n[ \t\n]*') -def _p_basicstr_content(s, content=_basicstr_re): - res = [] - while True: - 
res.append(s.expect_re(content).group(0)) - if not s.consume('\\'): - break - if s.consume_re(_newline_esc_re): - pass - elif s.consume_re(_short_uni_re) or s.consume_re(_long_uni_re): - v = int(s.last().group(1), 16) - if 0xd800 <= v < 0xe000: - s.fail() - res.append(_chr(v)) - else: - s.expect_re(_escapes_re) - res.append(_escapes[s.last().group(0)]) - return ''.join(res) - -_key_re = re.compile(r'[0-9a-zA-Z-_]+') -def _p_key(s): - with s: - s.expect('"') - r = _p_basicstr_content(s, _basicstr_re) - s.expect('"') - return r - if s.consume('\''): - if s.consume('\'\''): - r = s.expect_re(_litstr_ml_re).group(0) - s.expect('\'\'\'') - else: - r = s.expect_re(_litstr_re).group(0) - s.expect('\'') - return r - return s.expect_re(_key_re).group(0) - -_float_re = re.compile(r'[+-]?(?:0|[1-9](?:_?\d)*)(?:\.\d(?:_?\d)*)?(?:[eE][+-]?(?:\d(?:_?\d)*))?') - -_basicstr_ml_re = re.compile(r'(?:""?(?!")|[^"\\\000-\011\013-\037])*') -_litstr_re = re.compile(r"[^'\000\010\012-\037]*") -_litstr_ml_re = re.compile(r"(?:(?:|'|'')(?:[^'\000-\010\013-\037]))*") -def _p_value(s, object_pairs_hook): - pos = s.pos() - - if s.consume('true'): - return 'bool', s.last(), True, pos - if s.consume('false'): - return 'bool', s.last(), False, pos - - if s.consume('"'): - if s.consume('""'): - r = _p_basicstr_content(s, _basicstr_ml_re) - s.expect('"""') - else: - r = _p_basicstr_content(s, _basicstr_re) - s.expect('"') - return 'str', r, r, pos - - if s.consume('\''): - if s.consume('\'\''): - r = s.expect_re(_litstr_ml_re).group(0) - s.expect('\'\'\'') - else: - r = s.expect_re(_litstr_re).group(0) - s.expect('\'') - return 'str', r, r, pos - - if s.consume_re(rfc3339_re): - m = s.last() - return 'datetime', m.group(0), parse_rfc3339_re(m), pos - - if s.consume_re(_float_re): - m = s.last().group(0) - r = m.replace('_','') - if '.' 
in m or 'e' in m or 'E' in m: - return 'float', m, float(r), pos - else: - return 'int', m, int(r, 10), pos - - if s.consume('['): - items = [] - with s: - while True: - _p_ews(s) - items.append(_p_value(s, object_pairs_hook=object_pairs_hook)) - s.commit() - _p_ews(s) - s.expect(',') - s.commit() - _p_ews(s) - s.expect(']') - return 'array', None, items, pos - - if s.consume('{'): - _p_ws(s) - items = object_pairs_hook() - if not s.consume('}'): - k = _p_key(s) - _p_ws(s) - s.expect('=') - _p_ws(s) - items[k] = _p_value(s, object_pairs_hook=object_pairs_hook) - _p_ws(s) - while s.consume(','): - _p_ws(s) - k = _p_key(s) - _p_ws(s) - s.expect('=') - _p_ws(s) - items[k] = _p_value(s, object_pairs_hook=object_pairs_hook) - _p_ws(s) - s.expect('}') - return 'table', None, items, pos - - s.fail() - -def _p_stmt(s, object_pairs_hook): - pos = s.pos() - if s.consume( '['): - is_array = s.consume('[') - _p_ws(s) - keys = [_p_key(s)] - _p_ws(s) - while s.consume('.'): - _p_ws(s) - keys.append(_p_key(s)) - _p_ws(s) - s.expect(']') - if is_array: - s.expect(']') - return 'table_array' if is_array else 'table', keys, pos - - key = _p_key(s) - _p_ws(s) - s.expect('=') - _p_ws(s) - value = _p_value(s, object_pairs_hook=object_pairs_hook) - return 'kv', (key, value), pos - -_stmtsep_re = re.compile(r'(?:[ \t]*(?:#[^\n]*)?\n)+[ \t]*') -def _p_toml(s, object_pairs_hook): - stmts = [] - _p_ews(s) - with s: - stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook)) - while True: - s.commit() - s.expect_re(_stmtsep_re) - stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook)) - _p_ews(s) - s.expect_eof() - return stmts diff --git a/pipenv/vendor/pytoml/test.py b/pipenv/vendor/pytoml/test.py deleted file mode 100644 index ec8abfc650..0000000000 --- a/pipenv/vendor/pytoml/test.py +++ /dev/null @@ -1,30 +0,0 @@ -import datetime -from .utils import format_rfc3339 - -try: - _string_types = (str, unicode) - _int_types = (int, long) -except NameError: - _string_types = str - _int_types = int - -def translate_to_test(v): - if isinstance(v, dict): - return { k: translate_to_test(v) for k, v in v.items() } - if isinstance(v, list): - a = [translate_to_test(x) for x in v] - if v and isinstance(v[0], dict): - return a - else: - return {'type': 'array', 'value': a} - if isinstance(v, datetime.datetime): - return {'type': 'datetime', 'value': format_rfc3339(v)} - if isinstance(v, bool): - return {'type': 'bool', 'value': 'true' if v else 'false'} - if isinstance(v, _int_types): - return {'type': 'integer', 'value': str(v)} - if isinstance(v, float): - return {'type': 'float', 'value': '{:.17}'.format(v)} - if isinstance(v, _string_types): - return {'type': 'string', 'value': v} - raise RuntimeError('unexpected value: {!r}'.format(v)) diff --git a/pipenv/vendor/pytoml/utils.py b/pipenv/vendor/pytoml/utils.py deleted file mode 100644 index 636a680b06..0000000000 --- a/pipenv/vendor/pytoml/utils.py +++ /dev/null @@ -1,67 +0,0 @@ -import datetime -import re - -rfc3339_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?(?:Z|([+-]\d{2}):(\d{2}))') - -def parse_rfc3339(v): - m = rfc3339_re.match(v) - if not m or m.group(0) != v: - return None - return parse_rfc3339_re(m) - -def parse_rfc3339_re(m): - r = map(int, m.groups()[:6]) - if m.group(7): - micro = float(m.group(7)) - else: - micro = 0 - - if m.group(8): - g = int(m.group(8), 10) * 60 + int(m.group(9), 10) - tz = _TimeZone(datetime.timedelta(0, g * 60)) - else: - tz = _TimeZone(datetime.timedelta(0, 0)) - - y, m, d, H, M, S = r - return 
datetime.datetime(y, m, d, H, M, S, int(micro * 1000000), tz) - - -def format_rfc3339(v): - offs = v.utcoffset() - offs = int(offs.total_seconds()) // 60 if offs is not None else 0 - - if offs == 0: - suffix = 'Z' - else: - if offs > 0: - suffix = '+' - else: - suffix = '-' - offs = -offs - suffix = '{0}{1:02}:{2:02}'.format(suffix, offs // 60, offs % 60) - - if v.microsecond: - return v.strftime('%Y-%m-%dT%H:%M:%S.%f') + suffix - else: - return v.strftime('%Y-%m-%dT%H:%M:%S') + suffix - -class _TimeZone(datetime.tzinfo): - def __init__(self, offset): - self._offset = offset - - def utcoffset(self, dt): - return self._offset - - def dst(self, dt): - return None - - def tzname(self, dt): - m = self._offset.total_seconds() // 60 - if m < 0: - res = '-' - m = -m - else: - res = '+' - h = m // 60 - m = m - h * 60 - return '{}{:.02}{:.02}'.format(res, h, m) diff --git a/pipenv/vendor/pytoml/writer.py b/pipenv/vendor/pytoml/writer.py deleted file mode 100644 index 73b5089c24..0000000000 --- a/pipenv/vendor/pytoml/writer.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import unicode_literals -import io, datetime, math, string, sys - -from .utils import format_rfc3339 - -if sys.version_info[0] == 3: - long = int - unicode = str - - -def dumps(obj, sort_keys=False): - fout = io.StringIO() - dump(obj, fout, sort_keys=sort_keys) - return fout.getvalue() - - -_escapes = {'\n': 'n', '\r': 'r', '\\': '\\', '\t': 't', '\b': 'b', '\f': 'f', '"': '"'} - - -def _escape_string(s): - res = [] - start = 0 - - def flush(): - if start != i: - res.append(s[start:i]) - return i + 1 - - i = 0 - while i < len(s): - c = s[i] - if c in '"\\\n\r\t\b\f': - start = flush() - res.append('\\' + _escapes[c]) - elif ord(c) < 0x20: - start = flush() - res.append('\\u%04x' % ord(c)) - i += 1 - - flush() - return '"' + ''.join(res) + '"' - - -_key_chars = string.digits + string.ascii_letters + '-_' -def _escape_id(s): - if any(c not in _key_chars for c in s): - return _escape_string(s) - return s - - -def _format_value(v): - if isinstance(v, bool): - return 'true' if v else 'false' - if isinstance(v, int) or isinstance(v, long): - return unicode(v) - if isinstance(v, float): - if math.isnan(v) or math.isinf(v): - raise ValueError("{0} is not a valid TOML value".format(v)) - else: - return repr(v) - elif isinstance(v, unicode) or isinstance(v, bytes): - return _escape_string(v) - elif isinstance(v, datetime.datetime): - return format_rfc3339(v) - elif isinstance(v, list): - return '[{0}]'.format(', '.join(_format_value(obj) for obj in v)) - elif isinstance(v, dict): - return '{{{0}}}'.format(', '.join('{} = {}'.format(_escape_id(k), _format_value(obj)) for k, obj in v.items())) - else: - raise RuntimeError(v) - - -def dump(obj, fout, sort_keys=False): - tables = [((), obj, False)] - - while tables: - name, table, is_array = tables.pop() - if name: - section_name = '.'.join(_escape_id(c) for c in name) - if is_array: - fout.write('[[{0}]]\n'.format(section_name)) - else: - fout.write('[{0}]\n'.format(section_name)) - - table_keys = sorted(table.keys()) if sort_keys else table.keys() - new_tables = [] - has_kv = False - for k in table_keys: - v = table[k] - if isinstance(v, dict): - new_tables.append((name + (k,), v, False)) - elif isinstance(v, list) and v and all(isinstance(o, dict) for o in v): - new_tables.extend((name + (k,), d, True) for d in v) - elif v is None: - # based on mojombo's comment: https://github.com/toml-lang/toml/issues/146#issuecomment-25019344 - fout.write( - '#{} = null # To use: uncomment and replace 
null with value\n'.format(_escape_id(k))) - has_kv = True - else: - fout.write('{0} = {1}\n'.format(_escape_id(k), _format_value(v))) - has_kv = True - - tables.extend(reversed(new_tables)) - - if (name or has_kv) and tables: - fout.write('\n') diff --git a/pipenv/vendor/requirementslib/__init__.py b/pipenv/vendor/requirementslib/__init__.py index 70b604a82e..8182d2ead1 100644 --- a/pipenv/vendor/requirementslib/__init__.py +++ b/pipenv/vendor/requirementslib/__init__.py @@ -10,7 +10,7 @@ from .models.pipfile import Pipfile from .models.requirements import Requirement -__version__ = "1.5.2.dev0" +__version__ = "1.5.4.dev0" logger = logging.getLogger(__name__) diff --git a/pipenv/vendor/requirementslib/models/__init__.py b/pipenv/vendor/requirementslib/models/__init__.py index e819cdd551..40a96afc6f 100644 --- a/pipenv/vendor/requirementslib/models/__init__.py +++ b/pipenv/vendor/requirementslib/models/__init__.py @@ -1,2 +1 @@ # -*- coding: utf-8 -*- -# This is intentionally left blank diff --git a/pipenv/vendor/requirementslib/models/dependencies.py b/pipenv/vendor/requirementslib/models/dependencies.py index 82eaba5f3e..2b42df8954 100644 --- a/pipenv/vendor/requirementslib/models/dependencies.py +++ b/pipenv/vendor/requirementslib/models/dependencies.py @@ -15,9 +15,10 @@ from packaging.utils import canonicalize_name from vistir.compat import JSONDecodeError, fs_str from vistir.contextmanagers import cd, temp_environ -from vistir.misc import partialclass from vistir.path import create_tracked_tempdir +from ..environment import MYPY_RUNNING +from ..utils import _ensure_dir, prepare_pip_source_args from .cache import CACHE_DIR, DependencyCache from .utils import ( clean_requires_python, @@ -30,8 +31,6 @@ name_from_req, version_from_ireq, ) -from ..environment import MYPY_RUNNING -from ..utils import _ensure_dir, prepare_pip_source_args if MYPY_RUNNING: from typing import ( @@ -103,22 +102,7 @@ def get_pip_command(): # Use pip's parser for pip.conf management and defaults. # General options (find_links, index_url, extra_index_url, trusted_host, # and pre) are defered to pip. 
- import optparse - - class PipCommand(pip_shims.shims.Command): - name = "PipCommand" - - pip_command = PipCommand() - pip_command.parser.add_option(pip_shims.shims.cmdoptions.no_binary()) - pip_command.parser.add_option(pip_shims.shims.cmdoptions.only_binary()) - index_opts = pip_shims.shims.cmdoptions.make_option_group( - pip_shims.shims.cmdoptions.index_group, pip_command.parser - ) - pip_command.parser.insert_option_group(0, index_opts) - pip_command.parser.add_option( - optparse.Option("--pre", action="store_true", default=False) - ) - + pip_command = pip_shims.shims.InstallCommand() return pip_command @@ -250,7 +234,7 @@ def from_requirement(cls, requirement, parent=None): extras = requirement.ireq.extras is_pinned = is_pinned_requirement(requirement.ireq) is_constraint = bool(parent) - finder = get_finder(sources=None) + _, finder = get_finder(sources=None) candidates = [] if not is_pinned and not requirement.editable: for r in requirement.find_all_matches(finder=finder): @@ -261,7 +245,7 @@ def from_requirement(cls, requirement, parent=None): markers=markers, constraint=is_constraint, ) - req.req.link = r.location + req.req.link = getattr(r, "location", getattr(r, "link", None)) req.parent = parent candidates.append(req) candidates = sorted( @@ -487,7 +471,7 @@ def get_dependencies_from_index(dep, sources=None, pip_options=None, wheel_cache :rtype: set(str) or None """ - finder = get_finder(sources=sources, pip_options=pip_options) + session, finder = get_finder(sources=sources, pip_options=pip_options) if not wheel_cache: wheel_cache = WHEEL_CACHE dep.is_direct = True @@ -496,7 +480,7 @@ def get_dependencies_from_index(dep, sources=None, pip_options=None, wheel_cache requirements = None setup_requires = {} with temp_environ(), start_resolver( - finder=finder, wheel_cache=wheel_cache + finder=finder, session=session, wheel_cache=wheel_cache ) as resolver: os.environ["PIP_EXISTS_ACTION"] = "i" dist = None @@ -619,38 +603,38 @@ def get_finder(sources=None, pip_command=None, pip_options=None): """ if not pip_command: - pip_command = get_pip_command() + pip_command = pip_shims.shims.InstallCommand() if not sources: sources = [{"url": "https://pypi.org/simple", "name": "pypi", "verify_ssl": True}] if not pip_options: pip_options = get_pip_options(sources=sources, pip_command=pip_command) session = pip_command._build_session(pip_options) atexit.register(session.close) - finder = pip_shims.shims.PackageFinder( - find_links=[], - index_urls=[s.get("url") for s in sources], - trusted_hosts=[], - allow_all_prereleases=pip_options.pre, - session=session, + finder = pip_shims.shims.get_package_finder( + pip_shims.shims.InstallCommand(), options=pip_options, session=session ) - return finder + return session, finder @contextlib.contextmanager -def start_resolver(finder=None, wheel_cache=None): +def start_resolver(finder=None, session=None, wheel_cache=None): """Context manager to produce a resolver. 
:param finder: A package finder to use for searching the index :type finder: :class:`~pip._internal.index.PackageFinder` + :param :class:`~requests.Session` session: A session instance + :param :class:`~pip._internal.cache.WheelCache` wheel_cache: A pip WheelCache instance :return: A 3-tuple of finder, preparer, resolver :rtype: (:class:`~pip._internal.operations.prepare.RequirementPreparer`, :class:`~pip._internal.resolve.Resolver`) """ pip_command = get_pip_command() pip_options = get_pip_options(pip_command=pip_command) - + session = None if not finder: - finder = get_finder(pip_command=pip_command, pip_options=pip_options) + session, finder = get_finder(pip_command=pip_command, pip_options=pip_options) + if not session: + session = pip_command._build_session(pip_options) if not wheel_cache: wheel_cache = WHEEL_CACHE @@ -661,40 +645,37 @@ def start_resolver(finder=None, wheel_cache=None): _build_dir = create_tracked_tempdir(fs_str("build")) _source_dir = create_tracked_tempdir(fs_str("source")) - preparer = partialclass( - pip_shims.shims.RequirementPreparer, - build_dir=_build_dir, - src_dir=_source_dir, - download_dir=download_dir, - wheel_download_dir=WHEEL_DOWNLOAD_DIR, - progress_bar="off", - build_isolation=False, - ) - resolver = partialclass( - pip_shims.shims.Resolver, - finder=finder, - session=finder.session, - upgrade_strategy="to-satisfy-only", - force_reinstall=True, - ignore_dependencies=False, - ignore_requires_python=True, - ignore_installed=True, - isolated=False, - wheel_cache=wheel_cache, - use_user_site=False, - ) try: - if packaging.version.parse( - pip_shims.shims.pip_version - ) >= packaging.version.parse("18"): - with pip_shims.shims.RequirementTracker() as req_tracker: - preparer = preparer(req_tracker=req_tracker) - yield resolver(preparer=preparer) - else: - preparer = preparer() - yield resolver(preparer=preparer) + with pip_shims.shims.make_preparer( + options=pip_options, + finder=finder, + session=session, + build_dir=_build_dir, + src_dir=_source_dir, + download_dir=download_dir, + wheel_download_dir=WHEEL_DOWNLOAD_DIR, + progress_bar="off", + build_isolation=False, + install_cmd=pip_command, + ) as preparer: + resolver = pip_shims.shims.get_resolver( + finder=finder, + ignore_dependencies=False, + ignore_requires_python=True, + preparer=preparer, + session=session, + options=pip_options, + install_cmd=pip_command, + wheel_cache=wheel_cache, + force_reinstall=True, + ignore_installed=True, + upgrade_strategy="to-satisfy-only", + isolated=False, + use_user_site=False, + ) + yield resolver finally: - finder.session.close() + session.close() def get_grouped_dependencies(constraints): diff --git a/pipenv/vendor/requirementslib/models/lockfile.py b/pipenv/vendor/requirementslib/models/lockfile.py index 42248868af..3eabc5043f 100644 --- a/pipenv/vendor/requirementslib/models/lockfile.py +++ b/pipenv/vendor/requirementslib/models/lockfile.py @@ -2,23 +2,21 @@ from __future__ import absolute_import, print_function import copy +import itertools import os import attr -import itertools import plette.lockfiles import six +from vistir.compat import FileNotFoundError, JSONDecodeError, Path -from vistir.compat import Path, FileNotFoundError, JSONDecodeError - +from ..exceptions import LockfileCorruptException, MissingParameter, PipfileNotFound +from ..utils import is_editable, is_vcs, merge_items from .project import ProjectFile from .requirements import Requirement - from .utils import optional_instance_of -from ..exceptions import LockfileCorruptException, 
PipfileNotFound, MissingParameter -from ..utils import is_vcs, is_editable, merge_items -DEFAULT_NEWLINES = u"\n" +DEFAULT_NEWLINES = six.text_type("\n") def preferred_newlines(f): @@ -42,7 +40,7 @@ class Lockfile(object): @path.default def _get_path(self): - return Path(os.curdir).absolute() + return Path(os.curdir).joinpath("Pipfile.lock").absolute() @projectfile.default def _get_projectfile(self): @@ -50,7 +48,7 @@ def _get_projectfile(self): @_lockfile.default def _get_lockfile(self): - return self.projectfile.lockfile + return self.projectfile.model @property def lockfile(self): @@ -127,16 +125,13 @@ def read_projectfile(cls, path): :rtype: :class:`~requirementslib.models.project.ProjectFile` """ - pf = ProjectFile.read( - path, - plette.lockfiles.Lockfile, - invalid_ok=True - ) + pf = ProjectFile.read(path, plette.lockfiles.Lockfile, invalid_ok=True) return pf @classmethod def lockfile_from_pipfile(cls, pipfile_path): from .pipfile import Pipfile + if os.path.isfile(pipfile_path): if not os.path.isabs(pipfile_path): pipfile_path = os.path.abspath(pipfile_path) @@ -164,7 +159,9 @@ def load_projectfile(cls, path, create=True, data=None): if not project_path.exists(): raise OSError("Project does not exist: %s" % project_path.as_posix()) elif not lockfile_path.exists() and not create: - raise FileNotFoundError("Lockfile does not exist: %s" % lockfile_path.as_posix()) + raise FileNotFoundError( + "Lockfile does not exist: %s" % lockfile_path.as_posix() + ) projectfile = cls.read_projectfile(lockfile_path.as_posix()) if not lockfile_path.exists(): if not data: @@ -207,10 +204,14 @@ def from_data(cls, path, data, meta_from_project=True): lockfile.update(data) else: lockfile = plette.lockfiles.Lockfile(data) - projectfile = ProjectFile(line_ending=DEFAULT_NEWLINES, location=lockfile_path, model=lockfile) + projectfile = ProjectFile( + line_ending=DEFAULT_NEWLINES, location=lockfile_path, model=lockfile + ) return cls( - projectfile=projectfile, lockfile=lockfile, - newlines=projectfile.line_ending, path=Path(projectfile.location) + projectfile=projectfile, + lockfile=lockfile, + newlines=projectfile.line_ending, + path=Path(projectfile.location), ) @classmethod @@ -243,7 +244,7 @@ def load(cls, path, create=True): "projectfile": projectfile, "lockfile": projectfile.model, "newlines": projectfile.line_ending, - "path": lockfile_path + "path": lockfile_path, } return cls(**creation_args) @@ -300,9 +301,7 @@ def as_requirements(self, include_hashes=False, dev=False): lines = [] section = self.dev_requirements if dev else self.requirements for req in section: - kwargs = { - "include_hashes": include_hashes, - } + kwargs = {"include_hashes": include_hashes} if req.editable: kwargs["include_markers"] = False r = req.as_line(**kwargs) diff --git a/pipenv/vendor/requirementslib/models/markers.py b/pipenv/vendor/requirementslib/models/markers.py index fc85fbdd51..c7649b77d6 100644 --- a/pipenv/vendor/requirementslib/models/markers.py +++ b/pipenv/vendor/requirementslib/models/markers.py @@ -635,7 +635,7 @@ def format_pyversion(parts): def normalize_marker_str(marker): - # type: (Union[Marker, STRING_TYPE]) + # type: (Union[Marker, STRING_TYPE]) -> str marker_str = "" if not marker: return None diff --git a/pipenv/vendor/requirementslib/models/pipfile.py b/pipenv/vendor/requirementslib/models/pipfile.py index e55ad741a6..db1f5ddef2 100644 --- a/pipenv/vendor/requirementslib/models/pipfile.py +++ b/pipenv/vendor/requirementslib/models/pipfile.py @@ -3,6 +3,7 @@ from __future__ import absolute_import, 
print_function, unicode_literals import copy +import itertools import os import sys @@ -12,12 +13,12 @@ import tomlkit from vistir.compat import FileNotFoundError, Path -from .project import ProjectFile -from .requirements import Requirement -from .utils import get_url_name, optional_instance_of, tomlkit_value_to_python from ..environment import MYPY_RUNNING from ..exceptions import RequirementError from ..utils import is_editable, is_vcs, merge_items +from .project import ProjectFile +from .requirements import Requirement +from .utils import get_url_name, optional_instance_of, tomlkit_value_to_python if MYPY_RUNNING: from typing import Union, Any, Dict, Iterable, Mapping, List, Text @@ -29,83 +30,61 @@ lockfile_type = Dict[Text, Union[package_type, meta_type]] -# Let's start by patching plette to make sure we can validate data without being broken -try: - import cerberus -except ImportError: - cerberus = None - -VALIDATORS = plette.models.base.VALIDATORS - - -def patch_plette(): - # type: () -> None - - global VALIDATORS - - def validate(cls, data): - # type: (Any, Dict[Text, Any]) -> None - if not cerberus: # Skip validation if Cerberus is not available. - return - schema = cls.__SCHEMA__ - key = id(schema) - try: - v = VALIDATORS[key] - except KeyError: - v = VALIDATORS[key] = cerberus.Validator(schema, allow_unknown=True) - if v.validate(dict(data), normalize=False): - return - raise plette.models.base.ValidationError(data, v) - - names = ["plette.models.base", plette.models.base.__name__] - names = [name for name in names if name in sys.modules] - for name in names: - if name in sys.modules: - module = sys.modules[name] - else: - module = plette.models.base - original_fn = getattr(module, "validate") - for key in ["__qualname__", "__name__", "__module__"]: - original_val = getattr(original_fn, key, None) - if original_val is not None: - setattr(validate, key, original_val) - setattr(module, "validate", validate) - sys.modules[name] = module - - -patch_plette() - - is_pipfile = optional_instance_of(plette.pipfiles.Pipfile) is_path = optional_instance_of(Path) is_projectfile = optional_instance_of(ProjectFile) def reorder_source_keys(data): - # type: ignore - sources = data["source"] # type: sources_type - for i, entry in enumerate(sources): - table = tomlkit.table() # type: Mapping + # type: (tomlkit.toml_document.TOMLDocument) -> tomlkit.toml_document.TOMLDocument + sources = [] # type: sources_type + for source_key in ["source", "sources"]: + sources.extend(data.get(source_key, tomlkit.aot()).value) + new_source_aot = tomlkit.aot() + for entry in sources: + table = tomlkit.table() # type: tomlkit.items.Table source_entry = PipfileLoader.populate_source(entry.copy()) - table["name"] = source_entry["name"] - table["url"] = source_entry["url"] - table["verify_ssl"] = source_entry["verify_ssl"] - data["source"][i] = table + for key in ["name", "url", "verify_ssl"]: + table.update({key: source_entry[key]}) + new_source_aot.append(table) + data["source"] = new_source_aot + if data.get("sources", None): + del data["sources"] return data class PipfileLoader(plette.pipfiles.Pipfile): @classmethod def validate(cls, data): - # type: (Dict[Text, Any]) -> None + # type: (tomlkit.toml_document.TOMLDocument) -> None for key, klass in plette.pipfiles.PIPFILE_SECTIONS.items(): - if key not in data or key == "source": + if key not in data or key == "sources": continue try: klass.validate(data[key]) except Exception: pass + @classmethod + def ensure_package_sections(cls, data): + # type: 
(tomlkit.toml_document.TOMLDocument[Text, Any]) -> tomlkit.toml_document.TOMLDocument[Text, Any] + """ + Ensure that all pipfile package sections are present in the given toml document + + :param :class:`~tomlkit.toml_document.TOMLDocument` data: The toml document to + ensure package sections are present on + :return: The updated toml document, ensuring ``packages`` and ``dev-packages`` + sections are present + :rtype: :class:`~tomlkit.toml_document.TOMLDocument` + """ + package_keys = ( + k for k in plette.pipfiles.PIPFILE_SECTIONS.keys() if k.endswith("packages") + ) + for key in package_keys: + if key not in data: + data.update({key: tomlkit.table()}) + return data + @classmethod def populate_source(cls, source): """Derive missing values of source from the existing fields.""" @@ -115,7 +94,7 @@ def populate_source(cls, source): if "verify_ssl" not in source: source["verify_ssl"] = "https://" in source["url"] if not isinstance(source["verify_ssl"], bool): - source["verify_ssl"] = source["verify_ssl"].lower() == "true" + source["verify_ssl"] = str(source["verify_ssl"]).lower() == "true" return source @classmethod @@ -125,11 +104,10 @@ def load(cls, f, encoding=None): if encoding is not None: content = content.decode(encoding) _data = tomlkit.loads(content) - _data["source"] = _data.get("source", []) + _data.get("sources", []) + should_reload = "source" not in _data _data = reorder_source_keys(_data) - if "source" not in _data: + if should_reload: if "sources" in _data: - _data["source"] = _data["sources"] content = tomlkit.dumps(_data) else: # HACK: There is no good way to prepend a section to an existing @@ -139,10 +117,19 @@ def load(cls, f, encoding=None): sep = "" if content.startswith("\n") else "\n" content = plette.pipfiles.DEFAULT_SOURCE_TOML + sep + content data = tomlkit.loads(content) + data = cls.ensure_package_sections(data) instance = cls(data) instance._data = dict(instance._data) return instance + def __contains__(self, key): + # type: (Text) -> bool + if key not in self._data: + package_keys = self._data.get("packages", {}).keys() + dev_package_keys = self._data.get("dev-packages", {}).keys() + return any(key in pkg_list for pkg_list in (package_keys, dev_package_keys)) + return True + def __getattribute__(self, key): # type: (Text) -> Any if key == "source": @@ -177,6 +164,15 @@ def _get_pipfile(self): # type: () -> Union[plette.pipfiles.Pipfile, PipfileLoader] return self.projectfile.model + @property + def extended_keys(self): + return [ + k + for k in itertools.product( + ("packages", "dev-packages"), ("", "vcs", "editable") + ) + ] + @property def pipfile(self): # type: () -> Union[PipfileLoader, plette.pipfiles.Pipfile] @@ -186,11 +182,11 @@ def get_deps(self, dev=False, only=True): # type: (bool, bool) -> Dict[Text, Dict[Text, Union[List[Text], Text]]] deps = {} # type: Dict[Text, Dict[Text, Union[List[Text], Text]]] if dev: - deps.update(self.pipfile._data["dev-packages"]) + deps.update(dict(self.pipfile._data.get("dev-packages", {}))) if only: return deps return tomlkit_value_to_python( - merge_items([deps, self.pipfile._data["packages"]]) + merge_items([deps, dict(self.pipfile._data.get("packages", {}))]) ) def get(self, k): @@ -357,22 +353,28 @@ def _read_pyproject(self): # type: () -> None pyproject = self.path.parent.joinpath("pyproject.toml") if pyproject.exists(): - self._pyproject = tomlkit.load(pyproject) + self._pyproject = tomlkit.loads(pyproject.read_text()) build_system = self._pyproject.get("build-system", None) - if not 
os.path.exists(self.path_to("setup.py")): - if not build_system or not build_system.get("requires"): - build_system = { - "requires": ["setuptools>=40.8", "wheel"], - "build-backend": "setuptools.build_meta:__legacy__", - } - self._build_system = build_system + if build_system and not build_system.get("build_backend"): + build_system["build-backend"] = "setuptools.build_meta:__legacy__" + elif not build_system or not build_system.get("requires"): + build_system = { + "requires": ["setuptools>=40.8", "wheel"], + "build-backend": "setuptools.build_meta:__legacy__", + } + self.build_system = build_system @property def build_requires(self): # type: () -> List[Text] + if not self.build_system: + self._read_pyproject() return self.build_system.get("requires", []) @property def build_backend(self): # type: () -> Text + pyproject = self.path.parent.joinpath("pyproject.toml") + if not self.build_system: + self._read_pyproject() return self.build_system.get("build-backend", None) diff --git a/pipenv/vendor/requirementslib/models/requirements.py b/pipenv/vendor/requirementslib/models/requirements.py index dc76661999..0546e9ed57 100644 --- a/pipenv/vendor/requirementslib/models/requirements.py +++ b/pipenv/vendor/requirementslib/models/requirements.py @@ -27,7 +27,7 @@ from packaging.utils import canonicalize_name from six.moves.urllib import parse as urllib_parse from six.moves.urllib.parse import unquote -from vistir.compat import FileNotFoundError, Mapping, Path, lru_cache +from vistir.compat import FileNotFoundError, Path, lru_cache from vistir.contextmanagers import temp_path from vistir.misc import dedup from vistir.path import ( @@ -39,6 +39,17 @@ normalize_path, ) +from ..environment import MYPY_RUNNING +from ..exceptions import RequirementError +from ..utils import ( + VCS_LIST, + add_ssh_scheme_to_git_uri, + get_setup_paths, + is_installable_dir, + is_installable_file, + is_vcs, + strip_ssh_from_git_uri, +) from .markers import ( cleanup_pyspecs, contains_pyversion, @@ -72,6 +83,7 @@ make_install_requirement, normalize_name, parse_extras, + read_source, specs_to_string, split_markers_from_line, split_ref_from_uri, @@ -80,17 +92,6 @@ validate_specifiers, validate_vcs, ) -from ..environment import MYPY_RUNNING -from ..exceptions import RequirementError -from ..utils import ( - VCS_LIST, - add_ssh_scheme_to_git_uri, - get_setup_paths, - is_installable_dir, - is_installable_file, - is_vcs, - strip_ssh_from_git_uri, -) if MYPY_RUNNING: from typing import ( @@ -548,11 +549,11 @@ def pyproject_backend(self): return self._pyproject_backend def parse_hashes(self): - # type: () -> None + # type: () -> "Line" """ Parse hashes from *self.line* and set them on the current object. 
- :returns: Nothing - :rtype: None + :returns: Self + :rtype: `:class:~Line` """ line, hashes = self.split_hashes(self.line) self.hashes = hashes @@ -560,11 +561,11 @@ def parse_hashes(self): return self def parse_extras(self): - # type: () -> None + # type: () -> "Line" """ Parse extras from *self.line* and set them on the current object - :returns: Nothing - :rtype: None + :returns: self + :rtype: :class:`~Line` """ extras = None if "@" in self.line or self.is_vcs or self.is_url: @@ -694,9 +695,10 @@ def is_wheel(self): @property def is_artifact(self): # type: () -> bool + if self.link is None: return False - return self.link.is_artifact + return not self.link.is_vcs @property def is_vcs(self): @@ -862,8 +864,10 @@ def metadata(self): def parsed_setup_cfg(self): # type: () -> Dict[Any, Any] if self.is_local and self.path and is_installable_dir(self.path): + setup_content = read_source(self.setup_cfg) + base_dir = os.path.dirname(os.path.abspath(self.setup_cfg)) if self.setup_cfg: - return parse_setup_cfg(self.setup_cfg) + return parse_setup_cfg(setup_content, base_dir) return {} @cached_property @@ -881,9 +885,7 @@ def vcsrepo(self, repo): ireq = self.ireq wheel_kwargs = self.wheel_kwargs.copy() wheel_kwargs["src_dir"] = repo.checkout_directory - ireq.source_dir = wheel_kwargs["src_dir"] - ireq.build_location(wheel_kwargs["build_dir"]) - ireq._temp_build_dir.path = wheel_kwargs["build_dir"] + ireq.ensure_has_source_dir(wheel_kwargs["src_dir"]) with temp_path(): sys.path = [repo.checkout_directory, "", ".", get_python_lib(plat_specific=0)] setupinfo = SetupInfo.create( @@ -1686,172 +1688,6 @@ def formatted_path(self): return path.as_posix() return None - @classmethod - def create( - cls, - path=None, # type: Optional[STRING_TYPE] - uri=None, # type: STRING_TYPE - editable=False, # type: bool - extras=None, # type: Optional[Tuple[STRING_TYPE, ...]] - link=None, # type: Link - vcs_type=None, # type: Optional[Any] - name=None, # type: Optional[STRING_TYPE] - req=None, # type: Optional[Any] - line=None, # type: Optional[STRING_TYPE] - uri_scheme=None, # type: STRING_TYPE - setup_path=None, # type: Optional[Any] - relpath=None, # type: Optional[Any] - parsed_line=None, # type: Optional[Line] - ): - # type: (...) 
-> F - if parsed_line is None and line is not None: - parsed_line = Line(line) - if relpath and not path: - path = relpath - if not path and uri and link is not None and link.scheme == "file": - path = os.path.abspath( - pip_shims.shims.url_to_path(unquote(uri)) - ) # type: ignore - try: - path = get_converted_relative_path(path) - except ValueError: # Vistir raises a ValueError if it can't make a relpath - path = path - if line is not None and not (uri_scheme and uri and link): - vcs_type, uri_scheme, relpath, path, uri, link = cls.get_link_from_line(line) - if not uri_scheme: - uri_scheme = "path" if path else "file" - if path and not uri: - uri = unquote( - pip_shims.shims.path_to_url(os.path.abspath(path)) - ) # type: ignore - if not link and uri: - link = cls.get_link_from_line(uri).link - if not uri and link: - uri = unquote(link.url_without_fragment) - if not extras: - extras = () - pyproject_path = None - pyproject_requires = None - pyproject_backend = None - pyproject_tuple = None # type: Optional[Tuple[STRING_TYPE]] - if path is not None: - pyproject_requires_and_backend = get_pyproject(path) - if pyproject_requires_and_backend is not None: - pyproject_requires, pyproject_backend = pyproject_requires_and_backend - if path: - setup_paths = get_setup_paths(path) - if isinstance(setup_paths, Mapping): - if "pyproject_toml" in setup_paths and setup_paths["pyproject_toml"]: - pyproject_path = Path(setup_paths["pyproject_toml"]) - if "setup_py" in setup_paths and setup_paths["setup_py"]: - setup_path = Path(setup_paths["setup_py"]).as_posix() - if setup_path and isinstance(setup_path, Path): - setup_path = setup_path.as_posix() - creation_kwargs = { - "editable": editable, - "extras": extras, - "pyproject_path": pyproject_path, - "setup_path": setup_path, - "uri_scheme": uri_scheme, - "link": link, - "uri": uri, - "pyproject_requires": pyproject_tuple, - "pyproject_backend": pyproject_backend, - "path": path or relpath, - "parsed_line": parsed_line, - } - if vcs_type: - creation_kwargs["vcs"] = vcs_type - if name: - creation_kwargs["name"] = name - _line = None # type: Optional[STRING_TYPE] - ireq = None # type: Optional[InstallRequirement] - setup_info = None # type: Optional[SetupInfo] - if parsed_line: - if parsed_line.name: - name = parsed_line.name - if parsed_line.setup_info: - name = parsed_line.setup_info.as_dict().get("name", name) - if not name or not parsed_line: - if link is not None and link.url_without_fragment is not None: - _line = unquote(link.url_without_fragment) - if name: - _line = "{0}#egg={1}".format(_line, name) - if _line and extras and extras_to_string(extras) not in _line: - _line = "{0}[{1}]".format( - _line, ",".join(sorted(set(extras))) - ) # type: ignore - elif isinstance(uri, six.string_types): - _line = unquote(uri) - elif line: - _line = unquote(line) - if editable: - if ( - _line - and extras - and extras_to_string(extras) not in _line - and ( - (link and link.scheme == "file") - or (uri and uri.startswith("file")) - or (not uri and not link) - ) - ): - _line = "{0}[{1}]".format( - _line, ",".join(sorted(set(extras))) - ) # type: ignore - if ireq is None: - ireq = pip_shims.shims.install_req_from_editable( - _line - ) # type: ignore - else: - _line = path if (uri_scheme and uri_scheme == "path") else _line - if _line and extras and extras_to_string(extras) not in _line: - _line = "{0}[{1}]".format( - _line, ",".join(sorted(set(extras))) - ) # type: ignore - if ireq is None: - ireq = pip_shims.shims.install_req_from_line(_line) # type: ignore - if 
editable: - _line = "-e {0}".format(editable) - if _line: - parsed_line = Line(_line) - if ireq is None and parsed_line and parsed_line.ireq: - ireq = parsed_line.ireq - if extras and ireq is not None and not ireq.extras: - ireq.extras = set(extras) - if setup_info is None: - setup_info = SetupInfo.from_ireq(ireq) - setupinfo_dict = setup_info.as_dict() - setup_name = setupinfo_dict.get("name", None) - build_requires = () # type: Tuple[STRING_TYPE, ...] - build_backend = "" - if setup_name is not None: - name = setup_name - build_requires = setupinfo_dict.get("build_requires", build_requires) - build_backend = setupinfo_dict.get("build_backend", build_backend) - if "pyproject_requires" not in creation_kwargs and build_requires: - creation_kwargs["pyproject_requires"] = tuple(build_requires) - if "pyproject_backend" not in creation_kwargs and build_backend: - creation_kwargs["pyproject_backend"] = build_backend - if setup_info is None and parsed_line and parsed_line.setup_info: - setup_info = parsed_line.setup_info - creation_kwargs["setup_info"] = setup_info - if path or relpath: - creation_kwargs["path"] = relpath if relpath else path - if req is not None: - creation_kwargs["req"] = req - creation_req = creation_kwargs.get("req") - if creation_kwargs.get("req") is not None: - creation_req_line = getattr(creation_req, "line", None) - if creation_req_line is None and line is not None: - creation_kwargs["req"].line = line # type: ignore - if parsed_line and parsed_line.name: - if name and len(parsed_line.name) != 7 and len(name) == 7: - name = parsed_line.name - if name: - creation_kwargs["name"] = name - return cls(**creation_kwargs) # type: ignore - @classmethod def from_line(cls, line, editable=None, extras=None, parsed_line=None): # type: (AnyStr, Optional[bool], Optional[Tuple[AnyStr, ...]], Optional[Line]) -> F @@ -1952,11 +1788,12 @@ def line_part(self): seed = None # type: Optional[STRING_TYPE] if self.link is not None: link_url = unquote(self.link.url_without_fragment) + is_vcs = getattr(self.link, "is_vcs", not self.link.is_artifact) if self._uri_scheme and self._uri_scheme == "path": # We may need any one of these for passing to pip seed = self.path or link_url or self.uri elif (self._uri_scheme and self._uri_scheme == "file") or ( - (self.link.is_artifact or self.link.is_wheel) and self.link.url + (self.link.is_wheel or not is_vcs) and self.link.url ): seed = link_url or self.uri # add egg fragments to remote artifacts (valid urls only) @@ -1998,6 +1835,9 @@ def pipfile_part(self): collision_order = ["file", "uri", "path"] # type: List[STRING_TYPE] collisions = [] # type: List[STRING_TYPE] key_match = next(iter(k for k in collision_order if k in pipfile_dict.keys())) + is_vcs = None + if self.link is not None: + is_vcs = getattr(self.link, "is_vcs", not self.link.is_artifact) if self._uri_scheme: dict_key = self._uri_scheme target_key = dict_key if dict_key in pipfile_dict else key_match @@ -2009,7 +1849,7 @@ def pipfile_part(self): pipfile_dict[dict_key] = winning_value elif ( self.is_remote_artifact - or (self.link is not None and self.link.is_artifact) + or (is_vcs is not None and not is_vcs) and (self._uri_scheme and self._uri_scheme == "file") ): dict_key = "file" @@ -3100,7 +2940,7 @@ def find_all_matches(self, sources=None, finder=None): from .dependencies import get_finder, find_all_matches if not finder: - finder = get_finder(sources=sources) + _, finder = get_finder(sources=sources) return find_all_matches(finder, self.as_ireq()) def run_requires(self, sources=None, 
finder=None): @@ -3167,6 +3007,9 @@ def file_req_from_parsed_line(parsed_line): pyproject_requires = None # type: Optional[Tuple[STRING_TYPE, ...]] if parsed_line.pyproject_requires is not None: pyproject_requires = tuple(parsed_line.pyproject_requires) + pyproject_path = ( + Path(parsed_line.pyproject_toml) if parsed_line.pyproject_toml else None + ) req_dict = { "setup_path": parsed_line.setup_py, "path": path, @@ -3177,9 +3020,7 @@ def file_req_from_parsed_line(parsed_line): "uri": parsed_line.uri, "pyproject_requires": pyproject_requires, "pyproject_backend": parsed_line.pyproject_backend, - "pyproject_path": Path(parsed_line.pyproject_toml) - if parsed_line.pyproject_toml - else None, + "pyproject_path": pyproject_path, "parsed_line": parsed_line, "req": parsed_line.requirement, } diff --git a/pipenv/vendor/requirementslib/models/resolvers.py b/pipenv/vendor/requirementslib/models/resolvers.py index bd773ba64f..43590523d1 100644 --- a/pipenv/vendor/requirementslib/models/resolvers.py +++ b/pipenv/vendor/requirementslib/models/resolvers.py @@ -3,7 +3,6 @@ import attr import six - from pip_shims.shims import Wheel from .cache import HashCache @@ -40,15 +39,16 @@ class DependencyResolver(object): def create(cls, finder=None, allow_prereleases=False, get_all_hashes=True): if not finder: from .dependencies import get_finder + finder_args = [] if allow_prereleases: - finder_args.append('--pre') + finder_args.append("--pre") finder = get_finder(*finder_args) creation_kwargs = { - 'allow_prereleases': allow_prereleases, - 'include_incompatible_hashes': get_all_hashes, - 'finder': finder, - 'hash_cache': HashCache(), + "allow_prereleases": allow_prereleases, + "include_incompatible_hashes": get_all_hashes, + "finder": finder, + "hash_cache": HashCache(), } resolver = cls(**creation_kwargs) return resolver @@ -75,9 +75,9 @@ def add_abstract_dep(self, dep): compatible_versions = self.dep_dict[dep.name].compatible_versions(dep) if compatible_versions: self.candidate_dict[dep.name] = compatible_versions - self.dep_dict[dep.name] = self.dep_dict[ - dep.name - ].compatible_abstract_dep(dep) + self.dep_dict[dep.name] = self.dep_dict[dep.name].compatible_abstract_dep( + dep + ) else: raise ResolutionError else: @@ -103,8 +103,10 @@ def pin_deps(self): old_version = version_from_ireq(self.pinned_deps[name]) if not pin.editable: new_version = version_from_ireq(pin) - if (new_version != old_version and - new_version not in self.candidate_dict[name]): + if ( + new_version != old_version + and new_version not in self.candidate_dict[name] + ): continue pin.parent = abs_dep.parent pin_subdeps = self.dep_dict[name].get_deps(pin) @@ -141,6 +143,7 @@ def resolve(self, root_nodes, max_rounds=20): # We accept str, Requirement, and AbstractDependency as input. 
from .dependencies import AbstractDependency from ..utils import log + for dep in root_nodes: if isinstance(dep, six.string_types): dep = AbstractDependency.from_string(dep) @@ -185,32 +188,40 @@ def get_hashes(self): def get_hashes_for_one(self, ireq): if not self.finder: from .dependencies import get_finder + finder_args = [] if self.allow_prereleases: - finder_args.append('--pre') + finder_args.append("--pre") self.finder = get_finder(*finder_args) if ireq.editable: return set() from pip_shims import VcsSupport + vcs = VcsSupport() - if ireq.link and ireq.link.scheme in vcs.all_schemes and 'ssh' in ireq.link.scheme: + if ( + ireq.link + and ireq.link.scheme in vcs.all_schemes + and "ssh" in ireq.link.scheme + ): return set() if not is_pinned_requirement(ireq): - raise TypeError( - "Expected pinned requirement, got {}".format(ireq)) + raise TypeError("Expected pinned requirement, got {}".format(ireq)) matching_candidates = set() with self.allow_all_wheels(): from .dependencies import find_all_matches - matching_candidates = ( - find_all_matches(self.finder, ireq, pre=self.allow_prereleases) + + matching_candidates = find_all_matches( + self.finder, ireq, pre=self.allow_prereleases ) return { - self.hash_cache.get_hash(candidate.location) + self.hash_cache.get_hash( + getattr(candidate, "location", getattr(candidate, "link", None)) + ) for candidate in matching_candidates } @@ -222,6 +233,7 @@ def allow_all_wheels(self): This also saves the candidate cache and set a new one, or else the results from the previous non-patched calls will interfere. """ + def _wheel_supported(self, tags=None): # Ignore current platform. Support everything. return True diff --git a/pipenv/vendor/requirementslib/models/setup_info.py b/pipenv/vendor/requirementslib/models/setup_info.py index 3bee49ea98..10bf3a18b4 100644 --- a/pipenv/vendor/requirementslib/models/setup_info.py +++ b/pipenv/vendor/requirementslib/models/setup_info.py @@ -225,10 +225,11 @@ def get_package_dir_from_setupcfg(parser, base_dir=None): package_dir = base_dir else: package_dir = os.getcwd() - if parser.has_option("options", "packages.find"): - pkg_dir = parser.get("options", "packages.find") - if isinstance(package_dir, Mapping): - package_dir = os.path.join(package_dir, pkg_dir.get("where")) + if parser.has_option("options.packages.find", "where"): + pkg_dir = parser.get("options.packages.find", "where") + if isinstance(pkg_dir, Mapping): + pkg_dir = pkg_dir.get("where") + package_dir = os.path.join(package_dir, pkg_dir) elif parser.has_option("options", "packages"): pkg_dir = parser.get("options", "packages") if "find:" in pkg_dir: @@ -279,14 +280,8 @@ def get_extras_from_setupcfg(parser): return extras -def parse_setup_cfg(setup_cfg_path): - # type: (S) -> Dict[S, Union[S, None, Set[BaseRequirement], List[S], Dict[STRING_TYPE, Tuple[BaseRequirement]]]] - if not os.path.exists(setup_cfg_path): - raise FileNotFoundError(setup_cfg_path) - try: - return setuptools_parse_setup_cfg(setup_cfg_path) - except Exception: - pass +def parse_setup_cfg(setup_cfg_contents, base_dir): + # type: (S, S) -> Dict[S, Union[S, None, Set[BaseRequirement], List[S], Dict[STRING_TYPE, Tuple[BaseRequirement]]]] default_opts = { "metadata": {"name": "", "version": ""}, "options": { @@ -299,9 +294,8 @@ def parse_setup_cfg(setup_cfg_path): }, } parser = configparser.ConfigParser(default_opts) - parser.read(setup_cfg_path) + parser.read_string(setup_cfg_contents) results = {} - base_dir = os.path.dirname(os.path.abspath(setup_cfg_path)) package_dir = 
get_package_dir_from_setupcfg(parser, base_dir=base_dir) name, version = get_name_and_version_from_setupcfg(parser, package_dir) results["name"] = name @@ -408,14 +402,11 @@ def _prepare_wheel_building_kwargs( wheel_download_dir = os.path.join(CACHE_DIR, "wheels") # type: STRING_TYPE mkdir_p(wheel_download_dir) - if src_dir is None: if editable and src_root is not None: src_dir = src_root - elif ireq is None and src_root is not None and not editable: + elif src_root is not None: src_dir = _get_src_dir(root=src_root) # type: STRING_TYPE - elif ireq is not None and ireq.editable and src_root is not None: - src_dir = _get_src_dir(root=src_root) else: src_dir = create_tracked_tempdir(prefix="reqlib-src") @@ -1033,11 +1024,6 @@ def version(self): self._version = info.get("version", None) return self._version - @classmethod - def get_setup_cfg(cls, setup_cfg_path): - # type: (S) -> Dict[S, Union[S, None, Set[BaseRequirement], List[S], Tuple[S, Tuple[BaseRequirement]]]] - return parse_setup_cfg(setup_cfg_path) - @property def egg_base(self): # type: () -> S @@ -1120,7 +1106,12 @@ def get_extras_from_ireq(self): def parse_setup_cfg(self): # type: () -> Dict[STRING_TYPE, Any] if self.setup_cfg is not None and self.setup_cfg.exists(): - parsed = self.get_setup_cfg(self.setup_cfg.as_posix()) + contents = self.setup_cfg.read_text() + base_dir = self.setup_cfg.absolute().parent.as_posix() + try: + parsed = setuptools_parse_setup_cfg(self.setup_cfg.as_posix()) + except Exception: + parsed = parse_setup_cfg(contents, base_dir) if not parsed: return {} return parsed @@ -1183,13 +1174,15 @@ def build_wheel(self): if not self.pyproject.exists(): build_requires = ", ".join(['"{0}"'.format(r) for r in self.build_requires]) self.pyproject.write_text( - u""" + six.text_type( + """ [build-system] requires = [{0}] build-backend = "{1}" - """.format( - build_requires, self.build_backend - ).strip() + """.format( + build_requires, self.build_backend + ).strip() + ) ) return build_pep517( self.base_dir, @@ -1209,13 +1202,15 @@ def build_sdist(self): ['"{0}"'.format(r) for r in self.build_requires] ) self.pyproject.write_text( - u""" + six.text_type( + """ [build-system] requires = [{0}] build-backend = "{1}" - """.format( - build_requires, self.build_backend - ).strip() + """.format( + build_requires, self.build_backend + ).strip() + ) ) return build_pep517( self.base_dir, @@ -1434,8 +1429,8 @@ def from_requirement(cls, requirement, finder=None): @classmethod @lru_cache() - def from_ireq(cls, ireq, subdir=None, finder=None): - # type: (InstallRequirement, Optional[AnyStr], Optional[PackageFinder]) -> Optional[SetupInfo] + def from_ireq(cls, ireq, subdir=None, finder=None, session=None): + # type: (InstallRequirement, Optional[AnyStr], Optional[PackageFinder], Optional[requests.Session]) -> Optional[SetupInfo] import pip_shims.shims if not ireq.link: @@ -1445,7 +1440,7 @@ def from_ireq(cls, ireq, subdir=None, finder=None): if not finder: from .dependencies import get_finder - finder = get_finder() + session, finder = get_finder() _, uri = split_vcs_method_from_uri(unquote(ireq.link.url_without_fragment)) parsed = urlparse(uri) if "file" in parsed.scheme: @@ -1461,11 +1456,14 @@ def from_ireq(cls, ireq, subdir=None, finder=None): path = pip_shims.shims.url_to_path(uri) kwargs = _prepare_wheel_building_kwargs(ireq) ireq.source_dir = kwargs["src_dir"] - if not ( - ireq.editable - and pip_shims.shims.is_file_url(ireq.link) - and not ireq.link.is_artifact - ): + try: + is_vcs = ireq.link.is_vcs + except AttributeError: + 
try: + is_vcs = not ireq.link.is_artifact + except AttributeError: + is_vcs = False + if not (ireq.editable and pip_shims.shims.is_file_url(ireq.link) and is_vcs): if ireq.is_wheel: only_download = True download_dir = kwargs["wheel_download_dir"] @@ -1476,17 +1474,22 @@ def from_ireq(cls, ireq, subdir=None, finder=None): raise RequirementError( "The file URL points to a directory not installable: {}".format(ireq.link) ) - ireq.build_location(kwargs["build_dir"]) - src_dir = ireq.ensure_has_source_dir(kwargs["src_dir"]) - ireq._temp_build_dir.path = kwargs["build_dir"] + # this ensures the build dir is treated as the temporary build location + # and the source dir is treated as permanent / not deleted by pip + build_location_func = getattr(ireq, "build_location", None) + if build_location_func is None: + build_location_func = getattr(ireq, "ensure_build_location", None) + build_location_func(kwargs["build_dir"]) + ireq.ensure_has_source_dir(kwargs["src_dir"]) + src_dir = ireq.source_dir ireq.populate_link(finder, False, False) - pip_shims.shims.unpack_url( - ireq.link, - src_dir, - download_dir, + pip_shims.shims.shim_unpack( + link=ireq.link, + location=kwargs["src_dir"], + download_dir=download_dir, only_download=only_download, - session=finder.session, + session=session, hashes=ireq.hashes(False), progress_bar="off", ) diff --git a/pipenv/vendor/requirementslib/models/url.py b/pipenv/vendor/requirementslib/models/url.py index 889a4bdd87..4f8c010117 100644 --- a/pipenv/vendor/requirementslib/models/url.py +++ b/pipenv/vendor/requirementslib/models/url.py @@ -12,7 +12,7 @@ from ..environment import MYPY_RUNNING if MYPY_RUNNING: - from typing import List, Tuple, Text, Union, TypeVar, Optional + from typing import Dict, List, Optional, Text, Tuple, TypeVar, Union from pip_shims.shims import Link from vistir.compat import Path @@ -41,6 +41,8 @@ def _get_parsed_url(url): auth, _, url = url.rpartition("@") url = "{scheme}://{url}".format(scheme=scheme, url=url) parsed = urllib3_parse(url)._replace(auth=auth) + if parsed.auth and unquote_plus(parsed.auth) != parsed.auth: + return parsed._replace(auth=unquote_plus(parsed.auth)) return parsed @@ -62,7 +64,7 @@ def remove_password_from_url(url): return parsed.url -@attr.s +@attr.s(hash=True) class URI(object): #: The target hostname, e.g. 
`amazon.com` host = attr.ib(type=str) @@ -96,6 +98,8 @@ class URI(object): is_implicit_ssh = attr.ib(default=False, type=bool) _auth = attr.ib(default=None, type=str, repr=False) _fragment_dict = attr.ib(factory=dict, type=dict) + _username_is_quoted = attr.ib(type=bool, default=False) + _password_is_quoted = attr.ib(type=bool, default=False) def _parse_query(self): # type: () -> URI @@ -145,19 +149,37 @@ def _parse_auth(self): # type: () -> URI if self._auth: username, _, password = self._auth.partition(":") - password = quote_plus(password) - return attr.evolve(self, username=username, password=password) + username_is_quoted, password_is_quoted = False, False + quoted_username, quoted_password = "", "" + if password: + quoted_password = quote_plus(password) + password_is_quoted = quoted_password != password + if username: + quoted_username = quote_plus(username) + username_is_quoted = quoted_username != username + return attr.evolve( + self, + username=quoted_username, + password=quoted_password, + username_is_quoted=username_is_quoted, + password_is_quoted=password_is_quoted, + ) return self def get_password(self, unquote=False, include_token=True): # type: (bool, bool) -> str - password = self.password - if password and unquote: + password = self.password if self.password else "" + if password and unquote and self._password_is_quoted: password = unquote_plus(password) - else: - password = "" return password + def get_username(self, unquote=False): + # type: (bool) -> str + username = self.username if self.username else "" + if username and unquote and self._username_is_quoted: + username = unquote_plus(username) + return username + @staticmethod def parse_subdirectory(url_part): # type: (str) -> Tuple[str, Optional[str]] @@ -167,6 +189,22 @@ def parse_subdirectory(url_part): subdir = "&{0}".format(subdir.strip()) return url_part.strip(), subdir + @classmethod + def get_parsed_url(cls, url): + # if there is a "#" in the auth section, this could break url parsing + parsed_url = _get_parsed_url(url) + if "@" in url and "#" in url: + scheme = "{0}://".format(parsed_url.scheme) + if parsed_url.scheme == "file": + scheme = "{0}/".format(scheme) + url_without_scheme = url.replace(scheme, "") + maybe_auth, _, maybe_url = url_without_scheme.partition("@") + if "#" in maybe_auth and (not parsed_url.host or "." 
not in parsed_url.host): + new_parsed_url = _get_parsed_url("{0}{1}".format(scheme, maybe_url)) + new_parsed_url = new_parsed_url._replace(auth=maybe_auth) + return new_parsed_url + return parsed_url + @classmethod def parse(cls, url): # type: (S) -> URI @@ -187,8 +225,9 @@ def parse(cls, url): url, ref = split_ref_from_uri(url.strip()) if "file:/" in url and "file:///" not in url: url = url.replace("file:/", "file:///") - parsed = _get_parsed_url(url) - if not (parsed.scheme and parsed.host and parsed.path): + parsed = cls.get_parsed_url(url) + # if there is a "#" in the auth section, this could break url parsing + if not (parsed.scheme and parsed.host): # check if this is a file uri if not ( parsed.scheme @@ -199,24 +238,9 @@ def parse(cls, url): parsed_dict = dict(parsed._asdict()).copy() parsed_dict["is_direct_url"] = is_direct_url parsed_dict["is_implicit_ssh"] = is_implicit_ssh - if name_with_extras: - fragment = "" - if parsed_dict["fragment"] is not None: - fragment = "{0}".format(parsed_dict["fragment"]) - if fragment.startswith("egg="): - name, extras = pip_shims.shims._strip_extras(name_with_extras) - fragment_name, fragment_extras = pip_shims.shims._strip_extras(fragment) - if fragment_extras and not extras: - name_with_extras = "{0}{1}".format(name, fragment_extras) - fragment = "" - elif "&subdirectory" in parsed_dict["path"]: - path, fragment = cls.parse_subdirectory(parsed_dict["path"]) - parsed_dict["path"] = path - elif ref is not None and "&subdirectory" in ref: - ref, fragment = cls.parse_subdirectory(ref) - parsed_dict["fragment"] = "egg={0}{1}".format(name_with_extras, fragment) - if ref is not None: - parsed_dict["ref"] = ref.strip() + parsed_dict.update( + **update_url_name_and_fragment(name_with_extras, ref, parsed_dict) + ) # type: ignore return cls(**parsed_dict)._parse_auth()._parse_query()._parse_fragment() def to_string( @@ -249,15 +273,19 @@ def to_string( if direct is None: direct = self.is_direct_url if escape_password: - password = "----" if (self.password or self.username) else "" + password = "----" if self.password else "" + if password: + username = self.get_username(unquote=unquote) + elif self.username: + username = "----" + else: + username = "" else: password = self.get_password(unquote=unquote) + username = self.get_username(unquote=unquote) auth = "" - if self.username: + if username: if password: - auth = "{self.username}:{password}@".format(password=password, self=self) + auth = "{username}:{password}@".format( + password=password, username=username + ) else: - auth = "{self.username}@".format(self=self) + auth = "{username}@".format(username=username) query = "" if self.query: query = "{query}?{self.query}".format(query=query, self=self) @@ -292,13 +320,25 @@ def to_string( def get_host_port_path(self, strip_ref=False): # type: (bool) -> str host = self.host if self.host else "" - if self.port: + if self.port is not None: host = "{host}:{self.port!s}".format(host=host, self=self) - path = "{self.path}".format(self=self) + path = "{self.path}".format(self=self) if self.path else "" if self.ref and not strip_ref: path = "{path}@{self.ref}".format(path=path, self=self) return "{host}{path}".format(host=host, path=path) + @property + def hidden_auth(self): + # type: () -> str + auth = "" + if self.username and self.password: + password = "****" + username = self.get_username(unquote=True) + auth = "{username}:{password}".format(username=username, password=password) + elif self.username and not self.password: + auth = "****" + return auth + @property def name_with_extras(self): # type: () -> str @@
-364,7 +404,10 @@ def url_without_ref(self): def base_url(self): # type: () -> str return self.to_string( - escape_password=False, strip_ssh=self.is_implicit_ssh, direct=False + escape_password=False, + strip_ssh=self.is_implicit_ssh, + direct=False, + unquote=False, ) @property @@ -372,6 +415,11 @@ def full_url(self): # type: () -> str return self.to_string(escape_password=False, strip_ssh=False, direct=False) + @property + def secret(self): + # type: () -> str + return self.full_url + @property def safe_string(self): # type: () -> str @@ -402,3 +450,27 @@ def is_file_url(self): def __str__(self): # type: () -> str return self.to_string(escape_password=True, unquote=True) + + +def update_url_name_and_fragment(name_with_extras, ref, parsed_dict): + # type: (Optional[str], Optional[str], Dict[str, Optional[str]]) -> Dict[str, Optional[str]] + if name_with_extras: + fragment = "" # type: Optional[str] + if parsed_dict["fragment"] is not None: + fragment = "{0}".format(parsed_dict["fragment"]) + if fragment.startswith("egg="): + name, extras = pip_shims.shims._strip_extras(name_with_extras) + fragment_name, fragment_extras = pip_shims.shims._strip_extras(fragment) + if fragment_extras and not extras: + name_with_extras = "{0}{1}".format(name, fragment_extras) + fragment = "" + elif ( + parsed_dict.get("path") is not None and "&subdirectory" in parsed_dict["path"] + ): + path, fragment = URI.parse_subdirectory(parsed_dict["path"]) # type: ignore + parsed_dict["path"] = path + elif ref is not None and "&subdirectory" in ref: + ref, fragment = URI.parse_subdirectory(ref) + if ref: + parsed_dict["ref"] = ref.strip() + return parsed_dict diff --git a/pipenv/vendor/requirementslib/models/utils.py b/pipenv/vendor/requirementslib/models/utils.py index e79ea7e959..5d4708647b 100644 --- a/pipenv/vendor/requirementslib/models/utils.py +++ b/pipenv/vendor/requirementslib/models/utils.py @@ -8,7 +8,6 @@ import sys from collections import defaultdict from itertools import chain, groupby -from operator import attrgetter import six import tomlkit @@ -18,10 +17,10 @@ from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet from packaging.version import parse as parse_version from plette.models import Package, PackageCollection -from six.moves.urllib import parse as urllib_parse from tomlkit.container import Container from tomlkit.items import AoT, Array, Bool, InlineTable, Item, String, Table from urllib3 import util as urllib3_util +from urllib3.util import parse_url as urllib3_parse from vistir.compat import lru_cache from vistir.misc import dedup from vistir.path import is_valid_url @@ -30,6 +29,16 @@ from ..utils import SCHEME_LIST, VCS_LIST, is_star if MYPY_RUNNING: + from attr import _ValidatorType # noqa + from packaging.requirements import Requirement as PackagingRequirement + from pip_shims.shims import Link + from pkg_resources import Requirement as PkgResourcesRequirement + from pkg_resources.extern.packaging.markers import ( + Op as PkgResourcesOp, + Variable as PkgResourcesVariable, + Value as PkgResourcesValue, + Marker as PkgResourcesMarker, + ) from typing import ( Union, Optional, @@ -45,16 +54,7 @@ Match, Iterable, # noqa ) - from attr import _ValidatorType # noqa - from packaging.requirements import Requirement as PackagingRequirement - from pkg_resources import Requirement as PkgResourcesRequirement - from pkg_resources.extern.packaging.markers import ( - Op as PkgResourcesOp, - Variable as PkgResourcesVariable, - Value as PkgResourcesValue, - Marker as 
PkgResourcesMarker, - ) - from pip_shims.shims import Link + from urllib3.util.url import Url from vistir.compat import Path _T = TypeVar("_T") @@ -282,6 +282,29 @@ def build_vcs_uri( return uri +def _get_parsed_url(url): + # type: (S) -> Url + """ + This is a stand-in function for `urllib3.util.parse_url` + + The original function doesn't handle special characters very well; this simply splits + out the authentication section, creates the parsed url, then puts the authentication + section back in, bypassing validation. + + :return: The new, parsed URL object + :rtype: :class:`~urllib3.util.url.Url` + """ + + try: + parsed = urllib3_parse(url) + except ValueError: + scheme, _, url = url.partition("://") + auth, _, url = url.rpartition("@") + url = "{scheme}://{url}".format(scheme=scheme, url=url) + parsed = urllib3_parse(url)._replace(auth=auth) + return parsed + + def convert_direct_url_to_url(direct_url): # type: (AnyStr) -> AnyStr """Converts direct URLs to standard, link-style URLs @@ -544,13 +567,14 @@ def split_ref_from_uri(uri): """ if not isinstance(uri, six.string_types): raise TypeError("Expected a string, received {0!r}".format(uri)) - parsed = urllib_parse.urlparse(uri) - path = parsed.path + parsed = _get_parsed_url(uri) + path = parsed.path if parsed.path else "" + scheme = parsed.scheme if parsed.scheme else "" ref = None - if parsed.scheme != "file" and "@" in path: + if scheme != "file" and "@" in path: path, _, ref = path.rpartition("@") parsed = parsed._replace(path=path) - return (urllib_parse.urlunparse(parsed), ref) + return (parsed.url, ref) def validate_vcs(instance, attr_, value): @@ -885,6 +909,15 @@ def version_from_ireq(ireq): return first(ireq.specifier._specs).version +def _get_requires_python(candidate): + # type: (Any) -> str + requires_python = getattr(candidate, "requires_python", None) + if requires_python is None: + link = getattr(candidate, "location", getattr(candidate, "link", None)) + requires_python = getattr(link, "requires_python", None) + return requires_python + + def clean_requires_python(candidates): """Get a cleaned list of all the candidates with valid specifiers in the `requires_python` attributes.""" all_candidates = [] @@ -893,8 +926,7 @@ py_version = parse_version(os.environ.get("PIP_PYTHON_VERSION", sys_version)) for c in candidates: - from_location = attrgetter("location.requires_python") - requires_python = getattr(c, "requires_python", from_location(c)) + requires_python = _get_requires_python(c) if requires_python: # Old specifications had people setting this to single digits # which is effectively the same as '>=digit,<digit+1' diff --git a/pipenv/vendor/requirementslib/models/vcs.py b/pipenv/vendor/requirementslib/models/vcs.py index 9296f605b2..447e54e475 100644 --- a/pipenv/vendor/requirementslib/models/vcs.py +++ b/pipenv/vendor/requirementslib/models/vcs.py @@ -1,55 +1,78 @@ # -*- coding=utf-8 -*- from __future__ import absolute_import, print_function -import attr import importlib import os +import sys + +import attr import pip_shims import six -import sys + +from ..environment import MYPY_RUNNING +from .url import URI + +if MYPY_RUNNING: + from typing import Any, Optional, Tuple @attr.s(hash=True) class VCSRepository(object): DEFAULT_RUN_ARGS = None - url = attr.ib() - name = attr.ib() - checkout_directory = attr.ib() - vcs_type = attr.ib() - subdirectory = attr.ib(default=None) - commit_sha = attr.ib(default=None) - ref = attr.ib(default=None) - repo_instance = attr.ib() - clone_log
= attr.ib(default=None) - - @repo_instance.default - def get_repo_instance(self): + url = attr.ib() # type: str + name = attr.ib() # type: str + checkout_directory = attr.ib() # type: str + vcs_type = attr.ib() # type: str + parsed_url = attr.ib() # type: URI + subdirectory = attr.ib(default=None) # type: Optional[str] + commit_sha = attr.ib(default=None) # type: Optional[str] + ref = attr.ib(default=None) # type: Optional[str] + repo_backend = attr.ib() # type: Any + clone_log = attr.ib(default=None) # type: Optional[str] + + @parsed_url.default + def get_parsed_url(self): + # type: () -> URI + return URI.parse(self.url) + + @repo_backend.default + def get_repo_backend(self): if self.DEFAULT_RUN_ARGS is None: default_run_args = self.monkeypatch_pip() else: default_run_args = self.DEFAULT_RUN_ARGS from pip_shims.shims import VcsSupport + VCS_SUPPORT = VcsSupport() - backend = VCS_SUPPORT._registry.get(self.vcs_type) - repo = backend(url=self.url) - if repo.run_command.__func__.__defaults__ != default_run_args: - repo.run_command.__func__.__defaults__ = default_run_args - return repo + backend = VCS_SUPPORT.get_backend(self.vcs_type) + # repo = backend(url=self.url) + if backend.run_command.__func__.__defaults__ != default_run_args: + backend.run_command.__func__.__defaults__ = default_run_args + return backend @property def is_local(self): + # type: () -> bool url = self.url - if '+' in url: - url = url.split('+')[1] + if "+" in url: + url = url.split("+")[1] return url.startswith("file") def obtain(self): - if (os.path.exists(self.checkout_directory) and not - self.repo_instance.is_repository_directory(self.checkout_directory)): - self.repo_instance.unpack(self.checkout_directory) + # type: () -> None + lte_pip_19 = ( + pip_shims.parsed_pip_version.parsed_version < pip_shims.parse_version("19.0") + ) + if os.path.exists( + self.checkout_directory + ) and not self.repo_backend.is_repository_directory(self.checkout_directory): + self.repo_backend.unpack(self.checkout_directory) elif not os.path.exists(self.checkout_directory): - self.repo_instance.obtain(self.checkout_directory) + if lte_pip_19: + self.repo_backend.obtain(self.checkout_directory) + else: + self.repo_backend.obtain(self.checkout_directory, self.parsed_url) else: if self.ref: self.checkout_ref(self.ref) @@ -57,30 +80,40 @@ def obtain(self): self.commit_sha = self.get_commit_hash() def checkout_ref(self, ref): - if not self.repo_instance.is_commit_id_equal( - self.checkout_directory, self.get_commit_hash() - ) and not self.repo_instance.is_commit_id_equal(self.checkout_directory, ref): - if not self.is_local: - self.update(ref) + # type: (str) -> None + rev_opts = self.repo_backend.make_rev_options(ref) + if not any( + [ + self.repo_backend.is_commit_id_equal(self.checkout_directory, ref), + self.repo_backend.is_commit_id_equal(self.checkout_directory, rev_opts), + self.is_local, + ] + ): + self.update(ref) def update(self, ref): - target_ref = self.repo_instance.make_rev_options(ref) - if pip_shims.parse_version(pip_shims.pip_version) > pip_shims.parse_version("18.0"): - self.repo_instance.update(self.checkout_directory, self.url, target_ref) + # type: (str) -> None + target_ref = self.repo_backend.make_rev_options(ref) + if pip_shims.parse_version(pip_shims.pip_version) > pip_shims.parse_version( + "18.0" + ): + self.repo_backend.update(self.checkout_directory, self.url, target_ref) else: - self.repo_instance.update(self.checkout_directory, target_ref) + self.repo_backend.update(self.checkout_directory, target_ref) 
self.commit_sha = self.get_commit_hash() def get_commit_hash(self, ref=None): - return self.repo_instance.get_revision(self.checkout_directory) + # type: (Optional[str]) -> str + return self.repo_backend.get_revision(self.checkout_directory) @classmethod def monkeypatch_pip(cls): + # type: () -> Tuple[Any, ...] target_module = pip_shims.shims.VcsSupport.__module__ pip_vcs = importlib.import_module(target_module) run_command_defaults = pip_vcs.VersionControl.run_command.__defaults__ # set the default to not write stdout, the first option sets this value - new_defaults = [False,] + list(run_command_defaults)[1:] + new_defaults = [False] + list(run_command_defaults)[1:] new_defaults = tuple(new_defaults) if six.PY3: try: diff --git a/pipenv/vendor/requirementslib/utils.py b/pipenv/vendor/requirementslib/utils.py index 503a13d071..b9db5d1686 100644 --- a/pipenv/vendor/requirementslib/utils.py +++ b/pipenv/vendor/requirementslib/utils.py @@ -11,7 +11,7 @@ import tomlkit import vistir from six.moves.urllib.parse import urlparse, urlsplit, urlunparse -from vistir.compat import Path +from vistir.compat import Path, fs_decode from vistir.path import ensure_mkdir_p, is_valid_url from .environment import MYPY_RUNNING @@ -180,7 +180,9 @@ def convert_entry_to_path(path): elif "path" in path: path = path["path"] - return path + if not os.name == "nt": + return fs_decode(path) + return Path(fs_decode(path)).as_posix() def is_installable_file(path): diff --git a/pipenv/vendor/semver.py b/pipenv/vendor/semver.py index 5f5be2c272..6bc9fcab41 100644 --- a/pipenv/vendor/semver.py +++ b/pipenv/vendor/semver.py @@ -1,12 +1,16 @@ """ Python helper for Semantic Versioning (http://semver.org/) """ +from __future__ import print_function +import argparse import collections +from functools import wraps import re +import sys -__version__ = '2.8.1' +__version__ = '2.9.0' __author__ = 'Kostiantyn Rybnikov' __author_email__ = 'k-bx@k-bx.com' __maintainer__ = 'Sebastien Celles' @@ -33,6 +37,10 @@ _LAST_NUMBER = re.compile(r'(?:[^\d]*(\d+)[^\d]*)+') +#: Contains the implemented semver.org version of the spec +SEMVER_SPEC_VERSION = "2.0.0" + + if not hasattr(__builtins__, 'cmp'): def cmp(a, b): return (a > b) - (a < b) @@ -47,7 +55,6 @@ def parse(version): if not provided :rtype: dict - >>> import semver >>> ver = semver.parse('3.4.5-pre.2+build.4') >>> ver['major'] 3 @@ -73,6 +80,18 @@ def parse(version): return version_parts +def comparator(operator): + """ Wrap a VersionInfo binary op method in a type-check """ + @wraps(operator) + def wrapper(self, other): + comparable_types = (VersionInfo, dict, tuple) + if not isinstance(other, comparable_types): + raise TypeError("other type %r must be in %r" + % (type(other), comparable_types)) + return operator(self, other) + return wrapper + + class VersionInfo(object): """ :param int major: version when you make incompatible API changes. 
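The ``comparator`` decorator above tightens comparison semantics for the vendored semver: ``VersionInfo`` instances may now be compared against other ``VersionInfo`` objects, plain dicts, and tuples, while every other operand type raises ``TypeError`` up front instead of falling back to Python's default comparison behaviour. A minimal sketch of the resulting behaviour (assuming the vendored ``semver`` 2.9.0 API as patched here; the exact error message may differ):

    import semver

    v = semver.VersionInfo.parse("3.4.5-pre.2+build.4")

    # Tuples and dicts are now valid operands; short tuples are filled in
    # by the widened VersionInfo defaults (minor=0, patch=0).
    assert v < (3, 4, 5)  # a prerelease sorts below the plain release
    assert v > (3, 4)     # compared as VersionInfo(3, 4, 0)

    # Any other operand type is rejected immediately.
    try:
        v == "3.4.5"
    except TypeError:
        pass  # e.g. "other type <class 'str'> must be in (...)"

Tuple support depends on the ``VersionInfo.__init__`` defaults introduced in the next hunk.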
@@ -84,33 +103,58 @@ class VersionInfo(object): """ __slots__ = ('_major', '_minor', '_patch', '_prerelease', '_build') - def __init__(self, major, minor, patch, prerelease=None, build=None): - self._major = major - self._minor = minor - self._patch = patch - self._prerelease = prerelease - self._build = build + def __init__(self, major, minor=0, patch=0, prerelease=None, build=None): + self._major = int(major) + self._minor = int(minor) + self._patch = int(patch) + self._prerelease = None if prerelease is None else str(prerelease) + self._build = None if build is None else str(build) @property def major(self): + """The major part of a version""" return self._major + @major.setter + def major(self, value): + raise AttributeError("attribute 'major' is readonly") + @property def minor(self): + """The minor part of a version""" return self._minor + @minor.setter + def minor(self, value): + raise AttributeError("attribute 'minor' is readonly") + @property def patch(self): + """The patch part of a version""" return self._patch + @patch.setter + def patch(self, value): + raise AttributeError("attribute 'patch' is readonly") + @property def prerelease(self): + """The prerelease part of a version""" return self._prerelease + @prerelease.setter + def prerelease(self, value): + raise AttributeError("attribute 'prerelease' is readonly") + @property def build(self): + """The build part of a version""" return self._build + @build.setter + def build(self, value): + raise AttributeError("attribute 'build' is readonly") + def _astuple(self): return (self.major, self.minor, self.patch, self.prerelease, self.build) @@ -124,40 +168,109 @@ def _asdict(self): ("build", self.build) )) + def __iter__(self): + """Implement iter(self).""" + # As long as we support Py2.7, we can't use the "yield from" syntax + for v in self._astuple(): + yield v + + def bump_major(self): + """Raise the major part of the version, return a new object + but leave self untouched + + :return: new object with the raised major part + :rtype: VersionInfo + + >>> ver = semver.parse_version_info("3.4.5") + >>> ver.bump_major() + VersionInfo(major=4, minor=0, patch=0, prerelease=None, build=None) + """ + return parse_version_info(bump_major(str(self))) + + def bump_minor(self): + """Raise the minor part of the version, return a new object + but leave self untouched + + :return: new object with the raised minor part + :rtype: VersionInfo + + >>> ver = semver.parse_version_info("3.4.5") + >>> ver.bump_minor() + VersionInfo(major=3, minor=5, patch=0, prerelease=None, build=None) + """ + return parse_version_info(bump_minor(str(self))) + + def bump_patch(self): + """Raise the patch part of the version, return a new object + but leave self untouched + + :return: new object with the raised patch part + :rtype: VersionInfo + + >>> ver = semver.parse_version_info("3.4.5") + >>> ver.bump_patch() + VersionInfo(major=3, minor=4, patch=6, prerelease=None, build=None) + """ + return parse_version_info(bump_patch(str(self))) + + def bump_prerelease(self, token='rc'): + """Raise the prerelease part of the version, return a new object + but leave self untouched + + :param token: defaults to 'rc' + :return: new object with the raised prerelease part + :rtype: VersionInfo + + >>> ver = semver.parse_version_info("3.4.5-rc.1") + >>> ver.bump_prerelease() + VersionInfo(major=3, minor=4, patch=5, prerelease='rc.2', \ +build=None) + """ + return parse_version_info(bump_prerelease(str(self), token)) + + def bump_build(self, token='build'): + """Raise the build part of the version, return a new object + but leave self untouched + + :param token: defaults to 'build' + :return: new object with the raised build part + :rtype: VersionInfo + + >>> ver = semver.parse_version_info("3.4.5-rc.1+build.9") + >>> ver.bump_build() + VersionInfo(major=3, minor=4, patch=5, prerelease='rc.1', \ +build='build.10') + """ + return parse_version_info(bump_build(str(self), token)) + + @comparator def __eq__(self, other): - if not isinstance(other, (VersionInfo, dict)): - return NotImplemented return _compare_by_keys(self._asdict(), _to_dict(other)) == 0 + @comparator def __ne__(self, other): - if not isinstance(other, (VersionInfo, dict)): - return NotImplemented return _compare_by_keys(self._asdict(), _to_dict(other)) != 0 + @comparator def __lt__(self, other): - if not isinstance(other, (VersionInfo, dict)): - return NotImplemented return _compare_by_keys(self._asdict(), _to_dict(other)) < 0 + @comparator def __le__(self, other): - if not isinstance(other, (VersionInfo, dict)): - return NotImplemented return _compare_by_keys(self._asdict(), _to_dict(other)) <= 0 + @comparator def __gt__(self, other): - if not isinstance(other, (VersionInfo, dict)): - return NotImplemented return _compare_by_keys(self._asdict(), _to_dict(other)) > 0 + @comparator def __ge__(self, other): - if not isinstance(other, (VersionInfo, dict)): - return NotImplemented return _compare_by_keys(self._asdict(), _to_dict(other)) >= 0 def __repr__(self): s = ", ".join("%s=%r" % (key, val) for key, val in self._asdict().items()) - return "VersionInfo(%s)" % s + return "%s(%s)" % (type(self).__name__, s) def __str__(self): return format_version(*(self._astuple())) @@ -169,21 +282,44 @@ def __hash__(self): def parse(version): """Parse version string to a VersionInfo instance. - >>> from semver import VersionInfo - >>> VersionInfo.parse('3.4.5-pre.2+build.4') + :param version: version string + :return: a :class:`semver.VersionInfo` instance + :rtype: :class:`semver.VersionInfo` + + >>> semver.VersionInfo.parse('3.4.5-pre.2+build.4') VersionInfo(major=3, minor=4, patch=5, \ prerelease='pre.2', build='build.4') - - :param version: version string - :return: a :class:`VersionInfo` instance - :rtype: :class:`VersionInfo` """ return parse_version_info(version) + def replace(self, **parts): + """Replace one or more parts of a version and return a new + :class:`semver.VersionInfo` object, but leave self untouched + + :param dict parts: the parts to be updated.
Valid keys are: + ``major``, ``minor``, ``patch``, ``prerelease``, or ``build`` + :return: the new :class:`semver.VersionInfo` object with the changed + parts + :raises: TypeError, if ``parts`` contains invalid keys + """ + version = self._asdict() + version.update(parts) + try: + return VersionInfo(**version) + except TypeError: + unknownkeys = set(parts) - set(self._asdict()) + error = ("replace() got %d unexpected keyword " + "argument(s): %s" % (len(unknownkeys), + ", ".join(unknownkeys)) + ) + raise TypeError(error) + def _to_dict(obj): if isinstance(obj, VersionInfo): return obj._asdict() + elif isinstance(obj, tuple): + return VersionInfo(*obj)._asdict() return obj @@ -194,7 +330,6 @@ def parse_version_info(version): :return: a :class:`VersionInfo` instance :rtype: :class:`VersionInfo` - >>> import semver >>> version_info = semver.parse_version_info("3.4.5-pre.2+build.4") >>> version_info.major 3 @@ -270,7 +405,6 @@ def compare(ver1, ver2): zero if ver1 == ver2 and strictly positive if ver1 > ver2 :rtype: int - >>> import semver >>> semver.compare("1.0.0", "2.0.0") -1 >>> semver.compare("2.0.0", "1.0.0") @@ -298,7 +432,6 @@ def match(version, match_expr): :return: True if the expression matches the version, otherwise False :rtype: bool - >>> import semver >>> semver.match("2.0.0", ">=1.0.0") True >>> semver.match("1.0.0", ">1.0.0") @@ -339,7 +472,6 @@ def max_ver(ver1, ver2): :return: the greater version of the two :rtype: :class:`VersionInfo` - >>> import semver >>> semver.max_ver("1.0.0", "2.0.0") '2.0.0' """ @@ -358,7 +490,6 @@ def min_ver(ver1, ver2): :return: the smaller version of the two :rtype: :class:`VersionInfo` - >>> import semver >>> semver.min_ver("1.0.0", "2.0.0") '1.0.0' """ @@ -372,15 +503,14 @@ def min_ver(ver1, ver2): def format_version(major, minor, patch, prerelease=None, build=None): """Format a version according to the Semantic Versioning specification - :param str major: the required major part of a version - :param str minor: the required minor part of a version - :param str patch: the required patch part of a version + :param int major: the required major part of a version + :param int minor: the required minor part of a version + :param int patch: the required patch part of a version :param str prerelease: the optional prerelease part of a version :param str build: the optional build part of a version :return: the formatted string :rtype: str - >>> import semver >>> semver.format_version(3, 4, 5, 'pre.2', 'build.4') '3.4.5-pre.2+build.4' """ @@ -414,7 +544,6 @@ def bump_major(version): :return: the raised version string :rtype: str - >>> import semver >>> semver.bump_major("3.4.5") '4.0.0' """ @@ -429,7 +558,6 @@ def bump_minor(version): :return: the raised version string :rtype: str - >>> import semver >>> semver.bump_minor("3.4.5") '3.5.0' """ @@ -444,7 +572,6 @@ def bump_patch(version): :return: the raised version string :rtype: str - >>> import semver >>> semver.bump_patch("3.4.5") '3.4.6' """ @@ -461,7 +588,7 @@ def bump_prerelease(version, token='rc'): :return: the raised version string :rtype: str - >>> bump_prerelease('3.4.5', 'dev') + >>> semver.bump_prerelease('3.4.5', 'dev') '3.4.5-dev.1' """ verinfo = parse(version) @@ -480,7 +607,7 @@ def bump_build(version, token='build'): :return: the raised version string :rtype: str - >>> bump_build('3.4.5-rc.1+build.9') + >>> semver.bump_build('3.4.5-rc.1+build.9') '3.4.5-rc.1+build.10' """ verinfo = parse(version) @@ -498,13 +625,125 @@ def finalize_version(version): :return: the finalized version string 
:rtype: str - >>> finalize_version('1.2.3-rc.5') + >>> semver.finalize_version('1.2.3-rc.5') '1.2.3' """ verinfo = parse(version) return format_version(verinfo['major'], verinfo['minor'], verinfo['patch']) +def createparser(): + """Create an :class:`argparse.ArgumentParser` instance + + :return: parser instance + :rtype: :class:`argparse.ArgumentParser` + """ + parser = argparse.ArgumentParser(prog=__package__, + description=__doc__) + s = parser.add_subparsers() + + # create compare subcommand + parser_compare = s.add_parser("compare", + help="Compare two versions" + ) + parser_compare.set_defaults(which="compare") + parser_compare.add_argument("version1", + help="First version" + ) + parser_compare.add_argument("version2", + help="Second version" + ) + + # create bump subcommand + parser_bump = s.add_parser("bump", + help="Bumps a version" + ) + parser_bump.set_defaults(which="bump") + sb = parser_bump.add_subparsers(title="Bump commands", + dest="bump") + + # Create subparsers for the bump subparser: + for p in (sb.add_parser("major", + help="Bump the major part of the version"), + sb.add_parser("minor", + help="Bump the minor part of the version"), + sb.add_parser("patch", + help="Bump the patch part of the version"), + sb.add_parser("prerelease", + help="Bump the prerelease part of the version"), + sb.add_parser("build", + help="Bump the build part of the version")): + p.add_argument("version", + help="Version to raise" + ) + + return parser + + +def process(args): + """Process the input from the CLI + + :param args: The parsed arguments + :type args: :class:`argparse.Namespace` + :return: result of the selected action + :rtype: str + """ + if args.which == "bump": + maptable = {'major': 'bump_major', + 'minor': 'bump_minor', + 'patch': 'bump_patch', + 'prerelease': 'bump_prerelease', + 'build': 'bump_build', + } + ver = parse_version_info(args.version) + # get the respective method and call it + func = getattr(ver, maptable[args.bump]) + return str(func()) + + elif args.which == "compare": + return str(compare(args.version1, args.version2)) + + +def main(cliargs=None): + """Entry point for the application script + + :param list cliargs: Arguments to parse or None (=use :class:`sys.argv`) + :return: error code + :rtype: int + """ + try: + parser = createparser() + args = parser.parse_args(args=cliargs) + # args.parser = parser + result = process(args) + print(result) + return 0 + + except (ValueError, TypeError) as err: + print("ERROR", err, file=sys.stderr) + return 2 + + +def replace(version, **parts): + """Replace one or more parts of a version and return the new string + + :param str version: the version string to replace + :param dict parts: the parts to be updated.
Valid keys are: + ``major``, ``minor``, ``patch``, ``prerelease``, or ``build`` + :return: the replaced version string + :raises: TypeError, if ``parts`` contains invalid keys + :rtype: str + + >>> import semver + >>> semver.replace("1.2.3", major=2, patch=10) + '2.2.10' + """ + version = parse_version_info(version) + return str(version.replace(**parts)) + + if __name__ == "__main__": import doctest doctest.testmod() diff --git a/pipenv/vendor/six.LICENSE b/pipenv/vendor/six.LICENSE index 365d10741b..4b05a54526 100644 --- a/pipenv/vendor/six.LICENSE +++ b/pipenv/vendor/six.LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2010-2018 Benjamin Peterson +Copyright (c) 2010-2019 Benjamin Peterson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/pipenv/vendor/six.py b/pipenv/vendor/six.py index 89b2188fd6..357e624abc 100644 --- a/pipenv/vendor/six.py +++ b/pipenv/vendor/six.py @@ -1,4 +1,4 @@ -# Copyright (c) 2010-2018 Benjamin Peterson +# Copyright (c) 2010-2019 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -29,7 +29,7 @@ import types __author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.12.0" +__version__ = "1.13.0" # Useful for very coarse version differentiation. @@ -255,8 +255,10 @@ class _MovedItems(_LazyModule): MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), + MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), @@ -637,6 +639,7 @@ def u(s): import io StringIO = io.StringIO BytesIO = io.BytesIO + del io _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" @@ -824,7 +827,15 @@ def with_metaclass(meta, *bases): class metaclass(type): def __new__(cls, name, this_bases, d): - return meta(name, bases, d) + if sys.version_info[:2] >= (3, 7): + # This version introduced PEP 560 that requires a bit + # of extra care (we mimic what is done by __build_class__). 
+ resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d['__orig_bases__'] = bases + else: + resolved_bases = bases + return meta(name, resolved_bases, d) @classmethod def __prepare__(cls, name, this_bases): diff --git a/pipenv/vendor/tomlkit/__init__.py b/pipenv/vendor/tomlkit/__init__.py index 9ab90e0a70..be5c373f4b 100644 --- a/pipenv/vendor/tomlkit/__init__.py +++ b/pipenv/vendor/tomlkit/__init__.py @@ -22,4 +22,4 @@ from .api import ws -__version__ = "0.5.3" +__version__ = "0.5.8" diff --git a/pipenv/vendor/tomlkit/_compat.py b/pipenv/vendor/tomlkit/_compat.py index b7407af696..768ce338d7 100644 --- a/pipenv/vendor/tomlkit/_compat.py +++ b/pipenv/vendor/tomlkit/_compat.py @@ -67,12 +67,12 @@ def __repr__(self): if self._name is None: return "%s.%s(%r)" % ( self.__class__.__module__, - self.__class__.__qualname__, + self.__class__.__name__, self._offset, ) return "%s.%s(%r, %r)" % ( self.__class__.__module__, - self.__class__.__qualname__, + self.__class__.__name__, self._offset, self._name, ) @@ -137,6 +137,7 @@ def _name_from_offset(delta): PY2 = sys.version_info[0] == 2 PY36 = sys.version_info >= (3, 6) +PY38 = sys.version_info >= (3, 8) if PY2: unicode = unicode diff --git a/pipenv/vendor/tomlkit/api.py b/pipenv/vendor/tomlkit/api.py index 0ac2675262..c9621830d0 100644 --- a/pipenv/vendor/tomlkit/api.py +++ b/pipenv/vendor/tomlkit/api.py @@ -109,7 +109,7 @@ def table(): # type: () -> Table def inline_table(): # type: () -> InlineTable - return InlineTable(Container(), Trivia()) + return InlineTable(Container(), Trivia(), new=True) def aot(): # type: () -> AoT diff --git a/pipenv/vendor/tomlkit/container.py b/pipenv/vendor/tomlkit/container.py index 340491c1d2..b4e7cf2cd6 100644 --- a/pipenv/vendor/tomlkit/container.py +++ b/pipenv/vendor/tomlkit/container.py @@ -11,7 +11,6 @@ from .items import Key from .items import Null from .items import Table -from .items import Trivia from .items import Whitespace from .items import item as _item @@ -245,6 +244,9 @@ def _insert_after( item = _item(item) idx = self._map[key] + # Insert after the max index if there are many. 
+ if isinstance(idx, tuple): + idx = max(idx) current_item = self._body[idx][1] if "\n" not in current_item.trivia.trail: current_item.trivia.trail += "\n" @@ -487,6 +489,23 @@ def update(self, other): # type: (Dict) -> None for k, v in other.items(): self[k] = v + def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any + if not isinstance(key, Key): + key = Key(key) + + if key not in self: + return default + + return self[key] + + def setdefault( + self, key, default=None + ): # type: (Union[Key, str], Any) -> Union[Item, Container] + if key not in self: + self[key] = default + + return self[key] + def __contains__(self, key): # type: (Union[Key, str]) -> bool if not isinstance(key, Key): key = Key(key) @@ -517,7 +536,10 @@ def __getitem__(self, key): # type: (Union[Key, str]) -> Union[Item, Container] item = self._body[idx][1] - return item.value + if item.is_boolean(): + return item.value + + return item def __setitem__(self, key, value): # type: (Union[Key, str], Any) -> None if key is not None and key in self: diff --git a/pipenv/vendor/tomlkit/items.py b/pipenv/vendor/tomlkit/items.py index f199e8dfbd..6588cda92f 100644 --- a/pipenv/vendor/tomlkit/items.py +++ b/pipenv/vendor/tomlkit/items.py @@ -8,6 +8,7 @@ from datetime import time from ._compat import PY2 +from ._compat import PY38 from ._compat import decode from ._compat import long from ._compat import unicode @@ -70,13 +71,32 @@ def item(value, _parent=None): elif isinstance(value, (str, unicode)): escaped = escape_string(value) - return String(StringType.SLB, value, escaped, Trivia()) + return String(StringType.SLB, decode(value), escaped, Trivia()) elif isinstance(value, datetime): - return DateTime(value, Trivia(), value.isoformat().replace("+00:00", "Z")) + return DateTime( + value.year, + value.month, + value.day, + value.hour, + value.minute, + value.second, + value.microsecond, + value.tzinfo, + Trivia(), + value.isoformat().replace("+00:00", "Z"), + ) elif isinstance(value, date): - return Date(value, Trivia(), value.isoformat()) + return Date(value.year, value.month, value.day, Trivia(), value.isoformat()) elif isinstance(value, time): - return Time(value, Trivia(), value.isoformat()) + return Time( + value.hour, + value.minute, + value.second, + value.microsecond, + value.tzinfo, + Trivia(), + value.isoformat(), + ) raise ValueError("Invalid type {}".format(type(value))) @@ -260,6 +280,15 @@ def indent(self, indent): # type: (int) -> Item return self + def is_boolean(self): # type: () -> bool + return isinstance(self, Bool) + + def is_table(self): # type: () -> bool + return isinstance(self, Table) + + def is_inline_table(self): # type: () -> bool + return isinstance(self, InlineTable) + def _getstate(self, protocol=3): return (self._trivia,) @@ -465,7 +494,7 @@ class Bool(Item): A boolean literal. """ - def __init__(self, t, trivia): # type: (float, Trivia) -> None + def __init__(self, t, trivia): # type: (int, Trivia) -> None super(Bool, self).__init__(trivia) self._value = bool(t) @@ -484,26 +513,53 @@ def as_string(self): # type: () -> str def _getstate(self, protocol=3): return self._value, self._trivia + def __bool__(self): + return self._value + + __nonzero__ = __bool__ + + def __eq__(self, other): + if not isinstance(other, bool): + return NotImplemented + + return other == self._value + class DateTime(Item, datetime): """ A datetime literal. """ - def __new__(cls, value, *_): # type: (..., datetime, ...) 
-> datetime + def __new__( + cls, + year, + month, + day, + hour, + minute, + second, + microsecond, + tzinfo, + trivia, + raw, + **kwargs + ): # type: (int, int, int, int, int, int, int, ..., Trivia, ...) -> datetime return datetime.__new__( cls, - value.year, - value.month, - value.day, - value.hour, - value.minute, - value.second, - value.microsecond, - tzinfo=value.tzinfo, + year, + month, + day, + hour, + minute, + second, + microsecond, + tzinfo=tzinfo, + **kwargs ) - def __init__(self, _, trivia, raw): # type: (datetime, Trivia, str) -> None + def __init__( + self, year, month, day, hour, minute, second, microsecond, tzinfo, trivia, raw + ): # type: (int, int, int, int, int, int, int, ..., Trivia) -> None super(DateTime, self).__init__(trivia) self._raw = raw @@ -520,12 +576,36 @@ def as_string(self): # type: () -> str return self._raw def __add__(self, other): - result = super(DateTime, self).__add__(other) + if PY38: + result = datetime( + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + self.tzinfo, + ).__add__(other) + else: + result = super(DateTime, self).__add__(other) return self._new(result) def __sub__(self, other): - result = super(DateTime, self).__sub__(other) + if PY38: + result = datetime( + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + self.tzinfo, + ).__sub__(other) + else: + result = super(DateTime, self).__sub__(other) if isinstance(result, datetime): result = self._new(result) @@ -535,20 +615,29 @@ def __sub__(self, other): def _new(self, result): raw = result.isoformat() - return DateTime(result, self._trivia, raw) + return DateTime( + result.year, + result.month, + result.day, + result.hour, + result.minute, + result.second, + result.microsecond, + result.tzinfo, + self._trivia, + raw, + ) def _getstate(self, protocol=3): return ( - datetime( - self.year, - self.month, - self.day, - self.hour, - self.minute, - self.second, - self.microsecond, - self.tzinfo, - ), + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + self.tzinfo, self._trivia, self._raw, ) @@ -559,10 +648,12 @@ class Date(Item, date): A date literal. """ - def __new__(cls, value, *_): # type: (..., date, ...) -> date - return date.__new__(cls, value.year, value.month, value.day) + def __new__(cls, year, month, day, *_): # type: (int, int, int, ...) 
-> date + return date.__new__(cls, year, month, day) - def __init__(self, _, trivia, raw): # type: (date, Trivia, str) -> None + def __init__( + self, year, month, day, trivia, raw + ): # type: (int, int, int, Trivia, str) -> None super(Date, self).__init__(trivia) self._raw = raw @@ -579,22 +670,31 @@ def as_string(self): # type: () -> str return self._raw def __add__(self, other): - result = super(Date, self).__add__(other) + if PY38: + result = date(self.year, self.month, self.day).__add__(other) + else: + result = super(Date, self).__add__(other) return self._new(result) def __sub__(self, other): - result = super(Date, self).__sub__(other) + if PY38: + result = date(self.year, self.month, self.day).__sub__(other) + else: + result = super(Date, self).__sub__(other) - return self._new(result) + if isinstance(result, date): + result = self._new(result) + + return result def _new(self, result): raw = result.isoformat() - return Date(result, self._trivia, raw) + return Date(result.year, result.month, result.day, self._trivia, raw) def _getstate(self, protocol=3): - return (datetime(self.year, self.month, self.day), self._trivia, self._raw) + return (self.year, self.month, self.day, self._trivia, self._raw) class Time(Item, time): @@ -602,12 +702,14 @@ class Time(Item, time): A time literal. """ - def __new__(cls, value, *_): # type: (time, ...) -> time - return time.__new__( - cls, value.hour, value.minute, value.second, value.microsecond - ) + def __new__( + cls, hour, minute, second, microsecond, tzinfo, *_ + ): # type: (int, int, int, int, ...) -> time + return time.__new__(cls, hour, minute, second, microsecond, tzinfo) - def __init__(self, _, trivia, raw): # type: (time, Trivia, str) -> None + def __init__( + self, hour, minute, second, microsecond, tzinfo, trivia, raw + ): # type: (int, int, int, int, Trivia, str) -> None super(Time, self).__init__(trivia) self._raw = raw @@ -625,7 +727,11 @@ def as_string(self): # type: () -> str def _getstate(self, protocol=3): return ( - time(self.hour, self.minute, self.second, self.microsecond, self.tzinfo), + self.hour, + self.minute, + self.second, + self.microsecond, + self.tzinfo, self._trivia, self._raw, ) @@ -636,7 +742,7 @@ class Array(Item, list): An array literal """ - def __init__(self, value, trivia): # type: (list, Trivia) -> None + def __init__(self, value, trivia, multiline=False): # type: (list, Trivia) -> None super(Array, self).__init__(trivia) list.__init__( @@ -644,6 +750,7 @@ def __init__(self, value, trivia): # type: (list, Trivia) -> None ) self._value = value + self._multiline = multiline @property def discriminant(self): # type: () -> int @@ -665,8 +772,23 @@ def is_homogeneous(self): # type: () -> bool return len(set(discriminants)) == 1 + def multiline(self, multiline): # type: (bool) -> self + self._multiline = multiline + + return self + def as_string(self): # type: () -> str - return "[{}]".format("".join(v.as_string() for v in self._value)) + if not self._multiline: + return "[{}]".format("".join(v.as_string() for v in self._value)) + + s = "[\n" + self.trivia.indent + " " * 4 + s += (",\n" + self.trivia.indent + " " * 4).join( + v.as_string() for v in self._value if not isinstance(v, Whitespace) + ) + s += ",\n" + s += "]" + + return s def append(self, _item): # type: () -> None if self._value: @@ -807,6 +929,20 @@ def append(self, key, _item): # type: (Union[Key, str], Any) -> Table return self + def raw_append(self, key, _item): # type: (Union[Key, str], Any) -> Table + if not isinstance(_item, Item): + _item = 
item(_item) + + self._value.append(key, _item) + + if isinstance(key, Key): + key = key.key + + if key is not None: + super(Table, self).__setitem__(key, _item) + + return self + def remove(self, key): # type: (Union[Key, str]) -> Table self._value.remove(key) @@ -860,6 +996,9 @@ def update(self, other): # type: (Dict) -> None for k, v in other.items(): self[k] = v + def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any + return self._value.get(key, default) + def __contains__(self, key): # type: (Union[Key, str]) -> bool return key in self._value @@ -894,6 +1033,9 @@ def __delitem__(self, key): # type: (Union[Key, str]) -> None def __repr__(self): return super(Table, self).__repr__() + def __str__(self): + return str(self.value) + def _getstate(self, protocol=3): return ( self._value, @@ -911,11 +1053,12 @@ class InlineTable(Item, dict): """ def __init__( - self, value, trivia - ): # type: (tomlkit.container.Container, Trivia) -> None + self, value, trivia, new=False + ): # type: (tomlkit.container.Container, Trivia, bool) -> None super(InlineTable, self).__init__(trivia) self._value = value + self._new = new for k, v in self._value.body: if k is not None: @@ -937,7 +1080,7 @@ def append(self, key, _item): # type: (Union[Key, str], Any) -> InlineTable _item = item(_item) if not isinstance(_item, (Whitespace, Comment)): - if not _item.trivia.indent and len(self._value) > 0: + if not _item.trivia.indent and len(self._value) > 0 and not self._new: _item.trivia.indent = " " if _item.trivia.comment: _item.trivia.comment = "" @@ -968,7 +1111,10 @@ def as_string(self): # type: () -> str for i, (k, v) in enumerate(self._value.body): if k is None: if i == len(self._value.body) - 1: - buf = buf.rstrip(",") + if self._new: + buf = buf.rstrip(", ") + else: + buf = buf.rstrip(",") buf += v.as_string() @@ -985,6 +1131,8 @@ def as_string(self): # type: () -> str if i != len(self._value.body) - 1: buf += "," + if self._new: + buf += " " buf += "}" @@ -1006,6 +1154,9 @@ def update(self, other): # type: (Dict) -> None for k, v in other.items(): self[k] = v + def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any + return self._value.get(key, default) + def __contains__(self, key): # type: (Union[Key, str]) -> bool return key in self._value diff --git a/pipenv/vendor/tomlkit/parser.py b/pipenv/vendor/tomlkit/parser.py index 3f507bb4ca..0fb5068400 100644 --- a/pipenv/vendor/tomlkit/parser.py +++ b/pipenv/vendor/tomlkit/parser.py @@ -434,15 +434,21 @@ def _parse_bare_key(self): # type: () -> Key def _handle_dotted_key( self, container, key, value - ): # type: (Container, Key, Any) -> None + ): # type: (Union[Container, Table], Key, Any) -> None names = tuple(self._split_table_name(key.key)) name = names[0] name._dotted = True if name in container: - table = container.item(name) + if isinstance(container, Table): + table = container.value.item(name) + else: + table = container.item(name) else: table = Table(Container(True), Trivia(), False, is_super_table=True) - container.append(name, table) + if isinstance(container, Table): + container.raw_append(name, table) + else: + container.append(name, table) for i, _name in enumerate(names[1:]): if i == len(names) - 2: @@ -517,19 +523,41 @@ def _parse_value(self): # type: () -> Item if m.group(1) and m.group(5): # datetime try: - return DateTime(parse_rfc3339(raw), trivia, raw) + dt = parse_rfc3339(raw) + return DateTime( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + trivia, + 
raw, + ) except ValueError: raise self.parse_error(InvalidDateTimeError) if m.group(1): try: - return Date(parse_rfc3339(raw), trivia, raw) + dt = parse_rfc3339(raw) + return Date(dt.year, dt.month, dt.day, trivia, raw) except ValueError: raise self.parse_error(InvalidDateError) if m.group(5): try: - return Time(parse_rfc3339(raw), trivia, raw) + t = parse_rfc3339(raw) + return Time( + t.hour, + t.minute, + t.second, + t.microsecond, + t.tzinfo, + trivia, + raw, + ) except ValueError: raise self.parse_error(InvalidTimeError) @@ -911,6 +939,13 @@ def _parse_table( cws, comment, trail = self._parse_comment_trail() result = Null() + table = Table( + values, + Trivia(indent, cws, comment, trail), + is_aot, + name=name, + display_name=name, + ) if len(name_parts) > 1: if missing_table: @@ -960,9 +995,9 @@ def _parse_table( _key, item = item if not self._merge_ws(item, values): if _key is not None and _key.is_dotted(): - self._handle_dotted_key(values, _key, item) + self._handle_dotted_key(table, _key, item) else: - values.append(_key, item) + table.raw_append(_key, item) else: if self._current == "[": is_aot_next, name_next = self._peek_table() @@ -970,7 +1005,7 @@ def _parse_table( if self._is_child(name, name_next): key_next, table_next = self._parse_table(name) - values.append(key_next, table_next) + table.raw_append(key_next, table_next) # Picking up any sibling while not self.end(): @@ -981,7 +1016,7 @@ def _parse_table( key_next, table_next = self._parse_table(name) - values.append(key_next, table_next) + table.raw_append(key_next, table_next) break else: @@ -991,13 +1026,7 @@ def _parse_table( ) if isinstance(result, Null): - result = Table( - values, - Trivia(indent, cws, comment, trail), - is_aot, - name=name, - display_name=name, - ) + result = table if is_aot and (not self._aot_stack or name != self._aot_stack[-1]): result = self._parse_aot(result, name) diff --git a/pipenv/vendor/urllib3/__init__.py b/pipenv/vendor/urllib3/__init__.py index eb9158867a..96474d3680 100644 --- a/pipenv/vendor/urllib3/__init__.py +++ b/pipenv/vendor/urllib3/__init__.py @@ -4,11 +4,7 @@ from __future__ import absolute_import import warnings -from .connectionpool import ( - HTTPConnectionPool, - HTTPSConnectionPool, - connection_from_url -) +from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url from . import exceptions from .filepost import encode_multipart_formdata @@ -24,25 +20,25 @@ import logging from logging import NullHandler -__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' -__license__ = 'MIT' -__version__ = '1.25.2' +__author__ = "Andrey Petrov (andrey.petrov@shazow.net)" +__license__ = "MIT" +__version__ = "1.25.7" __all__ = ( - 'HTTPConnectionPool', - 'HTTPSConnectionPool', - 'PoolManager', - 'ProxyManager', - 'HTTPResponse', - 'Retry', - 'Timeout', - 'add_stderr_logger', - 'connection_from_url', - 'disable_warnings', - 'encode_multipart_formdata', - 'get_host', - 'make_headers', - 'proxy_from_url', + "HTTPConnectionPool", + "HTTPSConnectionPool", + "PoolManager", + "ProxyManager", + "HTTPResponse", + "Retry", + "Timeout", + "add_stderr_logger", + "connection_from_url", + "disable_warnings", + "encode_multipart_formdata", + "get_host", + "make_headers", + "proxy_from_url", ) logging.getLogger(__name__).addHandler(NullHandler()) @@ -59,10 +55,10 @@ def add_stderr_logger(level=logging.DEBUG): # even if urllib3 is vendored within another package. 
logger = logging.getLogger(__name__) handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) + handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s")) logger.addHandler(handler) logger.setLevel(level) - logger.debug('Added a stderr logging handler to logger: %s', __name__) + logger.debug("Added a stderr logging handler to logger: %s", __name__) return handler @@ -74,18 +70,17 @@ def add_stderr_logger(level=logging.DEBUG): # shouldn't be: otherwise, it's very hard for users to use most Python # mechanisms to silence them. # SecurityWarning's always go off by default. -warnings.simplefilter('always', exceptions.SecurityWarning, append=True) +warnings.simplefilter("always", exceptions.SecurityWarning, append=True) # SubjectAltNameWarning's should go off once per host -warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True) +warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True) # InsecurePlatformWarning's don't vary between requests, so we keep it default. -warnings.simplefilter('default', exceptions.InsecurePlatformWarning, - append=True) +warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True) # SNIMissingWarnings should go off only once. -warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True) +warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True) def disable_warnings(category=exceptions.HTTPWarning): """ Helper for quickly disabling all urllib3 warnings. """ - warnings.simplefilter('ignore', category) + warnings.simplefilter("ignore", category) diff --git a/pipenv/vendor/urllib3/_collections.py b/pipenv/vendor/urllib3/_collections.py index 34f23811c6..019d1511d5 100644 --- a/pipenv/vendor/urllib3/_collections.py +++ b/pipenv/vendor/urllib3/_collections.py @@ -1,4 +1,5 @@ from __future__ import absolute_import + try: from collections.abc import Mapping, MutableMapping except ImportError: @@ -6,6 +7,7 @@ try: from threading import RLock except ImportError: # Platform-specific: No threads available + class RLock: def __enter__(self): pass @@ -19,7 +21,7 @@ def __exit__(self, exc_type, exc_value, traceback): from .packages.six import iterkeys, itervalues, PY3 -__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] +__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"] _Null = object() @@ -82,7 +84,9 @@ def __len__(self): return len(self._container) def __iter__(self): - raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') + raise NotImplementedError( + "Iteration over this class is unlikely to be threadsafe." 
+ ) def clear(self): with self.lock: @@ -150,7 +154,7 @@ def __setitem__(self, key, val): def __getitem__(self, key): val = self._container[key.lower()] - return ', '.join(val[1:]) + return ", ".join(val[1:]) def __delitem__(self, key): del self._container[key.lower()] @@ -159,12 +163,13 @@ def __contains__(self, key): return key.lower() in self._container def __eq__(self, other): - if not isinstance(other, Mapping) and not hasattr(other, 'keys'): + if not isinstance(other, Mapping) and not hasattr(other, "keys"): return False if not isinstance(other, type(self)): other = type(self)(other) - return (dict((k.lower(), v) for k, v in self.itermerged()) == - dict((k.lower(), v) for k, v in other.itermerged())) + return dict((k.lower(), v) for k, v in self.itermerged()) == dict( + (k.lower(), v) for k, v in other.itermerged() + ) def __ne__(self, other): return not self.__eq__(other) @@ -184,9 +189,9 @@ def __iter__(self): yield vals[0] def pop(self, key, default=__marker): - '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + """D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. - ''' + """ # Using the MutableMapping function directly fails due to the private marker. # Using ordinary dict.pop would expose the internal structures. # So let's reinvent the wheel. @@ -228,8 +233,10 @@ def extend(self, *args, **kwargs): with self.add instead of self.__setitem__ """ if len(args) > 1: - raise TypeError("extend() takes at most 1 positional " - "arguments ({0} given)".format(len(args))) + raise TypeError( + "extend() takes at most 1 positional " + "arguments ({0} given)".format(len(args)) + ) other = args[0] if len(args) >= 1 else () if isinstance(other, HTTPHeaderDict): @@ -295,7 +302,7 @@ def itermerged(self): """Iterate over all headers, merging duplicate ones together.""" for key in self: val = self._container[key.lower()] - yield val[0], ', '.join(val[1:]) + yield val[0], ", ".join(val[1:]) def items(self): return list(self.iteritems()) @@ -306,7 +313,7 @@ def from_httplib(cls, message): # Python 2 # python2.7 does not expose a proper API for exporting multiheaders # efficiently. This function re-reads raw lines from the message # object and extracts the multiheaders properly. - obs_fold_continued_leaders = (' ', '\t') + obs_fold_continued_leaders = (" ", "\t") headers = [] for line in message.headers: @@ -316,14 +323,14 @@ def from_httplib(cls, message): # Python 2 # in RFC-7230 S3.2.4. This indicates a multiline header, but # there exists no previous header to which we can attach it. raise InvalidHeader( - 'Header continuation with no previous header: %s' % line + "Header continuation with no previous header: %s" % line ) else: key, value = headers[-1] - headers[-1] = (key, value + ' ' + line.strip()) + headers[-1] = (key, value + " " + line.strip()) continue - key, value = line.split(':', 1) + key, value = line.split(":", 1) headers.append((key, value.strip())) return cls(headers) diff --git a/pipenv/vendor/urllib3/connection.py b/pipenv/vendor/urllib3/connection.py index f816ee807d..f5c946adf7 100644 --- a/pipenv/vendor/urllib3/connection.py +++ b/pipenv/vendor/urllib3/connection.py @@ -11,6 +11,7 @@ try: # Compiled with SSL? import ssl + BaseSSLError = ssl.SSLError except (ImportError, AttributeError): # Platform-specific: No SSL. 
ssl = None @@ -41,7 +42,7 @@ class ConnectionError(Exception): resolve_ssl_version, assert_fingerprint, create_urllib3_context, - ssl_wrap_socket + ssl_wrap_socket, ) @@ -51,20 +52,16 @@ class ConnectionError(Exception): log = logging.getLogger(__name__) -port_by_scheme = { - 'http': 80, - 'https': 443, -} +port_by_scheme = {"http": 80, "https": 443} -# When updating RECENT_DATE, move it to within two years of the current date, -# and not less than 6 months ago. -# Example: if Today is 2018-01-01, then RECENT_DATE should be any date on or -# after 2016-01-01 (today - 2 years) AND before 2017-07-01 (today - 6 months) -RECENT_DATE = datetime.date(2017, 6, 30) +# When it comes time to update this value as a part of regular maintenance +# (ie test_recent_date is failing) update it to ~6 months before the current date. +RECENT_DATE = datetime.date(2019, 1, 1) class DummyConnection(object): """Used to detect a failed ConnectionCls import.""" + pass @@ -92,7 +89,7 @@ class HTTPConnection(_HTTPConnection, object): Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ - default_port = port_by_scheme['http'] + default_port = port_by_scheme["http"] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` @@ -102,15 +99,15 @@ class HTTPConnection(_HTTPConnection, object): is_verified = False def __init__(self, *args, **kw): - if six.PY3: - kw.pop('strict', None) + if not six.PY2: + kw.pop("strict", None) # Pre-set source_address. - self.source_address = kw.get('source_address') + self.source_address = kw.get("source_address") #: The socket options provided by the user. If no options are #: provided, we use the default options. - self.socket_options = kw.pop('socket_options', self.default_socket_options) + self.socket_options = kw.pop("socket_options", self.default_socket_options) _HTTPConnection.__init__(self, *args, **kw) @@ -131,7 +128,7 @@ def host(self): those cases where it's appropriate (i.e., when doing DNS lookup to establish the actual TCP connection across which we're going to send HTTP requests). """ - return self._dns_host.rstrip('.') + return self._dns_host.rstrip(".") @host.setter def host(self, value): @@ -150,30 +147,34 @@ def _new_conn(self): """ extra_kw = {} if self.source_address: - extra_kw['source_address'] = self.source_address + extra_kw["source_address"] = self.source_address if self.socket_options: - extra_kw['socket_options'] = self.socket_options + extra_kw["socket_options"] = self.socket_options try: conn = connection.create_connection( - (self._dns_host, self.port), self.timeout, **extra_kw) + (self._dns_host, self.port), self.timeout, **extra_kw + ) except SocketTimeout: raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) + self, + "Connection to %s timed out. (connect timeout=%s)" + % (self.host, self.timeout), + ) except SocketError as e: raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) + self, "Failed to establish a new connection: %s" % e + ) return conn def _prepare_conn(self, conn): self.sock = conn # Google App Engine's httplib does not define _tunnel_host - if getattr(self, '_tunnel_host', None): + if getattr(self, "_tunnel_host", None): # TODO: Fix tunnel so it doesn't depend on self.sock state. 
self._tunnel() # Mark this connection as not reusable @@ -189,18 +190,15 @@ def request_chunked(self, method, url, body=None, headers=None): body with chunked encoding and not as one block """ headers = HTTPHeaderDict(headers if headers is not None else {}) - skip_accept_encoding = 'accept-encoding' in headers - skip_host = 'host' in headers + skip_accept_encoding = "accept-encoding" in headers + skip_host = "host" in headers self.putrequest( - method, - url, - skip_accept_encoding=skip_accept_encoding, - skip_host=skip_host + method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) for header, value in headers.items(): self.putheader(header, value) - if 'transfer-encoding' not in headers: - self.putheader('Transfer-Encoding', 'chunked') + if "transfer-encoding" not in headers: + self.putheader("Transfer-Encoding", "chunked") self.endheaders() if body is not None: @@ -211,29 +209,37 @@ def request_chunked(self, method, url, body=None, headers=None): if not chunk: continue if not isinstance(chunk, bytes): - chunk = chunk.encode('utf8') + chunk = chunk.encode("utf8") len_str = hex(len(chunk))[2:] - self.send(len_str.encode('utf-8')) - self.send(b'\r\n') + self.send(len_str.encode("utf-8")) + self.send(b"\r\n") self.send(chunk) - self.send(b'\r\n') + self.send(b"\r\n") # After the if clause, to always have a closed body - self.send(b'0\r\n\r\n') + self.send(b"0\r\n\r\n") class HTTPSConnection(HTTPConnection): - default_port = port_by_scheme['https'] + default_port = port_by_scheme["https"] ssl_version = None - def __init__(self, host, port=None, key_file=None, cert_file=None, - key_password=None, strict=None, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - ssl_context=None, server_hostname=None, **kw): - - HTTPConnection.__init__(self, host, port, strict=strict, - timeout=timeout, **kw) + def __init__( + self, + host, + port=None, + key_file=None, + cert_file=None, + key_password=None, + strict=None, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + ssl_context=None, + server_hostname=None, + **kw + ): + + HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw) self.key_file = key_file self.cert_file = cert_file @@ -243,25 +249,40 @@ def __init__(self, host, port=None, key_file=None, cert_file=None, # Required property for Google AppEngine 1.9.0 which otherwise causes # HTTPS requests to go out as HTTP. (See Issue #356) - self._protocol = 'https' + self._protocol = "https" def connect(self): conn = self._new_conn() self._prepare_conn(conn) + # Wrap socket using verification with the root certs in + # trusted_root_certs + default_ssl_context = False if self.ssl_context is None: + default_ssl_context = True self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(None), - cert_reqs=resolve_cert_reqs(None), + ssl_version=resolve_ssl_version(self.ssl_version), + cert_reqs=resolve_cert_reqs(self.cert_reqs), ) + # Try to load OS default certs if none are given. 
+ # Works well on Windows (requires Python3.4+) + context = self.ssl_context + if ( + not self.ca_certs + and not self.ca_cert_dir + and default_ssl_context + and hasattr(context, "load_default_certs") + ): + context.load_default_certs() + self.sock = ssl_wrap_socket( sock=conn, keyfile=self.key_file, certfile=self.cert_file, key_password=self.key_password, ssl_context=self.ssl_context, - server_hostname=self.server_hostname + server_hostname=self.server_hostname, ) @@ -270,16 +291,24 @@ class VerifiedHTTPSConnection(HTTPSConnection): Based on httplib.HTTPSConnection but wraps the socket with SSL certification. """ + cert_reqs = None ca_certs = None ca_cert_dir = None ssl_version = None assert_fingerprint = None - def set_cert(self, key_file=None, cert_file=None, - cert_reqs=None, key_password=None, ca_certs=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None): + def set_cert( + self, + key_file=None, + cert_file=None, + cert_reqs=None, + key_password=None, + ca_certs=None, + assert_hostname=None, + assert_fingerprint=None, + ca_cert_dir=None, + ): """ This method should only be called once, before the connection is used. """ @@ -306,7 +335,7 @@ def connect(self): hostname = self.host # Google App Engine's httplib does not define _tunnel_host - if getattr(self, '_tunnel_host', None): + if getattr(self, "_tunnel_host", None): self.sock = conn # Calls self._set_hostport(), so self.host is # self._tunnel_host below. @@ -323,15 +352,19 @@ def connect(self): is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: - warnings.warn(( - 'System time is way off (before {0}). This will probably ' - 'lead to SSL verification errors').format(RECENT_DATE), - SystemTimeWarning + warnings.warn( + ( + "System time is way off (before {0}). This will probably " + "lead to SSL verification errors" + ).format(RECENT_DATE), + SystemTimeWarning, ) # Wrap socket using verification with the root certs in # trusted_root_certs + default_ssl_context = False if self.ssl_context is None: + default_ssl_context = True self.ssl_context = create_urllib3_context( ssl_version=resolve_ssl_version(self.ssl_version), cert_reqs=resolve_cert_reqs(self.cert_reqs), @@ -339,6 +372,17 @@ def connect(self): context = self.ssl_context context.verify_mode = resolve_cert_reqs(self.cert_reqs) + + # Try to load OS default certs if none are given. + # Works well on Windows (requires Python3.4+) + if ( + not self.ca_certs + and not self.ca_cert_dir + and default_ssl_context + and hasattr(context, "load_default_certs") + ): + context.load_default_certs() + self.sock = ssl_wrap_socket( sock=conn, keyfile=self.key_file, @@ -347,31 +391,37 @@ def connect(self): ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, server_hostname=server_hostname, - ssl_context=context) + ssl_context=context, + ) if self.assert_fingerprint: - assert_fingerprint(self.sock.getpeercert(binary_form=True), - self.assert_fingerprint) - elif context.verify_mode != ssl.CERT_NONE \ - and not getattr(context, 'check_hostname', False) \ - and self.assert_hostname is not False: + assert_fingerprint( + self.sock.getpeercert(binary_form=True), self.assert_fingerprint + ) + elif ( + context.verify_mode != ssl.CERT_NONE + and not getattr(context, "check_hostname", False) + and self.assert_hostname is not False + ): # While urllib3 attempts to always turn off hostname matching from # the TLS library, this cannot always be done. So we check whether # the TLS Library still thinks it's matching hostnames. 
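        # (Editor's sketch — illustrative only, not part of the upstream diff:
        #  the default-cert fallback added above relies on a stdlib API that
        #  exists on ssl.SSLContext from Python 3.4 onward, hence the
        #  hasattr() guard:
        #
        #      import ssl
        #      ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        #      if hasattr(ctx, "load_default_certs"):
        #          ctx.load_default_certs()   # pull in the OS trust store
        #
        #  so a bare context still verifies against system roots when neither
        #  ca_certs nor ca_cert_dir was supplied.)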
cert = self.sock.getpeercert() - if not cert.get('subjectAltName', ()): - warnings.warn(( - 'Certificate for {0} has no `subjectAltName`, falling back to check for a ' - '`commonName` for now. This feature is being removed by major browsers and ' - 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 ' - 'for details.)'.format(hostname)), - SubjectAltNameWarning + if not cert.get("subjectAltName", ()): + warnings.warn( + ( + "Certificate for {0} has no `subjectAltName`, falling back to check for a " + "`commonName` for now. This feature is being removed by major browsers and " + "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 " + "for details.)".format(hostname) + ), + SubjectAltNameWarning, ) _match_hostname(cert, self.assert_hostname or server_hostname) self.is_verified = ( - context.verify_mode == ssl.CERT_REQUIRED or - self.assert_fingerprint is not None + context.verify_mode == ssl.CERT_REQUIRED + or self.assert_fingerprint is not None ) @@ -379,9 +429,10 @@ def _match_hostname(cert, asserted_hostname): try: match_hostname(cert, asserted_hostname) except CertificateError as e: - log.error( - 'Certificate did not match expected hostname: %s. ' - 'Certificate: %s', asserted_hostname, cert + log.warning( + "Certificate did not match expected hostname: %s. Certificate: %s", + asserted_hostname, + cert, ) # Add cert to exception and reraise so client code can inspect # the cert when catching the exception, if they want to diff --git a/pipenv/vendor/urllib3/connectionpool.py b/pipenv/vendor/urllib3/connectionpool.py index 157568a395..31696460f0 100644 --- a/pipenv/vendor/urllib3/connectionpool.py +++ b/pipenv/vendor/urllib3/connectionpool.py @@ -26,12 +26,14 @@ from .packages.ssl_match_hostname import CertificateError from .packages import six from .packages.six.moves import queue -from .packages.rfc3986.normalizers import normalize_host from .connection import ( port_by_scheme, DummyConnection, - HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, - HTTPException, BaseSSLError, + HTTPConnection, + HTTPSConnection, + VerifiedHTTPSConnection, + HTTPException, + BaseSSLError, ) from .request import RequestMethods from .response import HTTPResponse @@ -41,7 +43,13 @@ from .util.response import assert_header_parsing from .util.retry import Retry from .util.timeout import Timeout -from .util.url import get_host, Url, NORMALIZABLE_SCHEMES +from .util.url import ( + get_host, + parse_url, + Url, + _normalize_host as normalize_host, + _encode_target, +) from .util.queue import LifoQueue @@ -71,8 +79,7 @@ def __init__(self, host, port=None): self.port = port def __str__(self): - return '%s(host=%r, port=%r)' % (type(self).__name__, - self.host, self.port) + return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port) def __enter__(self): return self @@ -153,15 +160,24 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): :class:`urllib3.connection.HTTPSConnection` instances. 
""" - scheme = 'http' + scheme = "http" ConnectionCls = HTTPConnection ResponseCls = HTTPResponse - def __init__(self, host, port=None, strict=False, - timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, - headers=None, retries=None, - _proxy=None, _proxy_headers=None, - **conn_kw): + def __init__( + self, + host, + port=None, + strict=False, + timeout=Timeout.DEFAULT_TIMEOUT, + maxsize=1, + block=False, + headers=None, + retries=None, + _proxy=None, + _proxy_headers=None, + **conn_kw + ): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) @@ -195,19 +211,27 @@ def __init__(self, host, port=None, strict=False, # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. # We cannot know if the user has added default socket options, so we cannot replace the # list. - self.conn_kw.setdefault('socket_options', []) + self.conn_kw.setdefault("socket_options", []) def _new_conn(self): """ Return a fresh :class:`HTTPConnection`. """ self.num_connections += 1 - log.debug("Starting new HTTP connection (%d): %s:%s", - self.num_connections, self.host, self.port or "80") - - conn = self.ConnectionCls(host=self.host, port=self.port, - timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) + log.debug( + "Starting new HTTP connection (%d): %s:%s", + self.num_connections, + self.host, + self.port or "80", + ) + + conn = self.ConnectionCls( + host=self.host, + port=self.port, + timeout=self.timeout.connect_timeout, + strict=self.strict, + **self.conn_kw + ) return conn def _get_conn(self, timeout=None): @@ -231,16 +255,17 @@ def _get_conn(self, timeout=None): except queue.Empty: if self.block: - raise EmptyPoolError(self, - "Pool reached maximum size and no more " - "connections are allowed.") + raise EmptyPoolError( + self, + "Pool reached maximum size and no more connections are allowed.", + ) pass # Oh well, we'll create a new connection then # If this is a persistent connection, check if it got disconnected if conn and is_connection_dropped(conn): log.debug("Resetting dropped connection: %s", self.host) conn.close() - if getattr(conn, 'auto_open', 1) == 0: + if getattr(conn, "auto_open", 1) == 0: # This is a proxied connection that has been mutated by # httplib._tunnel() and cannot be reused (since it would # attempt to bypass the proxy) @@ -270,9 +295,7 @@ def _put_conn(self, conn): pass except queue.Full: # This should never happen if self.block == True - log.warning( - "Connection pool is full, discarding connection: %s", - self.host) + log.warning("Connection pool is full, discarding connection: %s", self.host) # Connection never got put back into the pool, close it. if conn: @@ -304,21 +327,30 @@ def _raise_timeout(self, err, url, timeout_value): """Is the error actually a timeout? Will raise a ReadTimeout or pass""" if isinstance(err, SocketTimeout): - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + raise ReadTimeoutError( + self, url, "Read timed out. (read timeout=%s)" % timeout_value + ) # See the above comment about EAGAIN in Python 3. In Python 2 we have # to specifically catch it and throw the timeout error - if hasattr(err, 'errno') and err.errno in _blocking_errnos: - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + if hasattr(err, "errno") and err.errno in _blocking_errnos: + raise ReadTimeoutError( + self, url, "Read timed out. (read timeout=%s)" % timeout_value + ) # Catch possible read timeouts thrown as SSL errors. 
If not the # case, rethrow the original. We need to do this because of: # http://bugs.python.org/issue10272 - if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python < 2.7.4 - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - def _make_request(self, conn, method, url, timeout=_Default, chunked=False, - **httplib_request_kw): + if "timed out" in str(err) or "did not complete (read)" in str( + err + ): # Python < 2.7.4 + raise ReadTimeoutError( + self, url, "Read timed out. (read timeout=%s)" % timeout_value + ) + + def _make_request( + self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw + ): """ Perform a request on a given urllib connection object taken from our pool. @@ -358,7 +390,7 @@ def _make_request(self, conn, method, url, timeout=_Default, chunked=False, read_timeout = timeout_obj.read_timeout # App Engine doesn't have a sock attr - if getattr(conn, 'sock', None): + if getattr(conn, "sock", None): # In Python 3 socket.py will catch EAGAIN and return None when you # try and read into the file pointer created by http.client, which # instead raises a BadStatusLine exception. Instead of catching @@ -366,7 +398,8 @@ def _make_request(self, conn, method, url, timeout=_Default, chunked=False, # timeouts, check for a zero timeout before making the request. if read_timeout == 0: raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout) + self, url, "Read timed out. (read timeout=%s)" % read_timeout + ) if read_timeout is Timeout.DEFAULT_TIMEOUT: conn.sock.settimeout(socket.getdefaulttimeout()) else: # None or a value @@ -381,26 +414,38 @@ def _make_request(self, conn, method, url, timeout=_Default, chunked=False, # Python 3 try: httplib_response = conn.getresponse() - except Exception as e: - # Remove the TypeError from the exception chain in Python 3; - # otherwise it looks like a programming error was the cause. + except BaseException as e: + # Remove the TypeError from the exception chain in + # Python 3 (including for exceptions like SystemExit). + # Otherwise it looks like a bug in the code. six.raise_from(e, None) except (SocketTimeout, BaseSSLError, SocketError) as e: self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise # AppEngine doesn't have a version attr. - http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') - log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port, - method, url, http_version, httplib_response.status, - httplib_response.length) + http_version = getattr(conn, "_http_vsn_str", "HTTP/?") + log.debug( + '%s://%s:%s "%s %s %s" %s %s', + self.scheme, + self.host, + self.port, + method, + url, + http_version, + httplib_response.status, + httplib_response.length, + ) try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 log.warning( - 'Failed to parse headers (url=%s): %s', - self._absolute_url(url), hpe, exc_info=True) + "Failed to parse headers (url=%s): %s", + self._absolute_url(url), + hpe, + exc_info=True, + ) return httplib_response @@ -430,7 +475,7 @@ def is_same_host(self, url): Check if the given ``url`` is a member of the same host as this connection pool. """ - if url.startswith('/'): + if url.startswith("/"): return True # TODO: Add optional support for socket.gethostbyname checking. 
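    # (Editor's sketch — illustrative only, not part of the upstream diff:
    #  is_same_host() short-circuits on path-only URLs and otherwise compares
    #  (scheme, host, port) triples; assuming the vendored urllib3 imports as
    #  usual:
    #
    #      from urllib3.util.url import get_host
    #
    #      get_host("http://example.com:8080/path")
    #      # -> ("http", "example.com", 8080)
    #      get_host("example.com")
    #      # -> ("http", "example.com", None)  # scheme defaults to "http"
    # )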
@@ -446,10 +491,22 @@ def is_same_host(self, url): return (scheme, host, port) == (self.scheme, self.host, self.port) - def urlopen(self, method, url, body=None, headers=None, retries=None, - redirect=True, assert_same_host=True, timeout=_Default, - pool_timeout=None, release_conn=None, chunked=False, - body_pos=None, **response_kw): + def urlopen( + self, + method, + url, + body=None, + headers=None, + retries=None, + redirect=True, + assert_same_host=True, + timeout=_Default, + pool_timeout=None, + release_conn=None, + chunked=False, + body_pos=None, + **response_kw + ): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all @@ -547,12 +604,18 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: - release_conn = response_kw.get('preload_content', True) + release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) + # Ensure that the URL we're connecting to is properly encoded + if url.startswith("/"): + url = six.ensure_str(_encode_target(url)) + else: + url = six.ensure_str(parse_url(url).url) + conn = None # Track whether `conn` needs to be released before @@ -563,13 +626,13 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, # # See issue #651 [1] for details. # - # [1] <https://github.com/shazow/urllib3/issues/651> + # [1] <https://github.com/urllib3/urllib3/issues/651> release_this_conn = release_conn # Merge the proxy headers. Only do this in HTTP. We have to copy the # headers dict so we can safely change it without those changes being # reflected in anyone else's copy. - if self.scheme == 'http': + if self.scheme == "http": headers = headers.copy() headers.update(self.proxy_headers) @@ -592,15 +655,22 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, conn.timeout = timeout_obj.connect_timeout - is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) + is_new_proxy_conn = self.proxy is not None and not getattr( + conn, "sock", None + ) if is_new_proxy_conn: self._prepare_proxy(conn) # Make the request on the httplib connection object. - httplib_response = self._make_request(conn, method, url, - timeout=timeout_obj, - body=body, headers=headers, - chunked=chunked) + httplib_response = self._make_request( + conn, + method, + url, + timeout=timeout_obj, + body=body, + headers=headers, + chunked=chunked, + ) # If we're going to release the connection in ``finally:``, then # the response doesn't need to know about the connection. Otherwise @@ -609,14 +679,16 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, response_conn = conn if not release_conn else None # Pass method to Response for length checking - response_kw['request_method'] = method + response_kw["request_method"] = method # Import httplib's response into our own wrapper object - response = self.ResponseCls.from_httplib(httplib_response, - pool=self, - connection=response_conn, - retries=retries, - **response_kw) + response = self.ResponseCls.from_httplib( + httplib_response, + pool=self, + connection=response_conn, + retries=retries, + **response_kw + ) # Everything went great! clean_exit = True @@ -625,20 +697,28 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, # Timed out by queue. 
raise EmptyPoolError(self, "No pool connections are available.") - except (TimeoutError, HTTPException, SocketError, ProtocolError, - BaseSSLError, SSLError, CertificateError) as e: + except ( + TimeoutError, + HTTPException, + SocketError, + ProtocolError, + BaseSSLError, + SSLError, + CertificateError, + ) as e: # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False if isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: - e = ProxyError('Cannot connect to proxy.', e) + e = ProxyError("Cannot connect to proxy.", e) elif isinstance(e, (SocketError, HTTPException)): - e = ProtocolError('Connection aborted.', e) + e = ProtocolError("Connection aborted.", e) - retries = retries.increment(method, url, error=e, _pool=self, - _stacktrace=sys.exc_info()[2]) + retries = retries.increment( + method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] + ) retries.sleep() # Keep track of the error for the retry warning. @@ -661,28 +741,45 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, if not conn: # Try again - log.warning("Retrying (%r) after connection " - "broken by '%r': %s", retries, err, url) - return self.urlopen(method, url, body, headers, retries, - redirect, assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) + log.warning( + "Retrying (%r) after connection broken by '%r': %s", retries, err, url + ) + return self.urlopen( + method, + url, + body, + headers, + retries, + redirect, + assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + **response_kw + ) def drain_and_release_conn(response): try: # discard any remaining response body, the connection will be # released back to the pool once the entire response is read response.read() - except (TimeoutError, HTTPException, SocketError, ProtocolError, - BaseSSLError, SSLError): + except ( + TimeoutError, + HTTPException, + SocketError, + ProtocolError, + BaseSSLError, + SSLError, + ): pass # Handle redirect? redirect_location = redirect and response.get_redirect_location() if redirect_location: if response.status == 303: - method = 'GET' + method = "GET" try: retries = retries.increment(method, url, response=response, _pool=self) @@ -700,15 +797,23 @@ def drain_and_release_conn(response): retries.sleep_for_retry(response) log.debug("Redirecting %s -> %s", url, redirect_location) return self.urlopen( - method, redirect_location, body, headers, - retries=retries, redirect=redirect, + method, + redirect_location, + body, + headers, + retries=retries, + redirect=redirect, assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + **response_kw + ) # Check if we should retry the HTTP response. 
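        # (Editor's note: the Retry-After value consulted below may be either
        #  a number of seconds or an HTTP-date per RFC 7231 §7.1.3;
        #  retries.sleep(response) honors it before falling back to
        #  exponential backoff.)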
- has_retry_after = bool(response.getheader('Retry-After')) + has_retry_after = bool(response.getheader("Retry-After")) if retries.is_retry(method, response.status, has_retry_after): try: retries = retries.increment(method, url, response=response, _pool=self) @@ -726,12 +831,20 @@ def drain_and_release_conn(response): retries.sleep(response) log.debug("Retry: %s", url) return self.urlopen( - method, url, body, headers, - retries=retries, redirect=redirect, + method, + url, + body, + headers, + retries=retries, + redirect=redirect, assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, + timeout=timeout, + pool_timeout=pool_timeout, release_conn=release_conn, - body_pos=body_pos, **response_kw) + chunked=chunked, + body_pos=body_pos, + **response_kw + ) return response @@ -754,21 +867,47 @@ class HTTPSConnectionPool(HTTPConnectionPool): the connection socket into an SSL socket. """ - scheme = 'https' + scheme = "https" ConnectionCls = HTTPSConnection - def __init__(self, host, port=None, - strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, - block=False, headers=None, retries=None, - _proxy=None, _proxy_headers=None, - key_file=None, cert_file=None, cert_reqs=None, - key_password=None, ca_certs=None, ssl_version=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None, **conn_kw): - - HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, - block, headers, retries, _proxy, _proxy_headers, - **conn_kw) + def __init__( + self, + host, + port=None, + strict=False, + timeout=Timeout.DEFAULT_TIMEOUT, + maxsize=1, + block=False, + headers=None, + retries=None, + _proxy=None, + _proxy_headers=None, + key_file=None, + cert_file=None, + cert_reqs=None, + key_password=None, + ca_certs=None, + ssl_version=None, + assert_hostname=None, + assert_fingerprint=None, + ca_cert_dir=None, + **conn_kw + ): + + HTTPConnectionPool.__init__( + self, + host, + port, + strict, + timeout, + maxsize, + block, + headers, + retries, + _proxy, + _proxy_headers, + **conn_kw + ) self.key_file = key_file self.cert_file = cert_file @@ -787,14 +926,16 @@ def _prepare_conn(self, conn): """ if isinstance(conn, VerifiedHTTPSConnection): - conn.set_cert(key_file=self.key_file, - key_password=self.key_password, - cert_file=self.cert_file, - cert_reqs=self.cert_reqs, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint) + conn.set_cert( + key_file=self.key_file, + key_password=self.key_password, + cert_file=self.cert_file, + cert_reqs=self.cert_reqs, + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + assert_hostname=self.assert_hostname, + assert_fingerprint=self.assert_fingerprint, + ) conn.ssl_version = self.ssl_version return conn @@ -811,12 +952,17 @@ def _new_conn(self): Return a fresh :class:`httplib.HTTPSConnection`. """ self.num_connections += 1 - log.debug("Starting new HTTPS connection (%d): %s:%s", - self.num_connections, self.host, self.port or "443") + log.debug( + "Starting new HTTPS connection (%d): %s:%s", + self.num_connections, + self.host, + self.port or "443", + ) if not self.ConnectionCls or self.ConnectionCls is DummyConnection: - raise SSLError("Can't connect to HTTPS URL because the SSL " - "module is not available.") + raise SSLError( + "Can't connect to HTTPS URL because the SSL module is not available." 
+ ) actual_host = self.host actual_port = self.port @@ -824,11 +970,16 @@ def _new_conn(self): actual_host = self.proxy.host actual_port = self.proxy.port - conn = self.ConnectionCls(host=actual_host, port=actual_port, - timeout=self.timeout.connect_timeout, - strict=self.strict, cert_file=self.cert_file, - key_file=self.key_file, key_password=self.key_password, - **self.conn_kw) + conn = self.ConnectionCls( + host=actual_host, + port=actual_port, + timeout=self.timeout.connect_timeout, + strict=self.strict, + cert_file=self.cert_file, + key_file=self.key_file, + key_password=self.key_password, + **self.conn_kw + ) return self._prepare_conn(conn) @@ -839,16 +990,19 @@ def _validate_conn(self, conn): super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. - if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` + if not getattr(conn, "sock", None): # AppEngine might not have `.sock` conn.connect() if not conn.is_verified: - warnings.warn(( - 'Unverified HTTPS request is being made. ' - 'Adding certificate verification is strongly advised. See: ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings'), - InsecureRequestWarning) + warnings.warn( + ( + "Unverified HTTPS request is being made. " + "Adding certificate verification is strongly advised. See: " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#ssl-warnings" + ), + InsecureRequestWarning, + ) def connection_from_url(url, **kw): @@ -873,7 +1027,7 @@ def connection_from_url(url, **kw): """ scheme, host, port = get_host(url) port = port or port_by_scheme.get(scheme, 80) - if scheme == 'https': + if scheme == "https": return HTTPSConnectionPool(host, port=port, **kw) else: return HTTPConnectionPool(host, port=port, **kw) @@ -884,14 +1038,14 @@ def _normalize_host(host, scheme): Normalize hosts for comparisons and use with sockets. """ + host = normalize_host(host, scheme) + # httplib doesn't like it when we include brackets in IPv6 addresses # Specifically, if we include brackets but also pass the port then # httplib crazily doubles up the square brackets on the Host header. # Instead, we need to make sure we never pass ``None`` as the port. # However, for backward compatibility reasons we can't actually # *assert* that. See http://bugs.python.org/issue28539 - if host.startswith('[') and host.endswith(']'): - host = host.strip('[]') - if scheme in NORMALIZABLE_SCHEMES: - host = normalize_host(host) + if host.startswith("[") and host.endswith("]"): + host = host[1:-1] return host diff --git a/pipenv/vendor/urllib3/contrib/_appengine_environ.py b/pipenv/vendor/urllib3/contrib/_appengine_environ.py index f3e00942cb..119efaeeb6 100644 --- a/pipenv/vendor/urllib3/contrib/_appengine_environ.py +++ b/pipenv/vendor/urllib3/contrib/_appengine_environ.py @@ -6,25 +6,31 @@ def is_appengine(): - return (is_local_appengine() or - is_prod_appengine() or - is_prod_appengine_mvms()) + return "APPENGINE_RUNTIME" in os.environ def is_appengine_sandbox(): - return is_appengine() and not is_prod_appengine_mvms() + """Reports if the app is running in the first generation sandbox. + + The second generation runtimes are technically still in a sandbox, but it + is much less restrictive, so generally you shouldn't need to check for it. 
+ see https://cloud.google.com/appengine/docs/standard/runtimes + """ + return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27" def is_local_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Development/' in os.environ['SERVER_SOFTWARE']) + return is_appengine() and os.environ.get("SERVER_SOFTWARE", "").startswith( + "Development/" + ) def is_prod_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and - not is_prod_appengine_mvms()) + return is_appengine() and os.environ.get("SERVER_SOFTWARE", "").startswith( + "Google App Engine/" + ) def is_prod_appengine_mvms(): - return os.environ.get('GAE_VM', False) == 'true' + """Deprecated.""" + return False diff --git a/pipenv/vendor/urllib3/contrib/_securetransport/bindings.py b/pipenv/vendor/urllib3/contrib/_securetransport/bindings.py index be34215359..d9b6733318 100644 --- a/pipenv/vendor/urllib3/contrib/_securetransport/bindings.py +++ b/pipenv/vendor/urllib3/contrib/_securetransport/bindings.py @@ -34,29 +34,35 @@ import platform from ctypes.util import find_library from ctypes import ( - c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long, - c_bool + c_void_p, + c_int32, + c_char_p, + c_size_t, + c_byte, + c_uint32, + c_ulong, + c_long, + c_bool, ) from ctypes import CDLL, POINTER, CFUNCTYPE -security_path = find_library('Security') +security_path = find_library("Security") if not security_path: - raise ImportError('The library Security could not be found') + raise ImportError("The library Security could not be found") -core_foundation_path = find_library('CoreFoundation') +core_foundation_path = find_library("CoreFoundation") if not core_foundation_path: - raise ImportError('The library CoreFoundation could not be found') + raise ImportError("The library CoreFoundation could not be found") version = platform.mac_ver()[0] -version_info = tuple(map(int, version.split('.'))) +version_info = tuple(map(int, version.split("."))) if version_info < (10, 8): raise OSError( - 'Only OS X 10.8 and newer are supported, not %s.%s' % ( - version_info[0], version_info[1] - ) + "Only OS X 10.8 and newer are supported, not %s.%s" + % (version_info[0], version_info[1]) ) Security = CDLL(security_path, use_errno=True) @@ -129,27 +135,19 @@ Security.SecKeyGetTypeID.argtypes = [] Security.SecKeyGetTypeID.restype = CFTypeID - Security.SecCertificateCreateWithData.argtypes = [ - CFAllocatorRef, - CFDataRef - ] + Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef] Security.SecCertificateCreateWithData.restype = SecCertificateRef - Security.SecCertificateCopyData.argtypes = [ - SecCertificateRef - ] + Security.SecCertificateCopyData.argtypes = [SecCertificateRef] Security.SecCertificateCopyData.restype = CFDataRef - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] + Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] Security.SecCopyErrorMessageString.restype = CFStringRef Security.SecIdentityCreateWithCertificate.argtypes = [ CFTypeRef, SecCertificateRef, - POINTER(SecIdentityRef) + POINTER(SecIdentityRef), ] Security.SecIdentityCreateWithCertificate.restype = OSStatus @@ -159,201 +157,126 @@ c_void_p, Boolean, c_void_p, - POINTER(SecKeychainRef) + POINTER(SecKeychainRef), ] Security.SecKeychainCreate.restype = OSStatus - Security.SecKeychainDelete.argtypes = [ - SecKeychainRef - ] + Security.SecKeychainDelete.argtypes = [SecKeychainRef] Security.SecKeychainDelete.restype = OSStatus 
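    # (Editor's sketch — illustrative only, not part of the upstream diff:
    #  every declaration in this module follows the standard ctypes pattern
    #  of assigning .argtypes/.restype so calls are marshalled correctly.
    #  A self-contained POSIX example, with libc standing in for Security:
    #
    #      import ctypes
    #
    #      libc = ctypes.CDLL(None)                  # symbols of this process
    #      libc.strlen.argtypes = [ctypes.c_char_p]  # one const char *
    #      libc.strlen.restype = ctypes.c_size_t     # returns size_t
    #      assert libc.strlen(b"urllib3") == 7
    # )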
Security.SecPKCS12Import.argtypes = [ CFDataRef, CFDictionaryRef, - POINTER(CFArrayRef) + POINTER(CFArrayRef), ] Security.SecPKCS12Import.restype = OSStatus SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) - SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)) + SSLWriteFunc = CFUNCTYPE( + OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t) + ) - Security.SSLSetIOFuncs.argtypes = [ - SSLContextRef, - SSLReadFunc, - SSLWriteFunc - ] + Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc] Security.SSLSetIOFuncs.restype = OSStatus - Security.SSLSetPeerID.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] + Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t] Security.SSLSetPeerID.restype = OSStatus - Security.SSLSetCertificate.argtypes = [ - SSLContextRef, - CFArrayRef - ] + Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef] Security.SSLSetCertificate.restype = OSStatus - Security.SSLSetCertificateAuthorities.argtypes = [ - SSLContextRef, - CFTypeRef, - Boolean - ] + Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean] Security.SSLSetCertificateAuthorities.restype = OSStatus - Security.SSLSetConnection.argtypes = [ - SSLContextRef, - SSLConnectionRef - ] + Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef] Security.SSLSetConnection.restype = OSStatus - Security.SSLSetPeerDomainName.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] + Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t] Security.SSLSetPeerDomainName.restype = OSStatus - Security.SSLHandshake.argtypes = [ - SSLContextRef - ] + Security.SSLHandshake.argtypes = [SSLContextRef] Security.SSLHandshake.restype = OSStatus - Security.SSLRead.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] + Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] Security.SSLRead.restype = OSStatus - Security.SSLWrite.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] + Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] Security.SSLWrite.restype = OSStatus - Security.SSLClose.argtypes = [ - SSLContextRef - ] + Security.SSLClose.argtypes = [SSLContextRef] Security.SSLClose.restype = OSStatus - Security.SSLGetNumberSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(c_size_t) - ] + Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)] Security.SSLGetNumberSupportedCiphers.restype = OSStatus Security.SSLGetSupportedCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), - POINTER(c_size_t) + POINTER(c_size_t), ] Security.SSLGetSupportedCiphers.restype = OSStatus Security.SSLSetEnabledCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), - c_size_t + c_size_t, ] Security.SSLSetEnabledCiphers.restype = OSStatus - Security.SSLGetNumberEnabledCiphers.argtype = [ - SSLContextRef, - POINTER(c_size_t) - ] + Security.SSLGetNumberEnabledCiphers.argtype = [SSLContextRef, POINTER(c_size_t)] Security.SSLGetNumberEnabledCiphers.restype = OSStatus Security.SSLGetEnabledCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), - POINTER(c_size_t) + POINTER(c_size_t), ] Security.SSLGetEnabledCiphers.restype = OSStatus - Security.SSLGetNegotiatedCipher.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite) - ] + Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, 
POINTER(SSLCipherSuite)] Security.SSLGetNegotiatedCipher.restype = OSStatus Security.SSLGetNegotiatedProtocolVersion.argtypes = [ SSLContextRef, - POINTER(SSLProtocol) + POINTER(SSLProtocol), ] Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus - Security.SSLCopyPeerTrust.argtypes = [ - SSLContextRef, - POINTER(SecTrustRef) - ] + Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)] Security.SSLCopyPeerTrust.restype = OSStatus - Security.SecTrustSetAnchorCertificates.argtypes = [ - SecTrustRef, - CFArrayRef - ] + Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef] Security.SecTrustSetAnchorCertificates.restype = OSStatus - Security.SecTrustSetAnchorCertificatesOnly.argstypes = [ - SecTrustRef, - Boolean - ] + Security.SecTrustSetAnchorCertificatesOnly.argstypes = [SecTrustRef, Boolean] Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus - Security.SecTrustEvaluate.argtypes = [ - SecTrustRef, - POINTER(SecTrustResultType) - ] + Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)] Security.SecTrustEvaluate.restype = OSStatus - Security.SecTrustGetCertificateCount.argtypes = [ - SecTrustRef - ] + Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef] Security.SecTrustGetCertificateCount.restype = CFIndex - Security.SecTrustGetCertificateAtIndex.argtypes = [ - SecTrustRef, - CFIndex - ] + Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex] Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef Security.SSLCreateContext.argtypes = [ CFAllocatorRef, SSLProtocolSide, - SSLConnectionType + SSLConnectionType, ] Security.SSLCreateContext.restype = SSLContextRef - Security.SSLSetSessionOption.argtypes = [ - SSLContextRef, - SSLSessionOption, - Boolean - ] + Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean] Security.SSLSetSessionOption.restype = OSStatus - Security.SSLSetProtocolVersionMin.argtypes = [ - SSLContextRef, - SSLProtocol - ] + Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol] Security.SSLSetProtocolVersionMin.restype = OSStatus - Security.SSLSetProtocolVersionMax.argtypes = [ - SSLContextRef, - SSLProtocol - ] + Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol] Security.SSLSetProtocolVersionMax.restype = OSStatus - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] + Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] Security.SecCopyErrorMessageString.restype = CFStringRef Security.SSLReadFunc = SSLReadFunc @@ -369,64 +292,47 @@ Security.OSStatus = OSStatus Security.kSecImportExportPassphrase = CFStringRef.in_dll( - Security, 'kSecImportExportPassphrase' + Security, "kSecImportExportPassphrase" ) Security.kSecImportItemIdentity = CFStringRef.in_dll( - Security, 'kSecImportItemIdentity' + Security, "kSecImportItemIdentity" ) # CoreFoundation time! 
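    # (Editor's note: declaring .restype before first use matters here —
    #  ctypes defaults return types to c_int, which would truncate 64-bit
    #  CoreFoundation pointers; CFRetain/CFRelease lead the section because
    #  CF objects are manually reference-counted.)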
- CoreFoundation.CFRetain.argtypes = [ - CFTypeRef - ] + CoreFoundation.CFRetain.argtypes = [CFTypeRef] CoreFoundation.CFRetain.restype = CFTypeRef - CoreFoundation.CFRelease.argtypes = [ - CFTypeRef - ] + CoreFoundation.CFRelease.argtypes = [CFTypeRef] CoreFoundation.CFRelease.restype = None - CoreFoundation.CFGetTypeID.argtypes = [ - CFTypeRef - ] + CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef] CoreFoundation.CFGetTypeID.restype = CFTypeID CoreFoundation.CFStringCreateWithCString.argtypes = [ CFAllocatorRef, c_char_p, - CFStringEncoding + CFStringEncoding, ] CoreFoundation.CFStringCreateWithCString.restype = CFStringRef - CoreFoundation.CFStringGetCStringPtr.argtypes = [ - CFStringRef, - CFStringEncoding - ] + CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding] CoreFoundation.CFStringGetCStringPtr.restype = c_char_p CoreFoundation.CFStringGetCString.argtypes = [ CFStringRef, c_char_p, CFIndex, - CFStringEncoding + CFStringEncoding, ] CoreFoundation.CFStringGetCString.restype = c_bool - CoreFoundation.CFDataCreate.argtypes = [ - CFAllocatorRef, - c_char_p, - CFIndex - ] + CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex] CoreFoundation.CFDataCreate.restype = CFDataRef - CoreFoundation.CFDataGetLength.argtypes = [ - CFDataRef - ] + CoreFoundation.CFDataGetLength.argtypes = [CFDataRef] CoreFoundation.CFDataGetLength.restype = CFIndex - CoreFoundation.CFDataGetBytePtr.argtypes = [ - CFDataRef - ] + CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef] CoreFoundation.CFDataGetBytePtr.restype = c_void_p CoreFoundation.CFDictionaryCreate.argtypes = [ @@ -435,14 +341,11 @@ POINTER(CFTypeRef), CFIndex, CFDictionaryKeyCallBacks, - CFDictionaryValueCallBacks + CFDictionaryValueCallBacks, ] CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef - CoreFoundation.CFDictionaryGetValue.argtypes = [ - CFDictionaryRef, - CFTypeRef - ] + CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef] CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef CoreFoundation.CFArrayCreate.argtypes = [ @@ -456,36 +359,30 @@ CoreFoundation.CFArrayCreateMutable.argtypes = [ CFAllocatorRef, CFIndex, - CFArrayCallBacks + CFArrayCallBacks, ] CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef - CoreFoundation.CFArrayAppendValue.argtypes = [ - CFMutableArrayRef, - c_void_p - ] + CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p] CoreFoundation.CFArrayAppendValue.restype = None - CoreFoundation.CFArrayGetCount.argtypes = [ - CFArrayRef - ] + CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef] CoreFoundation.CFArrayGetCount.restype = CFIndex - CoreFoundation.CFArrayGetValueAtIndex.argtypes = [ - CFArrayRef, - CFIndex - ] + CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex] CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( - CoreFoundation, 'kCFAllocatorDefault' + CoreFoundation, "kCFAllocatorDefault" + ) + CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll( + CoreFoundation, "kCFTypeArrayCallBacks" ) - CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks') CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryKeyCallBacks' + CoreFoundation, "kCFTypeDictionaryKeyCallBacks" ) CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryValueCallBacks' + CoreFoundation, 
"kCFTypeDictionaryValueCallBacks" ) CoreFoundation.CFTypeRef = CFTypeRef @@ -494,7 +391,7 @@ CoreFoundation.CFDictionaryRef = CFDictionaryRef except (AttributeError): - raise ImportError('Error initializing ctypes') + raise ImportError("Error initializing ctypes") class CFConst(object): @@ -502,6 +399,7 @@ class CFConst(object): A class object that acts as essentially a namespace for CoreFoundation constants. """ + kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) @@ -509,6 +407,7 @@ class SecurityConst(object): """ A class object that acts as essentially a namespace for Security constants. """ + kSSLSessionOptionBreakOnServerAuth = 0 kSSLProtocol2 = 1 @@ -516,6 +415,7 @@ class SecurityConst(object): kTLSProtocol1 = 4 kTLSProtocol11 = 7 kTLSProtocol12 = 8 + # SecureTransport does not support TLS 1.3 even if there's a constant for it kTLSProtocol13 = 10 kTLSProtocolMaxSupported = 999 diff --git a/pipenv/vendor/urllib3/contrib/_securetransport/low_level.py b/pipenv/vendor/urllib3/contrib/_securetransport/low_level.py index b13cd9e72c..e60168cac1 100644 --- a/pipenv/vendor/urllib3/contrib/_securetransport/low_level.py +++ b/pipenv/vendor/urllib3/contrib/_securetransport/low_level.py @@ -66,22 +66,18 @@ def _cf_string_to_unicode(value): value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p)) string = CoreFoundation.CFStringGetCStringPtr( - value_as_void_p, - CFConst.kCFStringEncodingUTF8 + value_as_void_p, CFConst.kCFStringEncodingUTF8 ) if string is None: buffer = ctypes.create_string_buffer(1024) result = CoreFoundation.CFStringGetCString( - value_as_void_p, - buffer, - 1024, - CFConst.kCFStringEncodingUTF8 + value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8 ) if not result: - raise OSError('Error copying C string from CFStringRef') + raise OSError("Error copying C string from CFStringRef") string = buffer.value if string is not None: - string = string.decode('utf-8') + string = string.decode("utf-8") return string @@ -97,8 +93,8 @@ def _assert_no_error(error, exception_class=None): output = _cf_string_to_unicode(cf_error_string) CoreFoundation.CFRelease(cf_error_string) - if output is None or output == u'': - output = u'OSStatus %s' % error + if output is None or output == u"": + output = u"OSStatus %s" % error if exception_class is None: exception_class = ssl.SSLError @@ -115,8 +111,7 @@ def _cert_array_from_pem(pem_bundle): pem_bundle = pem_bundle.replace(b"\r\n", b"\n") der_certs = [ - base64.b64decode(match.group(1)) - for match in _PEM_CERTS_RE.finditer(pem_bundle) + base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle) ] if not der_certs: raise ssl.SSLError("No root certificates specified") @@ -124,7 +119,7 @@ def _cert_array_from_pem(pem_bundle): cert_array = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks) + ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), ) if not cert_array: raise ssl.SSLError("Unable to allocate memory!") @@ -186,21 +181,16 @@ def _temporary_keychain(): # some random bytes to password-protect the keychain we're creating, so we # ask for 40 random bytes. 
random_bytes = os.urandom(40) - filename = base64.b16encode(random_bytes[:8]).decode('utf-8') + filename = base64.b16encode(random_bytes[:8]).decode("utf-8") password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8 tempdirectory = tempfile.mkdtemp() - keychain_path = os.path.join(tempdirectory, filename).encode('utf-8') + keychain_path = os.path.join(tempdirectory, filename).encode("utf-8") # We now want to create the keychain itself. keychain = Security.SecKeychainRef() status = Security.SecKeychainCreate( - keychain_path, - len(password), - password, - False, - None, - ctypes.byref(keychain) + keychain_path, len(password), password, False, None, ctypes.byref(keychain) ) _assert_no_error(status) @@ -219,14 +209,12 @@ def _load_items_from_file(keychain, path): identities = [] result_array = None - with open(path, 'rb') as f: + with open(path, "rb") as f: raw_filedata = f.read() try: filedata = CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, - raw_filedata, - len(raw_filedata) + CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata) ) result_array = CoreFoundation.CFArrayRef() result = Security.SecItemImport( @@ -237,7 +225,7 @@ def _load_items_from_file(keychain, path): 0, # import flags None, # key params, can include passphrase in the future keychain, # The keychain to insert into - ctypes.byref(result_array) # Results + ctypes.byref(result_array), # Results ) _assert_no_error(result) @@ -247,9 +235,7 @@ def _load_items_from_file(keychain, path): # keychain already has them! result_count = CoreFoundation.CFArrayGetCount(result_array) for index in range(result_count): - item = CoreFoundation.CFArrayGetValueAtIndex( - result_array, index - ) + item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index) item = ctypes.cast(item, CoreFoundation.CFTypeRef) if _is_cert(item): @@ -307,9 +293,7 @@ def _load_client_cert_chain(keychain, *paths): try: for file_path in paths: - new_identities, new_certs = _load_items_from_file( - keychain, file_path - ) + new_identities, new_certs = _load_items_from_file(keychain, file_path) identities.extend(new_identities) certificates.extend(new_certs) @@ -318,9 +302,7 @@ def _load_client_cert_chain(keychain, *paths): if not identities: new_identity = Security.SecIdentityRef() status = Security.SecIdentityCreateWithCertificate( - keychain, - certificates[0], - ctypes.byref(new_identity) + keychain, certificates[0], ctypes.byref(new_identity) ) _assert_no_error(status) identities.append(new_identity) diff --git a/pipenv/vendor/urllib3/contrib/appengine.py b/pipenv/vendor/urllib3/contrib/appengine.py index 2952f114df..9b7044ffb0 100644 --- a/pipenv/vendor/urllib3/contrib/appengine.py +++ b/pipenv/vendor/urllib3/contrib/appengine.py @@ -50,7 +50,7 @@ MaxRetryError, ProtocolError, TimeoutError, - SSLError + SSLError, ) from ..request import RequestMethods @@ -96,23 +96,24 @@ class AppEngineManager(RequestMethods): Beyond those cases, it will raise normal urllib3 errors. 
""" - def __init__(self, headers=None, retries=None, validate_certificate=True, - urlfetch_retries=True): + def __init__( + self, + headers=None, + retries=None, + validate_certificate=True, + urlfetch_retries=True, + ): if not urlfetch: raise AppEnginePlatformError( - "URLFetch is not available in this environment.") - - if is_prod_appengine_mvms(): - raise AppEnginePlatformError( - "Use normal urllib3.PoolManager instead of AppEngineManager" - "on Managed VMs, as using URLFetch is not necessary in " - "this environment.") + "URLFetch is not available in this environment." + ) warnings.warn( "urllib3 is using URLFetch on Google App Engine sandbox instead " "of sockets. To use sockets directly instead of URLFetch see " "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.", - AppEnginePlatformWarning) + AppEnginePlatformWarning, + ) RequestMethods.__init__(self, headers) self.validate_certificate = validate_certificate @@ -127,17 +128,22 @@ def __exit__(self, exc_type, exc_val, exc_tb): # Return False to re-raise any potential exceptions return False - def urlopen(self, method, url, body=None, headers=None, - retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, - **response_kw): + def urlopen( + self, + method, + url, + body=None, + headers=None, + retries=None, + redirect=True, + timeout=Timeout.DEFAULT_TIMEOUT, + **response_kw + ): retries = self._get_retries(retries, redirect) try: - follow_redirects = ( - redirect and - retries.redirect != 0 and - retries.total) + follow_redirects = redirect and retries.redirect != 0 and retries.total response = urlfetch.fetch( url, payload=body, @@ -152,44 +158,52 @@ def urlopen(self, method, url, body=None, headers=None, raise TimeoutError(self, e) except urlfetch.InvalidURLError as e: - if 'too large' in str(e): + if "too large" in str(e): raise AppEnginePlatformError( "URLFetch request too large, URLFetch only " - "supports requests up to 10mb in size.", e) + "supports requests up to 10mb in size.", + e, + ) raise ProtocolError(e) except urlfetch.DownloadError as e: - if 'Too many redirects' in str(e): + if "Too many redirects" in str(e): raise MaxRetryError(self, url, reason=e) raise ProtocolError(e) except urlfetch.ResponseTooLargeError as e: raise AppEnginePlatformError( "URLFetch response too large, URLFetch only supports" - "responses up to 32mb in size.", e) + "responses up to 32mb in size.", + e, + ) except urlfetch.SSLCertificateError as e: raise SSLError(e) except urlfetch.InvalidMethodError as e: raise AppEnginePlatformError( - "URLFetch does not support method: %s" % method, e) + "URLFetch does not support method: %s" % method, e + ) http_response = self._urlfetch_response_to_http_response( - response, retries=retries, **response_kw) + response, retries=retries, **response_kw + ) # Handle redirect? 
redirect_location = redirect and http_response.get_redirect_location() if redirect_location: # Check for redirect response - if (self.urlfetch_retries and retries.raise_on_redirect): + if self.urlfetch_retries and retries.raise_on_redirect: raise MaxRetryError(self, url, "too many redirects") else: if http_response.status == 303: - method = 'GET' + method = "GET" try: - retries = retries.increment(method, url, response=http_response, _pool=self) + retries = retries.increment( + method, url, response=http_response, _pool=self + ) except MaxRetryError: if retries.raise_on_redirect: raise MaxRetryError(self, url, "too many redirects") @@ -199,22 +213,32 @@ def urlopen(self, method, url, body=None, headers=None, log.debug("Redirecting %s -> %s", url, redirect_location) redirect_url = urljoin(url, redirect_location) return self.urlopen( - method, redirect_url, body, headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) + method, + redirect_url, + body, + headers, + retries=retries, + redirect=redirect, + timeout=timeout, + **response_kw + ) # Check if we should retry the HTTP response. - has_retry_after = bool(http_response.getheader('Retry-After')) + has_retry_after = bool(http_response.getheader("Retry-After")) if retries.is_retry(method, http_response.status, has_retry_after): - retries = retries.increment( - method, url, response=http_response, _pool=self) + retries = retries.increment(method, url, response=http_response, _pool=self) log.debug("Retry: %s", url) retries.sleep(http_response) return self.urlopen( - method, url, - body=body, headers=headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) + method, + url, + body=body, + headers=headers, + retries=retries, + redirect=redirect, + timeout=timeout, + **response_kw + ) return http_response @@ -223,18 +247,18 @@ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): if is_prod_appengine(): # Production GAE handles deflate encoding automatically, but does # not remove the encoding header. - content_encoding = urlfetch_resp.headers.get('content-encoding') + content_encoding = urlfetch_resp.headers.get("content-encoding") - if content_encoding == 'deflate': - del urlfetch_resp.headers['content-encoding'] + if content_encoding == "deflate": + del urlfetch_resp.headers["content-encoding"] - transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') + transfer_encoding = urlfetch_resp.headers.get("transfer-encoding") # We have a full response's content, # so let's make sure we don't report ourselves as chunked data. 
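# ---------------------------------------------------------------------------
# Editorial aside -- an illustrative sketch, not part of the patch: the
# cleanup continuing just below drops "chunked" from transfer-encoding,
# since URLFetch already delivers the full body. Hypothetical header value:
headers = {"transfer-encoding": "chunked, gzip"}
encodings = headers["transfer-encoding"].split(",")
encodings.remove("chunked")
headers["transfer-encoding"] = ",".join(encodings)
print(headers)  # {'transfer-encoding': ' gzip'}
# ---------------------------------------------------------------------------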
- if transfer_encoding == 'chunked': + if transfer_encoding == "chunked": encodings = transfer_encoding.split(",") - encodings.remove('chunked') - urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) + encodings.remove("chunked") + urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings) original_response = HTTPResponse( # In order for decoding to work, we must present the content as @@ -262,20 +286,21 @@ def _get_absolute_timeout(self, timeout): warnings.warn( "URLFetch does not support granular timeout settings, " "reverting to total or default URLFetch timeout.", - AppEnginePlatformWarning) + AppEnginePlatformWarning, + ) return timeout.total return timeout def _get_retries(self, retries, redirect): if not isinstance(retries, Retry): - retries = Retry.from_int( - retries, redirect=redirect, default=self.retries) + retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if retries.connect or retries.read or retries.redirect: warnings.warn( "URLFetch only supports total retries and does not " "recognize connect, read, or redirect retry parameters.", - AppEnginePlatformWarning) + AppEnginePlatformWarning, + ) return retries diff --git a/pipenv/vendor/urllib3/contrib/ntlmpool.py b/pipenv/vendor/urllib3/contrib/ntlmpool.py index 8ea127c583..1fd242a6e0 100644 --- a/pipenv/vendor/urllib3/contrib/ntlmpool.py +++ b/pipenv/vendor/urllib3/contrib/ntlmpool.py @@ -20,7 +20,7 @@ class NTLMConnectionPool(HTTPSConnectionPool): Implements an NTLM authentication version of an urllib3 connection pool """ - scheme = 'https' + scheme = "https" def __init__(self, user, pw, authurl, *args, **kwargs): """ @@ -31,7 +31,7 @@ def __init__(self, user, pw, authurl, *args, **kwargs): super(NTLMConnectionPool, self).__init__(*args, **kwargs) self.authurl = authurl self.rawuser = user - user_parts = user.split('\\', 1) + user_parts = user.split("\\", 1) self.domain = user_parts[0].upper() self.user = user_parts[1] self.pw = pw @@ -40,72 +40,82 @@ def _new_conn(self): # Performs the NTLM handshake that secures the connection. The socket # must be kept open while requests are performed. self.num_connections += 1 - log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s', - self.num_connections, self.host, self.authurl) + log.debug( + "Starting NTLM HTTPS connection no. 
%d: https://%s%s", + self.num_connections, + self.host, + self.authurl, + ) - headers = {'Connection': 'Keep-Alive'} - req_header = 'Authorization' - resp_header = 'www-authenticate' + headers = {"Connection": "Keep-Alive"} + req_header = "Authorization" + resp_header = "www-authenticate" conn = HTTPSConnection(host=self.host, port=self.port) # Send negotiation message - headers[req_header] = ( - 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) + headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE( + self.rawuser + ) + log.debug("Request headers: %s", headers) + conn.request("GET", self.authurl, None, headers) res = conn.getresponse() reshdr = dict(res.getheaders()) - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', reshdr) - log.debug('Response data: %s [...]', res.read(100)) + log.debug("Response status: %s %s", res.status, res.reason) + log.debug("Response headers: %s", reshdr) + log.debug("Response data: %s [...]", res.read(100)) # Remove the reference to the socket, so that it can not be closed by # the response object (we want to keep the socket open) res.fp = None # Server should respond with a challenge message - auth_header_values = reshdr[resp_header].split(', ') + auth_header_values = reshdr[resp_header].split(", ") auth_header_value = None for s in auth_header_values: - if s[:5] == 'NTLM ': + if s[:5] == "NTLM ": auth_header_value = s[5:] if auth_header_value is None: - raise Exception('Unexpected %s response header: %s' % - (resp_header, reshdr[resp_header])) + raise Exception( + "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header]) + ) # Send authentication message - ServerChallenge, NegotiateFlags = \ - ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) - auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, - self.user, - self.domain, - self.pw, - NegotiateFlags) - headers[req_header] = 'NTLM %s' % auth_msg - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) + ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE( + auth_header_value + ) + auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE( + ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags + ) + headers[req_header] = "NTLM %s" % auth_msg + log.debug("Request headers: %s", headers) + conn.request("GET", self.authurl, None, headers) res = conn.getresponse() - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', dict(res.getheaders())) - log.debug('Response data: %s [...]', res.read()[:100]) + log.debug("Response status: %s %s", res.status, res.reason) + log.debug("Response headers: %s", dict(res.getheaders())) + log.debug("Response data: %s [...]", res.read()[:100]) if res.status != 200: if res.status == 401: - raise Exception('Server rejected request: wrong ' - 'username or password') - raise Exception('Wrong server response: %s %s' % - (res.status, res.reason)) + raise Exception("Server rejected request: wrong username or password") + raise Exception("Wrong server response: %s %s" % (res.status, res.reason)) res.fp = None - log.debug('Connection established') + log.debug("Connection established") return conn - def urlopen(self, method, url, body=None, headers=None, retries=3, - redirect=True, assert_same_host=True): + def urlopen( + self, + method, + url, + body=None, + headers=None, + retries=3, + 
redirect=True, + assert_same_host=True, + ): if headers is None: headers = {} - headers['Connection'] = 'Keep-Alive' - return super(NTLMConnectionPool, self).urlopen(method, url, body, - headers, retries, - redirect, - assert_same_host) + headers["Connection"] = "Keep-Alive" + return super(NTLMConnectionPool, self).urlopen( + method, url, body, headers, retries, redirect, assert_same_host + ) diff --git a/pipenv/vendor/urllib3/contrib/pyopenssl.py b/pipenv/vendor/urllib3/contrib/pyopenssl.py index 821c174fdc..3051ef3af2 100644 --- a/pipenv/vendor/urllib3/contrib/pyopenssl.py +++ b/pipenv/vendor/urllib3/contrib/pyopenssl.py @@ -47,6 +47,7 @@ from cryptography import x509 from cryptography.hazmat.backends.openssl import backend as openssl_backend from cryptography.hazmat.backends.openssl.x509 import _Certificate + try: from cryptography.x509 import UnsupportedExtension except ImportError: @@ -54,6 +55,7 @@ class UnsupportedExtension(Exception): pass + from socket import timeout, error as SocketError from io import BytesIO @@ -71,7 +73,7 @@ class UnsupportedExtension(Exception): from .. import util -__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] +__all__ = ["inject_into_urllib3", "extract_from_urllib3"] # SNI always works. HAS_SNI = True @@ -82,25 +84,23 @@ class UnsupportedExtension(Exception): ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, } -if hasattr(ssl, 'PROTOCOL_SSLv3') and hasattr(OpenSSL.SSL, 'SSLv3_METHOD'): +if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"): _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD -if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): +if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"): _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD -if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): +if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"): _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD _stdlib_to_openssl_verify = { ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, - ssl.CERT_REQUIRED: - OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, + ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER + + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } -_openssl_to_stdlib_verify = dict( - (v, k) for k, v in _stdlib_to_openssl_verify.items() -) +_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items()) # OpenSSL will only write 16K at a time SSL_WRITE_BLOCKSIZE = 16384 @@ -113,7 +113,7 @@ class UnsupportedExtension(Exception): def inject_into_urllib3(): - 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' + "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support." _validate_dependencies_met() @@ -126,7 +126,7 @@ def inject_into_urllib3(): def extract_from_urllib3(): - 'Undo monkey-patching by :func:`inject_into_urllib3`.' + "Undo monkey-patching by :func:`inject_into_urllib3`." util.SSLContext = orig_util_SSLContext util.ssl_.SSLContext = orig_util_SSLContext @@ -143,17 +143,23 @@ def _validate_dependencies_met(): """ # Method added in `cryptography==1.1`; not available in older versions from cryptography.x509.extensions import Extensions + if getattr(Extensions, "get_extension_for_class", None) is None: - raise ImportError("'cryptography' module missing required functionality. " - "Try upgrading to v1.3.4 or newer.") + raise ImportError( + "'cryptography' module missing required functionality. 
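# ---------------------------------------------------------------------------
# Editorial aside -- an illustrative usage sketch, not part of the patch: the
# pyopenssl module being reformatted here is activated by monkey-patching, as
# its own docstring describes.
import urllib3.contrib.pyopenssl

urllib3.contrib.pyopenssl.inject_into_urllib3()
# ... urllib3 requests now use PyOpenSSL for TLS ...
urllib3.contrib.pyopenssl.extract_from_urllib3()  # undo the patching
# ---------------------------------------------------------------------------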
" + "Try upgrading to v1.3.4 or newer." + ) # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 # attribute is only present on those versions. from OpenSSL.crypto import X509 + x509 = X509() if getattr(x509, "_x509", None) is None: - raise ImportError("'pyOpenSSL' module missing required functionality. " - "Try upgrading to v0.14 or newer.") + raise ImportError( + "'pyOpenSSL' module missing required functionality. " + "Try upgrading to v0.14 or newer." + ) def _dnsname_to_stdlib(name): @@ -169,6 +175,7 @@ def _dnsname_to_stdlib(name): If the name cannot be idna-encoded then we return None signalling that the name given should be skipped. """ + def idna_encode(name): """ Borrowed wholesale from the Python Cryptography Project. It turns out @@ -178,23 +185,23 @@ def idna_encode(name): import idna try: - for prefix in [u'*.', u'.']: + for prefix in [u"*.", u"."]: if name.startswith(prefix): - name = name[len(prefix):] - return prefix.encode('ascii') + idna.encode(name) + name = name[len(prefix) :] + return prefix.encode("ascii") + idna.encode(name) return idna.encode(name) except idna.core.IDNAError: return None # Don't send IPv6 addresses through the IDNA encoder. - if ':' in name: + if ":" in name: return name name = idna_encode(name) if name is None: return None elif sys.version_info >= (3, 0): - name = name.decode('utf-8') + name = name.decode("utf-8") return name @@ -213,14 +220,16 @@ def get_subj_alt_name(peer_cert): # We want to find the SAN extension. Ask Cryptography to locate it (it's # faster than looping in Python) try: - ext = cert.extensions.get_extension_for_class( - x509.SubjectAlternativeName - ).value + ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value except x509.ExtensionNotFound: # No such extension, return the empty list. return [] - except (x509.DuplicateExtension, UnsupportedExtension, - x509.UnsupportedGeneralNameType, UnicodeError) as e: + except ( + x509.DuplicateExtension, + UnsupportedExtension, + x509.UnsupportedGeneralNameType, + UnicodeError, + ) as e: # A problem has been found with the quality of the certificate. Assume # no SAN field is present. log.warning( @@ -239,23 +248,23 @@ def get_subj_alt_name(peer_cert): # does with certificates, and so we need to attempt to do the same. # We also want to skip over names which cannot be idna encoded. names = [ - ('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName)) + ("DNS", name) + for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName)) if name is not None ] names.extend( - ('IP Address', str(name)) - for name in ext.get_values_for_type(x509.IPAddress) + ("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress) ) return names class WrappedSocket(object): - '''API-compatibility wrapper for Python OpenSSL's Connection-class. + """API-compatibility wrapper for Python OpenSSL's Connection-class. Note: _makefile_refs, _drop() and _reuse() are needed for the garbage collector of pypy. 
- ''' + """ def __init__(self, connection, socket, suppress_ragged_eofs=True): self.connection = connection @@ -278,18 +287,18 @@ def recv(self, *args, **kwargs): try: data = self.connection.recv(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): - return b'' + if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): + return b"" else: raise SocketError(str(e)) except OpenSSL.SSL.ZeroReturnError: if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return b'' + return b"" else: raise except OpenSSL.SSL.WantReadError: if not util.wait_for_read(self.socket, self.socket.gettimeout()): - raise timeout('The read operation timed out') + raise timeout("The read operation timed out") else: return self.recv(*args, **kwargs) @@ -303,7 +312,7 @@ def recv_into(self, *args, **kwargs): try: return self.connection.recv_into(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): + if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): return 0 else: raise SocketError(str(e)) @@ -314,7 +323,7 @@ def recv_into(self, *args, **kwargs): raise except OpenSSL.SSL.WantReadError: if not util.wait_for_read(self.socket, self.socket.gettimeout()): - raise timeout('The read operation timed out') + raise timeout("The read operation timed out") else: return self.recv_into(*args, **kwargs) @@ -339,7 +348,9 @@ def _send_until_done(self, data): def sendall(self, data): total_sent = 0 while total_sent < len(data): - sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) + sent = self._send_until_done( + data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE] + ) total_sent += sent def shutdown(self): @@ -363,15 +374,11 @@ def getpeercert(self, binary_form=False): return x509 if binary_form: - return OpenSSL.crypto.dump_certificate( - OpenSSL.crypto.FILETYPE_ASN1, - x509) + return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509) return { - 'subject': ( - (('commonName', x509.get_subject().CN),), - ), - 'subjectAltName': get_subj_alt_name(x509) + "subject": ((("commonName", x509.get_subject().CN),),), + "subjectAltName": get_subj_alt_name(x509), } def version(self): @@ -388,9 +395,12 @@ def _drop(self): if _fileobject: # Platform-specific: Python 2 + def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) + + else: # Platform-specific: Python 3 makefile = backport_makefile @@ -403,6 +413,7 @@ class PyOpenSSLContext(object): for translating the interface of the standard library ``SSLContext`` object to calls into PyOpenSSL. 
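# ---------------------------------------------------------------------------
# Editorial aside -- an illustrative sketch, not part of the patch: recv()
# and recv_into() above handle WantReadError by blocking on
# util.wait_for_read() until the socket is readable, then retrying.
import socket

from urllib3.util.wait import wait_for_read

s1, s2 = socket.socketpair()
s2.sendall(b"ping")
if wait_for_read(s1, 1.0):  # True once data is ready to read
    print(s1.recv(4))       # b'ping'
s1.close()
s2.close()
# ---------------------------------------------------------------------------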
""" + def __init__(self, protocol): self.protocol = _openssl_versions[protocol] self._ctx = OpenSSL.SSL.Context(self.protocol) @@ -424,24 +435,21 @@ def verify_mode(self): @verify_mode.setter def verify_mode(self, value): - self._ctx.set_verify( - _stdlib_to_openssl_verify[value], - _verify_callback - ) + self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback) def set_default_verify_paths(self): self._ctx.set_default_verify_paths() def set_ciphers(self, ciphers): if isinstance(ciphers, six.text_type): - ciphers = ciphers.encode('utf-8') + ciphers = ciphers.encode("utf-8") self._ctx.set_cipher_list(ciphers) def load_verify_locations(self, cafile=None, capath=None, cadata=None): if cafile is not None: - cafile = cafile.encode('utf-8') + cafile = cafile.encode("utf-8") if capath is not None: - capath = capath.encode('utf-8') + capath = capath.encode("utf-8") self._ctx.load_verify_locations(cafile, capath) if cadata is not None: self._ctx.load_verify_locations(BytesIO(cadata)) @@ -450,17 +458,22 @@ def load_cert_chain(self, certfile, keyfile=None, password=None): self._ctx.use_certificate_chain_file(certfile) if password is not None: if not isinstance(password, six.binary_type): - password = password.encode('utf-8') + password = password.encode("utf-8") self._ctx.set_passwd_cb(lambda *_: password) self._ctx.use_privatekey_file(keyfile or certfile) - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, suppress_ragged_eofs=True, - server_hostname=None): + def wrap_socket( + self, + sock, + server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, + server_hostname=None, + ): cnx = OpenSSL.SSL.Connection(self._ctx, sock) if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3 - server_hostname = server_hostname.encode('utf-8') + server_hostname = server_hostname.encode("utf-8") if server_hostname is not None: cnx.set_tlsext_host_name(server_hostname) @@ -472,10 +485,10 @@ def wrap_socket(self, sock, server_side=False, cnx.do_handshake() except OpenSSL.SSL.WantReadError: if not util.wait_for_read(sock, sock.gettimeout()): - raise timeout('select timed out') + raise timeout("select timed out") continue except OpenSSL.SSL.Error as e: - raise ssl.SSLError('bad handshake: %r' % e) + raise ssl.SSLError("bad handshake: %r" % e) break return WrappedSocket(cnx, sock) diff --git a/pipenv/vendor/urllib3/contrib/securetransport.py b/pipenv/vendor/urllib3/contrib/securetransport.py index 4dc4848416..87d844afa7 100644 --- a/pipenv/vendor/urllib3/contrib/securetransport.py +++ b/pipenv/vendor/urllib3/contrib/securetransport.py @@ -62,12 +62,12 @@ import weakref from .. import util -from ._securetransport.bindings import ( - Security, SecurityConst, CoreFoundation -) +from ._securetransport.bindings import Security, SecurityConst, CoreFoundation from ._securetransport.low_level import ( - _assert_no_error, _cert_array_from_pem, _temporary_keychain, - _load_client_cert_chain + _assert_no_error, + _cert_array_from_pem, + _temporary_keychain, + _load_client_cert_chain, ) try: # Platform-specific: Python 2 @@ -76,7 +76,7 @@ _fileobject = None from ..packages.backports.makefile import backport_makefile -__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] +__all__ = ["inject_into_urllib3", "extract_from_urllib3"] # SNI always works HAS_SNI = True @@ -144,31 +144,36 @@ ] # Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of -# TLSv1 and a high of TLSv1.3. For everything else, we pin to that version. 
-# TLSv1 to 1.2 are supported on macOS 10.8+ and TLSv1.3 is macOS 10.13+ +# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. +# TLSv1 to 1.2 are supported on macOS 10.8+ _protocol_to_min_max = { - util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocolMaxSupported), + util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12) } if hasattr(ssl, "PROTOCOL_SSLv2"): _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( - SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2 + SecurityConst.kSSLProtocol2, + SecurityConst.kSSLProtocol2, ) if hasattr(ssl, "PROTOCOL_SSLv3"): _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( - SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3 + SecurityConst.kSSLProtocol3, + SecurityConst.kSSLProtocol3, ) if hasattr(ssl, "PROTOCOL_TLSv1"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( - SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1 + SecurityConst.kTLSProtocol1, + SecurityConst.kTLSProtocol1, ) if hasattr(ssl, "PROTOCOL_TLSv1_1"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( - SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11 + SecurityConst.kTLSProtocol11, + SecurityConst.kTLSProtocol11, ) if hasattr(ssl, "PROTOCOL_TLSv1_2"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( - SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12 + SecurityConst.kTLSProtocol12, + SecurityConst.kTLSProtocol12, ) @@ -218,7 +223,7 @@ def _read_callback(connection_id, data_buffer, data_length_pointer): while read_count < requested_length: if timeout is None or timeout >= 0: if not util.wait_for_read(base_socket, timeout): - raise socket.error(errno.EAGAIN, 'timed out') + raise socket.error(errno.EAGAIN, "timed out") remaining = requested_length - read_count buffer = (ctypes.c_char * remaining).from_address( @@ -274,7 +279,7 @@ def _write_callback(connection_id, data_buffer, data_length_pointer): while sent < bytes_to_write: if timeout is None or timeout >= 0: if not util.wait_for_write(base_socket, timeout): - raise socket.error(errno.EAGAIN, 'timed out') + raise socket.error(errno.EAGAIN, "timed out") chunk_sent = base_socket.send(data) sent += chunk_sent @@ -316,6 +321,7 @@ class WrappedSocket(object): Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage collector of PyPy. """ + def __init__(self, socket): self.socket = socket self.context = None @@ -380,7 +386,7 @@ def _custom_validate(self, verify, trust_bundle): # We want data in memory, so load it up. if os.path.isfile(trust_bundle): - with open(trust_bundle, 'rb') as f: + with open(trust_bundle, "rb") as f: trust_bundle = f.read() cert_array = None @@ -394,9 +400,7 @@ def _custom_validate(self, verify, trust_bundle): # created for this connection, shove our CAs into it, tell ST to # ignore everything else it knows, and then ask if it can build a # chain. This is a buuuunch of code. 
- result = Security.SSLCopyPeerTrust( - self.context, ctypes.byref(trust) - ) + result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust)) _assert_no_error(result) if not trust: raise ssl.SSLError("Failed to copy trust reference") @@ -408,9 +412,7 @@ def _custom_validate(self, verify, trust_bundle): _assert_no_error(result) trust_result = Security.SecTrustResultType() - result = Security.SecTrustEvaluate( - trust, ctypes.byref(trust_result) - ) + result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result)) _assert_no_error(result) finally: if trust: @@ -422,23 +424,24 @@ def _custom_validate(self, verify, trust_bundle): # Ok, now we can look at what the result was. successes = ( SecurityConst.kSecTrustResultUnspecified, - SecurityConst.kSecTrustResultProceed + SecurityConst.kSecTrustResultProceed, ) if trust_result.value not in successes: raise ssl.SSLError( - "certificate verify failed, error code: %d" % - trust_result.value + "certificate verify failed, error code: %d" % trust_result.value ) - def handshake(self, - server_hostname, - verify, - trust_bundle, - min_version, - max_version, - client_cert, - client_key, - client_key_passphrase): + def handshake( + self, + server_hostname, + verify, + trust_bundle, + min_version, + max_version, + client_cert, + client_key, + client_key_passphrase, + ): """ Actually performs the TLS handshake. This is run automatically by wrapped socket, and shouldn't be needed in user code. @@ -468,7 +471,7 @@ def handshake(self, # If we have a server hostname, we should set that too. if server_hostname: if not isinstance(server_hostname, bytes): - server_hostname = server_hostname.encode('utf-8') + server_hostname = server_hostname.encode("utf-8") result = Security.SSLSetPeerDomainName( self.context, server_hostname, len(server_hostname) @@ -482,13 +485,7 @@ def handshake(self, result = Security.SSLSetProtocolVersionMin(self.context, min_version) _assert_no_error(result) - # TLS 1.3 isn't necessarily enabled by the OS - # so we have to detect when we error out and try - # setting TLS 1.3 if it's allowed. kTLSProtocolMaxSupported - # was added in macOS 10.13 along with kTLSProtocol13. result = Security.SSLSetProtocolVersionMax(self.context, max_version) - if result != 0 and max_version == SecurityConst.kTLSProtocolMaxSupported: - result = Security.SSLSetProtocolVersionMax(self.context, SecurityConst.kTLSProtocol12) _assert_no_error(result) # If there's a trust DB, we need to use it. We do that by telling @@ -497,9 +494,7 @@ def handshake(self, # authing in that case. if not verify or trust_bundle is not None: result = Security.SSLSetSessionOption( - self.context, - SecurityConst.kSSLSessionOptionBreakOnServerAuth, - True + self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True ) _assert_no_error(result) @@ -509,9 +504,7 @@ def handshake(self, self._client_cert_chain = _load_client_cert_chain( self._keychain, client_cert, client_key ) - result = Security.SSLSetCertificate( - self.context, self._client_cert_chain - ) + result = Security.SSLSetCertificate(self.context, self._client_cert_chain) _assert_no_error(result) while True: @@ -562,7 +555,7 @@ def recv_into(self, buffer, nbytes=None): # There are some result codes that we want to treat as "not always # errors". Specifically, those are errSSLWouldBlock, # errSSLClosedGraceful, and errSSLClosedNoNotify. - if (result == SecurityConst.errSSLWouldBlock): + if result == SecurityConst.errSSLWouldBlock: # If we didn't process any bytes, then this was just a time out. 
# However, we can get errSSLWouldBlock in situations when we *did* # read some data, and in those cases we should just read "short" @@ -570,7 +563,10 @@ def recv_into(self, buffer, nbytes=None): if processed_bytes.value == 0: # Timed out, no data read. raise socket.timeout("recv timed out") - elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify): + elif result in ( + SecurityConst.errSSLClosedGraceful, + SecurityConst.errSSLClosedNoNotify, + ): # The remote peer has closed this connection. We should do so as # well. Note that we don't actually return here because in # principle this could actually be fired along with return data. @@ -609,7 +605,7 @@ def send(self, data): def sendall(self, data): total_sent = 0 while total_sent < len(data): - sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) + sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]) total_sent += sent def shutdown(self): @@ -656,18 +652,14 @@ def getpeercert(self, binary_form=False): # instead to just flag to urllib3 that it shouldn't do its own hostname # validation when using SecureTransport. if not binary_form: - raise ValueError( - "SecureTransport only supports dumping binary certs" - ) + raise ValueError("SecureTransport only supports dumping binary certs") trust = Security.SecTrustRef() certdata = None der_bytes = None try: # Grab the trust store. - result = Security.SSLCopyPeerTrust( - self.context, ctypes.byref(trust) - ) + result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust)) _assert_no_error(result) if not trust: # Probably we haven't done the handshake yet. No biggie. @@ -699,22 +691,24 @@ def getpeercert(self, binary_form=False): def version(self): protocol = Security.SSLProtocol() - result = Security.SSLGetNegotiatedProtocolVersion(self.context, ctypes.byref(protocol)) + result = Security.SSLGetNegotiatedProtocolVersion( + self.context, ctypes.byref(protocol) + ) _assert_no_error(result) if protocol.value == SecurityConst.kTLSProtocol13: - return 'TLSv1.3' + raise ssl.SSLError("SecureTransport does not support TLS 1.3") elif protocol.value == SecurityConst.kTLSProtocol12: - return 'TLSv1.2' + return "TLSv1.2" elif protocol.value == SecurityConst.kTLSProtocol11: - return 'TLSv1.1' + return "TLSv1.1" elif protocol.value == SecurityConst.kTLSProtocol1: - return 'TLSv1' + return "TLSv1" elif protocol.value == SecurityConst.kSSLProtocol3: - return 'SSLv3' + return "SSLv3" elif protocol.value == SecurityConst.kSSLProtocol2: - return 'SSLv2' + return "SSLv2" else: - raise ssl.SSLError('Unknown TLS version: %r' % protocol) + raise ssl.SSLError("Unknown TLS version: %r" % protocol) def _reuse(self): self._makefile_refs += 1 @@ -727,16 +721,21 @@ def _drop(self): if _fileobject: # Platform-specific: Python 2 + def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) + + else: # Platform-specific: Python 3 + def makefile(self, mode="r", buffering=None, *args, **kwargs): # We disable buffering with SecureTransport because it conflicts with # the buffering that ST does internally (see issue #1153 for more). buffering = 0 return backport_makefile(self, mode, buffering, *args, **kwargs) + WrappedSocket.makefile = makefile @@ -746,6 +745,7 @@ class SecureTransportContext(object): interface of the standard library ``SSLContext`` object to calls into SecureTransport. 
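# ---------------------------------------------------------------------------
# Editorial aside -- an illustrative sketch, not part of the patch: sendall()
# above slices its payload into SSL_WRITE_BLOCKSIZE (16 KiB) chunks because,
# per the module's own comment, a single TLS write is capped at 16K.
SSL_WRITE_BLOCKSIZE = 16384
data = b"x" * 40000
total_sent, chunks = 0, []
while total_sent < len(data):
    chunk = data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]
    chunks.append(len(chunk))
    total_sent += len(chunk)
print(chunks)  # [16384, 16384, 7232]
# ---------------------------------------------------------------------------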
""" + def __init__(self, protocol): self._min_version, self._max_version = _protocol_to_min_max[protocol] self._options = 0 @@ -812,16 +812,12 @@ def load_default_certs(self): def set_ciphers(self, ciphers): # For now, we just require the default cipher string. if ciphers != util.ssl_.DEFAULT_CIPHERS: - raise ValueError( - "SecureTransport doesn't support custom cipher strings" - ) + raise ValueError("SecureTransport doesn't support custom cipher strings") def load_verify_locations(self, cafile=None, capath=None, cadata=None): # OK, we only really support cadata and cafile. if capath is not None: - raise ValueError( - "SecureTransport does not support cert directories" - ) + raise ValueError("SecureTransport does not support cert directories") self._trust_bundle = cafile or cadata @@ -830,9 +826,14 @@ def load_cert_chain(self, certfile, keyfile=None, password=None): self._client_key = keyfile self._client_cert_passphrase = password - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, suppress_ragged_eofs=True, - server_hostname=None): + def wrap_socket( + self, + sock, + server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, + server_hostname=None, + ): # So, what do we do here? Firstly, we assert some properties. This is a # stripped down shim, so there is some functionality we don't support. # See PEP 543 for the real deal. @@ -846,8 +847,13 @@ def wrap_socket(self, sock, server_side=False, # Now we can handshake wrapped_socket.handshake( - server_hostname, self._verify, self._trust_bundle, - self._min_version, self._max_version, self._client_cert, - self._client_key, self._client_key_passphrase + server_hostname, + self._verify, + self._trust_bundle, + self._min_version, + self._max_version, + self._client_cert, + self._client_key, + self._client_key_passphrase, ) return wrapped_socket diff --git a/pipenv/vendor/urllib3/contrib/socks.py b/pipenv/vendor/urllib3/contrib/socks.py index 636d261fb0..9e97f7aa98 100644 --- a/pipenv/vendor/urllib3/contrib/socks.py +++ b/pipenv/vendor/urllib3/contrib/socks.py @@ -42,23 +42,20 @@ import warnings from ..exceptions import DependencyWarning - warnings.warn(( - 'SOCKS support in urllib3 requires the installation of optional ' - 'dependencies: specifically, PySocks. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' + warnings.warn( + ( + "SOCKS support in urllib3 requires the installation of optional " + "dependencies: specifically, PySocks. For more information, see " + "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies" ), - DependencyWarning + DependencyWarning, ) raise from socket import error as SocketError, timeout as SocketTimeout -from ..connection import ( - HTTPConnection, HTTPSConnection -) -from ..connectionpool import ( - HTTPConnectionPool, HTTPSConnectionPool -) +from ..connection import HTTPConnection, HTTPSConnection +from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool from ..exceptions import ConnectTimeoutError, NewConnectionError from ..poolmanager import PoolManager from ..util.url import parse_url @@ -73,8 +70,9 @@ class SOCKSConnection(HTTPConnection): """ A plain-text HTTP connection that connects via a SOCKS proxy. 
""" + def __init__(self, *args, **kwargs): - self._socks_options = kwargs.pop('_socks_options') + self._socks_options = kwargs.pop("_socks_options") super(SOCKSConnection, self).__init__(*args, **kwargs) def _new_conn(self): @@ -83,28 +81,30 @@ def _new_conn(self): """ extra_kw = {} if self.source_address: - extra_kw['source_address'] = self.source_address + extra_kw["source_address"] = self.source_address if self.socket_options: - extra_kw['socket_options'] = self.socket_options + extra_kw["socket_options"] = self.socket_options try: conn = socks.create_connection( (self.host, self.port), - proxy_type=self._socks_options['socks_version'], - proxy_addr=self._socks_options['proxy_host'], - proxy_port=self._socks_options['proxy_port'], - proxy_username=self._socks_options['username'], - proxy_password=self._socks_options['password'], - proxy_rdns=self._socks_options['rdns'], + proxy_type=self._socks_options["socks_version"], + proxy_addr=self._socks_options["proxy_host"], + proxy_port=self._socks_options["proxy_port"], + proxy_username=self._socks_options["username"], + proxy_password=self._socks_options["password"], + proxy_rdns=self._socks_options["rdns"], timeout=self.timeout, **extra_kw ) except SocketTimeout: raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) + self, + "Connection to %s timed out. (connect timeout=%s)" + % (self.host, self.timeout), + ) except socks.ProxyError as e: # This is fragile as hell, but it seems to be the only way to raise @@ -114,23 +114,22 @@ def _new_conn(self): if isinstance(error, SocketTimeout): raise ConnectTimeoutError( self, - "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout) + "Connection to %s timed out. (connect timeout=%s)" + % (self.host, self.timeout), ) else: raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % error + self, "Failed to establish a new connection: %s" % error ) else: raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % e + self, "Failed to establish a new connection: %s" % e ) except SocketError as e: # Defensive: PySocks should catch all these. raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) + self, "Failed to establish a new connection: %s" % e + ) return conn @@ -156,47 +155,53 @@ class SOCKSProxyManager(PoolManager): A version of the urllib3 ProxyManager that routes connections via the defined SOCKS proxy. 
""" + pool_classes_by_scheme = { - 'http': SOCKSHTTPConnectionPool, - 'https': SOCKSHTTPSConnectionPool, + "http": SOCKSHTTPConnectionPool, + "https": SOCKSHTTPSConnectionPool, } - def __init__(self, proxy_url, username=None, password=None, - num_pools=10, headers=None, **connection_pool_kw): + def __init__( + self, + proxy_url, + username=None, + password=None, + num_pools=10, + headers=None, + **connection_pool_kw + ): parsed = parse_url(proxy_url) if username is None and password is None and parsed.auth is not None: - split = parsed.auth.split(':') + split = parsed.auth.split(":") if len(split) == 2: username, password = split - if parsed.scheme == 'socks5': + if parsed.scheme == "socks5": socks_version = socks.PROXY_TYPE_SOCKS5 rdns = False - elif parsed.scheme == 'socks5h': + elif parsed.scheme == "socks5h": socks_version = socks.PROXY_TYPE_SOCKS5 rdns = True - elif parsed.scheme == 'socks4': + elif parsed.scheme == "socks4": socks_version = socks.PROXY_TYPE_SOCKS4 rdns = False - elif parsed.scheme == 'socks4a': + elif parsed.scheme == "socks4a": socks_version = socks.PROXY_TYPE_SOCKS4 rdns = True else: - raise ValueError( - "Unable to determine SOCKS version from %s" % proxy_url - ) + raise ValueError("Unable to determine SOCKS version from %s" % proxy_url) self.proxy_url = proxy_url socks_options = { - 'socks_version': socks_version, - 'proxy_host': parsed.host, - 'proxy_port': parsed.port, - 'username': username, - 'password': password, - 'rdns': rdns + "socks_version": socks_version, + "proxy_host": parsed.host, + "proxy_port": parsed.port, + "username": username, + "password": password, + "rdns": rdns, } - connection_pool_kw['_socks_options'] = socks_options + connection_pool_kw["_socks_options"] = socks_options super(SOCKSProxyManager, self).__init__( num_pools, headers, **connection_pool_kw diff --git a/pipenv/vendor/urllib3/exceptions.py b/pipenv/vendor/urllib3/exceptions.py index 7bbaa9871f..0a74c79b5e 100644 --- a/pipenv/vendor/urllib3/exceptions.py +++ b/pipenv/vendor/urllib3/exceptions.py @@ -1,7 +1,6 @@ from __future__ import absolute_import -from .packages.six.moves.http_client import ( - IncompleteRead as httplib_IncompleteRead -) +from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead + # Base Exceptions @@ -17,6 +16,7 @@ class HTTPWarning(Warning): class PoolError(HTTPError): "Base exception for errors caused within a pool." + def __init__(self, pool, message): self.pool = pool HTTPError.__init__(self, "%s: %s" % (pool, message)) @@ -28,6 +28,7 @@ def __reduce__(self): class RequestError(PoolError): "Base exception for PoolErrors that have associated URLs." + def __init__(self, pool, url, message): self.url = url PoolError.__init__(self, pool, message) @@ -63,6 +64,7 @@ class ProtocolError(HTTPError): # Leaf Exceptions + class MaxRetryError(RequestError): """Raised when the maximum number of retries is exceeded. 
@@ -76,8 +78,7 @@ class MaxRetryError(RequestError): def __init__(self, pool, url, reason=None): self.reason = reason - message = "Max retries exceeded with url: %s (Caused by %r)" % ( - url, reason) + message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason) RequestError.__init__(self, pool, url, message) @@ -93,6 +94,7 @@ def __init__(self, pool, url, retries=3): class TimeoutStateError(HTTPError): """ Raised when passing an invalid state to a timeout """ + pass @@ -102,6 +104,7 @@ class TimeoutError(HTTPError): Catching this error will catch both :exc:`ReadTimeoutErrors <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`. """ + pass @@ -149,8 +152,8 @@ def __init__(self, location): class ResponseError(HTTPError): "Used as a container for an error reason supplied in a MaxRetryError." - GENERIC_ERROR = 'too many error responses' - SPECIFIC_ERROR = 'too many {status_code} error responses' + GENERIC_ERROR = "too many error responses" + SPECIFIC_ERROR = "too many {status_code} error responses" class SecurityWarning(HTTPWarning): @@ -188,6 +191,7 @@ class DependencyWarning(HTTPWarning): Warned when an attempt is made to import a module with missing optional dependencies. """ + pass @@ -201,6 +205,7 @@ class BodyNotHttplibCompatible(HTTPError): Body should be httplib.HTTPResponse like (have an fp attribute which returns raw chunks) for read_chunked(). """ + pass @@ -212,12 +217,15 @@ class IncompleteRead(HTTPError, httplib_IncompleteRead): for `partial` to avoid creating large objects on streamed reads. """ + def __init__(self, partial, expected): super(IncompleteRead, self).__init__(partial, expected) def __repr__(self): - return ('IncompleteRead(%i bytes read, ' - '%i more expected)' % (self.partial, self.expected)) + return "IncompleteRead(%i bytes read, %i more expected)" % ( + self.partial, + self.expected, + ) class InvalidHeader(HTTPError): @@ -236,8 +244,9 @@ def __init__(self, scheme): class HeaderParsingError(HTTPError): "Raised by assert_header_parsing, but we convert it to a log.warning statement." + def __init__(self, defects, unparsed_data): - message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) + message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data) super(HeaderParsingError, self).__init__(message) diff --git a/pipenv/vendor/urllib3/fields.py b/pipenv/vendor/urllib3/fields.py index 6a9a5a7f56..8715b2202b 100644 --- a/pipenv/vendor/urllib3/fields.py +++ b/pipenv/vendor/urllib3/fields.py @@ -6,7 +6,7 @@ from .packages import six -def guess_content_type(filename, default='application/octet-stream'): +def guess_content_type(filename, default="application/octet-stream"): """ Guess the "Content-Type" of a file. 
@@ -41,22 +41,22 @@ def format_header_param_rfc2231(name, value): if not any(ch in value for ch in '"\\\r\n'): result = u'%s="%s"' % (name, value) try: - result.encode('ascii') + result.encode("ascii") except (UnicodeEncodeError, UnicodeDecodeError): pass else: return result - if not six.PY3: # Python 2: - value = value.encode('utf-8') + if six.PY2: # Python 2: + value = value.encode("utf-8") # encode_rfc2231 accepts an encoded string and returns an ascii-encoded # string in Python 2 but accepts and returns unicode strings in Python 3 - value = email.utils.encode_rfc2231(value, 'utf-8') - value = '%s*=%s' % (name, value) + value = email.utils.encode_rfc2231(value, "utf-8") + value = "%s*=%s" % (name, value) - if not six.PY3: # Python 2: - value = value.decode('utf-8') + if six.PY2: # Python 2: + value = value.decode("utf-8") return value @@ -69,23 +69,21 @@ def format_header_param_rfc2231(name, value): } # All control characters from 0x00 to 0x1F *except* 0x1B. -_HTML5_REPLACEMENTS.update({ - six.unichr(cc): u"%{:02X}".format(cc) - for cc - in range(0x00, 0x1F+1) - if cc not in (0x1B,) -}) +_HTML5_REPLACEMENTS.update( + { + six.unichr(cc): u"%{:02X}".format(cc) + for cc in range(0x00, 0x1F + 1) + if cc not in (0x1B,) + } +) def _replace_multiple(value, needles_and_replacements): - def replacer(match): return needles_and_replacements[match.group(0)] pattern = re.compile( - r"|".join([ - re.escape(needle) for needle in needles_and_replacements.keys() - ]) + r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()]) ) result = pattern.sub(replacer, value) @@ -140,13 +138,15 @@ class RequestField(object): An optional callable that is used to encode and format the headers. By default, this is :func:`format_header_param_html5`. """ + def __init__( - self, - name, - data, - filename=None, - headers=None, - header_formatter=format_header_param_html5): + self, + name, + data, + filename=None, + headers=None, + header_formatter=format_header_param_html5, + ): self._name = name self._filename = filename self.data = data @@ -156,11 +156,7 @@ def __init__( self.header_formatter = header_formatter @classmethod - def from_tuples( - cls, - fieldname, - value, - header_formatter=format_header_param_html5): + def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5): """ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. 
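# ---------------------------------------------------------------------------
# Editorial aside -- an illustrative sketch, not part of the patch: on
# Python 3, format_header_param_rfc2231() above falls back to stdlib RFC 2231
# encoding when a value will not fit in plain ASCII.
import email.utils

value = email.utils.encode_rfc2231(u"na\u00efve.txt", "utf-8")
print("%s*=%s" % ("filename", value))  # filename*=utf-8''na%C3%AFve.txt
# ---------------------------------------------------------------------------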
@@ -189,7 +185,8 @@ def from_tuples( data = value request_param = cls( - fieldname, data, filename=filename, header_formatter=header_formatter) + fieldname, data, filename=filename, header_formatter=header_formatter + ) request_param.make_multipart(content_type=content_type) return request_param @@ -227,7 +224,7 @@ def _render_parts(self, header_parts): if value is not None: parts.append(self._render_part(name, value)) - return u'; '.join(parts) + return u"; ".join(parts) def render_headers(self): """ @@ -235,21 +232,22 @@ def render_headers(self): """ lines = [] - sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location'] + sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"] for sort_key in sort_keys: if self.headers.get(sort_key, False): - lines.append(u'%s: %s' % (sort_key, self.headers[sort_key])) + lines.append(u"%s: %s" % (sort_key, self.headers[sort_key])) for header_name, header_value in self.headers.items(): if header_name not in sort_keys: if header_value: - lines.append(u'%s: %s' % (header_name, header_value)) + lines.append(u"%s: %s" % (header_name, header_value)) - lines.append(u'\r\n') - return u'\r\n'.join(lines) + lines.append(u"\r\n") + return u"\r\n".join(lines) - def make_multipart(self, content_disposition=None, content_type=None, - content_location=None): + def make_multipart( + self, content_disposition=None, content_type=None, content_location=None + ): """ Makes this request field into a multipart request field. @@ -262,11 +260,14 @@ def make_multipart(self, content_disposition=None, content_type=None, The 'Content-Location' of the request body. """ - self.headers['Content-Disposition'] = content_disposition or u'form-data' - self.headers['Content-Disposition'] += u'; '.join([ - u'', self._render_parts( - ((u'name', self._name), (u'filename', self._filename)) - ) - ]) - self.headers['Content-Type'] = content_type - self.headers['Content-Location'] = content_location + self.headers["Content-Disposition"] = content_disposition or u"form-data" + self.headers["Content-Disposition"] += u"; ".join( + [ + u"", + self._render_parts( + ((u"name", self._name), (u"filename", self._filename)) + ), + ] + ) + self.headers["Content-Type"] = content_type + self.headers["Content-Location"] = content_location diff --git a/pipenv/vendor/urllib3/filepost.py b/pipenv/vendor/urllib3/filepost.py index 78f1e19b0e..b7b00992c6 100644 --- a/pipenv/vendor/urllib3/filepost.py +++ b/pipenv/vendor/urllib3/filepost.py @@ -9,7 +9,7 @@ from .packages.six import b from .fields import RequestField -writer = codecs.lookup('utf-8')[3] +writer = codecs.lookup("utf-8")[3] def choose_boundary(): @@ -17,8 +17,8 @@ def choose_boundary(): Our embarrassingly-simple replacement for mimetools.choose_boundary. 
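# ---------------------------------------------------------------------------
# Editorial aside -- an illustrative usage sketch, not part of the patch:
# building a multipart field with the RequestField class reformatted above
# (the filename and payload are hypothetical).
from urllib3.fields import RequestField

rf = RequestField.from_tuples("upload", ("report.txt", b"hello", "text/plain"))
print(rf.render_headers())
# ---------------------------------------------------------------------------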
""" boundary = binascii.hexlify(os.urandom(16)) - if six.PY3: - boundary = boundary.decode('ascii') + if not six.PY2: + boundary = boundary.decode("ascii") return boundary @@ -76,7 +76,7 @@ def encode_multipart_formdata(fields, boundary=None): boundary = choose_boundary() for field in iter_field_objects(fields): - body.write(b('--%s\r\n' % (boundary))) + body.write(b("--%s\r\n" % (boundary))) writer(body).write(field.render_headers()) data = field.data @@ -89,10 +89,10 @@ def encode_multipart_formdata(fields, boundary=None): else: body.write(data) - body.write(b'\r\n') + body.write(b"\r\n") - body.write(b('--%s--\r\n' % (boundary))) + body.write(b("--%s--\r\n" % (boundary))) - content_type = str('multipart/form-data; boundary=%s' % boundary) + content_type = str("multipart/form-data; boundary=%s" % boundary) return body.getvalue(), content_type diff --git a/pipenv/vendor/urllib3/packages/__init__.py b/pipenv/vendor/urllib3/packages/__init__.py index 170e974c15..fce4caa65d 100644 --- a/pipenv/vendor/urllib3/packages/__init__.py +++ b/pipenv/vendor/urllib3/packages/__init__.py @@ -2,4 +2,4 @@ from . import ssl_match_hostname -__all__ = ('ssl_match_hostname', ) +__all__ = ("ssl_match_hostname",) diff --git a/pipenv/vendor/urllib3/packages/backports/makefile.py b/pipenv/vendor/urllib3/packages/backports/makefile.py index 740db377d9..a3156a69c0 100644 --- a/pipenv/vendor/urllib3/packages/backports/makefile.py +++ b/pipenv/vendor/urllib3/packages/backports/makefile.py @@ -11,15 +11,14 @@ from socket import SocketIO -def backport_makefile(self, mode="r", buffering=None, encoding=None, - errors=None, newline=None): +def backport_makefile( + self, mode="r", buffering=None, encoding=None, errors=None, newline=None +): """ Backport of ``socket.makefile`` from Python 3.5. """ if not set(mode) <= {"r", "w", "b"}: - raise ValueError( - "invalid mode %r (only r, w, b allowed)" % (mode,) - ) + raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,)) writing = "w" in mode reading = "r" in mode or not writing assert reading or writing diff --git a/pipenv/vendor/urllib3/packages/rfc3986/__init__.py b/pipenv/vendor/urllib3/packages/rfc3986/__init__.py deleted file mode 100644 index 9d3c3bc92b..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2014 Rackspace -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An implementation of semantics and validations described in RFC 3986. - -See http://rfc3986.readthedocs.io/ for detailed documentation. 
- -:copyright: (c) 2014 Rackspace -:license: Apache v2.0, see LICENSE for details -""" - -from .api import iri_reference -from .api import IRIReference -from .api import is_valid_uri -from .api import normalize_uri -from .api import uri_reference -from .api import URIReference -from .api import urlparse -from .parseresult import ParseResult - -__title__ = 'rfc3986' -__author__ = 'Ian Stapleton Cordasco' -__author_email__ = 'graffatcolmingov@gmail.com' -__license__ = 'Apache v2.0' -__copyright__ = 'Copyright 2014 Rackspace' -__version__ = '1.3.1' - -__all__ = ( - 'ParseResult', - 'URIReference', - 'IRIReference', - 'is_valid_uri', - 'normalize_uri', - 'uri_reference', - 'iri_reference', - 'urlparse', - '__title__', - '__author__', - '__author_email__', - '__license__', - '__copyright__', - '__version__', -) diff --git a/pipenv/vendor/urllib3/packages/rfc3986/_mixin.py b/pipenv/vendor/urllib3/packages/rfc3986/_mixin.py deleted file mode 100644 index 543925cdbc..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/_mixin.py +++ /dev/null @@ -1,353 +0,0 @@ -"""Module containing the implementation of the URIMixin class.""" -import warnings - -from . import exceptions as exc -from . import misc -from . import normalizers -from . import validators - - -class URIMixin(object): - """Mixin with all shared methods for URIs and IRIs.""" - - __hash__ = tuple.__hash__ - - def authority_info(self): - """Return a dictionary with the ``userinfo``, ``host``, and ``port``. - - If the authority is not valid, it will raise a - :class:`~rfc3986.exceptions.InvalidAuthority` Exception. - - :returns: - ``{'userinfo': 'username:password', 'host': 'www.example.com', - 'port': '80'}`` - :rtype: dict - :raises rfc3986.exceptions.InvalidAuthority: - If the authority is not ``None`` and can not be parsed. - """ - if not self.authority: - return {'userinfo': None, 'host': None, 'port': None} - - match = self._match_subauthority() - - if match is None: - # In this case, we have an authority that was parsed from the URI - # Reference, but it cannot be further parsed by our - # misc.SUBAUTHORITY_MATCHER. In this case it must not be a valid - # authority. - raise exc.InvalidAuthority(self.authority.encode(self.encoding)) - - # We had a match, now let's ensure that it is actually a valid host - # address if it is IPv4 - matches = match.groupdict() - host = matches.get('host') - - if (host and misc.IPv4_MATCHER.match(host) and not - validators.valid_ipv4_host_address(host)): - # If we have a host, it appears to be IPv4 and it does not have - # valid bytes, it is an InvalidAuthority. - raise exc.InvalidAuthority(self.authority.encode(self.encoding)) - - return matches - - def _match_subauthority(self): - return misc.SUBAUTHORITY_MATCHER.match(self.authority) - - @property - def host(self): - """If present, a string representing the host.""" - try: - authority = self.authority_info() - except exc.InvalidAuthority: - return None - return authority['host'] - - @property - def port(self): - """If present, the port extracted from the authority.""" - try: - authority = self.authority_info() - except exc.InvalidAuthority: - return None - return authority['port'] - - @property - def userinfo(self): - """If present, the userinfo extracted from the authority.""" - try: - authority = self.authority_info() - except exc.InvalidAuthority: - return None - return authority['userinfo'] - - def is_absolute(self): - """Determine if this URI Reference is an absolute URI. - - See http://tools.ietf.org/html/rfc3986#section-4.3 for explanation. 
- - :returns: ``True`` if it is an absolute URI, ``False`` otherwise. - :rtype: bool - """ - return bool(misc.ABSOLUTE_URI_MATCHER.match(self.unsplit())) - - def is_valid(self, **kwargs): - """Determine if the URI is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param bool require_scheme: Set to ``True`` if you wish to require the - presence of the scheme component. - :param bool require_authority: Set to ``True`` if you wish to require - the presence of the authority component. - :param bool require_path: Set to ``True`` if you wish to require the - presence of the path component. - :param bool require_query: Set to ``True`` if you wish to require the - presence of the query component. - :param bool require_fragment: Set to ``True`` if you wish to require - the presence of the fragment component. - :returns: ``True`` if the URI is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - validators = [ - (self.scheme_is_valid, kwargs.get('require_scheme', False)), - (self.authority_is_valid, kwargs.get('require_authority', False)), - (self.path_is_valid, kwargs.get('require_path', False)), - (self.query_is_valid, kwargs.get('require_query', False)), - (self.fragment_is_valid, kwargs.get('require_fragment', False)), - ] - return all(v(r) for v, r in validators) - - def authority_is_valid(self, require=False): - """Determine if the authority component is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param bool require: - Set to ``True`` to require the presence of this component. - :returns: - ``True`` if the authority is valid. ``False`` otherwise. - :rtype: - bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - try: - self.authority_info() - except exc.InvalidAuthority: - return False - - return validators.authority_is_valid( - self.authority, - host=self.host, - require=require, - ) - - def scheme_is_valid(self, require=False): - """Determine if the scheme component is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param str require: Set to ``True`` to require the presence of this - component. - :returns: ``True`` if the scheme is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - return validators.scheme_is_valid(self.scheme, require) - - def path_is_valid(self, require=False): - """Determine if the path component is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param str require: Set to ``True`` to require the presence of this - component. - :returns: ``True`` if the path is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - return validators.path_is_valid(self.path, require) - - def query_is_valid(self, require=False): - """Determine if the query component is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param str require: Set to ``True`` to require the presence of this - component. 
- :returns: ``True`` if the query is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - return validators.query_is_valid(self.query, require) - - def fragment_is_valid(self, require=False): - """Determine if the fragment component is valid. - - .. deprecated:: 1.1.0 - - Use the Validator object instead. - - :param str require: Set to ``True`` to require the presence of this - component. - :returns: ``True`` if the fragment is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - return validators.fragment_is_valid(self.fragment, require) - - def normalized_equality(self, other_ref): - """Compare this URIReference to another URIReference. - - :param URIReference other_ref: (required), The reference with which - we're comparing. - :returns: ``True`` if the references are equal, ``False`` otherwise. - :rtype: bool - """ - return tuple(self.normalize()) == tuple(other_ref.normalize()) - - def resolve_with(self, base_uri, strict=False): - """Use an absolute URI Reference to resolve this relative reference. - - Assuming this is a relative reference that you would like to resolve, - use the provided base URI to resolve it. - - See http://tools.ietf.org/html/rfc3986#section-5 for more information. - - :param base_uri: Either a string or URIReference. It must be an - absolute URI or it will raise an exception. - :returns: A new URIReference which is the result of resolving this - reference using ``base_uri``. - :rtype: :class:`URIReference` - :raises rfc3986.exceptions.ResolutionError: - If the ``base_uri`` is not an absolute URI. - """ - if not isinstance(base_uri, URIMixin): - base_uri = type(self).from_string(base_uri) - - if not base_uri.is_absolute(): - raise exc.ResolutionError(base_uri) - - # This is optional per - # http://tools.ietf.org/html/rfc3986#section-5.2.1 - base_uri = base_uri.normalize() - - # The reference we're resolving - resolving = self - - if not strict and resolving.scheme == base_uri.scheme: - resolving = resolving.copy_with(scheme=None) - - # http://tools.ietf.org/html/rfc3986#page-32 - if resolving.scheme is not None: - target = resolving.copy_with( - path=normalizers.normalize_path(resolving.path) - ) - else: - if resolving.authority is not None: - target = resolving.copy_with( - scheme=base_uri.scheme, - path=normalizers.normalize_path(resolving.path) - ) - else: - if resolving.path is None: - if resolving.query is not None: - query = resolving.query - else: - query = base_uri.query - target = resolving.copy_with( - scheme=base_uri.scheme, - authority=base_uri.authority, - path=base_uri.path, - query=query - ) - else: - if resolving.path.startswith('/'): - path = normalizers.normalize_path(resolving.path) - else: - path = normalizers.normalize_path( - misc.merge_paths(base_uri, resolving.path) - ) - target = resolving.copy_with( - scheme=base_uri.scheme, - authority=base_uri.authority, - path=path, - query=resolving.query - ) - return target - - def unsplit(self): - """Create a URI string from the components. - - :returns: The URI Reference reconstituted as a string. 
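``resolve_with()`` implements RFC 3986 §5 reference resolution, for example:

from rfc3986 import uri_reference
from rfc3986.exceptions import ResolutionError

base = uri_reference('http://example.com/a/b/c')
resolved = uri_reference('../d?x=1').resolve_with(base)
assert resolved.unsplit() == 'http://example.com/a/d?x=1'

try:
    uri_reference('../d').resolve_with('/no/scheme')  # base must be absolute
except ResolutionError:
    pass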
- :rtype: str - """ - # See http://tools.ietf.org/html/rfc3986#section-5.3 - result_list = [] - if self.scheme: - result_list.extend([self.scheme, ':']) - if self.authority: - result_list.extend(['//', self.authority]) - if self.path: - result_list.append(self.path) - if self.query is not None: - result_list.extend(['?', self.query]) - if self.fragment is not None: - result_list.extend(['#', self.fragment]) - return ''.join(result_list) - - def copy_with(self, scheme=misc.UseExisting, authority=misc.UseExisting, - path=misc.UseExisting, query=misc.UseExisting, - fragment=misc.UseExisting): - """Create a copy of this reference with the new components. - - :param str scheme: - (optional) The scheme to use for the new reference. - :param str authority: - (optional) The authority to use for the new reference. - :param str path: - (optional) The path to use for the new reference. - :param str query: - (optional) The query to use for the new reference. - :param str fragment: - (optional) The fragment to use for the new reference. - :returns: - New URIReference with provided components. - :rtype: - URIReference - """ - attributes = { - 'scheme': scheme, - 'authority': authority, - 'path': path, - 'query': query, - 'fragment': fragment, - } - for key, value in list(attributes.items()): - if value is misc.UseExisting: - del attributes[key] - uri = self._replace(**attributes) - uri.encoding = self.encoding - return uri diff --git a/pipenv/vendor/urllib3/packages/rfc3986/abnf_regexp.py b/pipenv/vendor/urllib3/packages/rfc3986/abnf_regexp.py deleted file mode 100644 index 24c9c3d00a..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/abnf_regexp.py +++ /dev/null @@ -1,267 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Module for the regular expressions crafted from ABNF.""" - -import sys - -# https://tools.ietf.org/html/rfc3986#page-13 -GEN_DELIMS = GENERIC_DELIMITERS = ":/?#[]@" -GENERIC_DELIMITERS_SET = set(GENERIC_DELIMITERS) -# https://tools.ietf.org/html/rfc3986#page-13 -SUB_DELIMS = SUB_DELIMITERS = "!$&'()*+,;=" -SUB_DELIMITERS_SET = set(SUB_DELIMITERS) -# Escape the '*' for use in regular expressions -SUB_DELIMITERS_RE = r"!$&'()\*+,;=" -RESERVED_CHARS_SET = GENERIC_DELIMITERS_SET.union(SUB_DELIMITERS_SET) -ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' -DIGIT = '0123456789' -# https://tools.ietf.org/html/rfc3986#section-2.3 -UNRESERVED = UNRESERVED_CHARS = ALPHA + DIGIT + r'._!-' -UNRESERVED_CHARS_SET = set(UNRESERVED_CHARS) -NON_PCT_ENCODED_SET = RESERVED_CHARS_SET.union(UNRESERVED_CHARS_SET) -# We need to escape the '-' in this case: -UNRESERVED_RE = r'A-Za-z0-9._~\-' - -# Percent encoded character values -PERCENT_ENCODED = PCT_ENCODED = '%[A-Fa-f0-9]{2}' -PCHAR = '([' + UNRESERVED_RE + SUB_DELIMITERS_RE + ':@]|%s)' % PCT_ENCODED - -# NOTE(sigmavirus24): We're going to use more strict regular expressions -# than appear in Appendix B for scheme. This will prevent over-eager -# consuming of items that aren't schemes. 
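``copy_with()`` pairs with ``unsplit()`` for targeted edits; the ``misc.UseExisting`` sentinel is what lets an explicit ``None`` mean "drop this component" rather than "keep it":

from rfc3986 import uri_reference

ref = uri_reference('https://example.com/search?q=old#r1')

# Unspecified components keep their values; fragment=None removes it.
new = ref.copy_with(query='q=new', fragment=None)
assert new.unsplit() == 'https://example.com/search?q=new'
assert ref.unsplit() == 'https://example.com/search?q=old#r1'  # original untouched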
-SCHEME_RE = '[a-zA-Z][a-zA-Z0-9+.-]*' -_AUTHORITY_RE = '[^/?#]*' -_PATH_RE = '[^?#]*' -_QUERY_RE = '[^#]*' -_FRAGMENT_RE = '.*' - -# Extracted from http://tools.ietf.org/html/rfc3986#appendix-B -COMPONENT_PATTERN_DICT = { - 'scheme': SCHEME_RE, - 'authority': _AUTHORITY_RE, - 'path': _PATH_RE, - 'query': _QUERY_RE, - 'fragment': _FRAGMENT_RE, -} - -# See http://tools.ietf.org/html/rfc3986#appendix-B -# In this case, we name each of the important matches so we can use -# SRE_Match#groupdict to parse the values out if we so choose. This is also -# modified to ignore other matches that are not important to the parsing of -# the reference so we can also simply use SRE_Match#groups. -URL_PARSING_RE = ( - r'(?:(?P<scheme>{scheme}):)?(?://(?P<authority>{authority}))?' - r'(?P<path>{path})(?:\?(?P<query>{query}))?' - r'(?:#(?P<fragment>{fragment}))?' -).format(**COMPONENT_PATTERN_DICT) - - -# ######################### -# Authority Matcher Section -# ######################### - -# Host patterns, see: http://tools.ietf.org/html/rfc3986#section-3.2.2 -# The pattern for a regular name, e.g., www.google.com, api.github.com -REGULAR_NAME_RE = REG_NAME = '((?:{0}|[{1}])*)'.format( - '%[0-9A-Fa-f]{2}', SUB_DELIMITERS_RE + UNRESERVED_RE -) -# The pattern for an IPv4 address, e.g., 192.168.255.255, 127.0.0.1, -IPv4_RE = r'([0-9]{1,3}\.){3}[0-9]{1,3}' -# Hexadecimal characters used in each piece of an IPv6 address -HEXDIG_RE = '[0-9A-Fa-f]{1,4}' -# Least-significant 32 bits of an IPv6 address -LS32_RE = '({hex}:{hex}|{ipv4})'.format(hex=HEXDIG_RE, ipv4=IPv4_RE) -# Substitutions into the following patterns for IPv6 patterns defined -# http://tools.ietf.org/html/rfc3986#page-20 -_subs = {'hex': HEXDIG_RE, 'ls32': LS32_RE} - -# Below: h16 = hexdig, see: https://tools.ietf.org/html/rfc5234 for details -# about ABNF (Augmented Backus-Naur Form) use in the comments -variations = [ - # 6( h16 ":" ) ls32 - '(%(hex)s:){6}%(ls32)s' % _subs, - # "::" 5( h16 ":" ) ls32 - '::(%(hex)s:){5}%(ls32)s' % _subs, - # [ h16 ] "::" 4( h16 ":" ) ls32 - '(%(hex)s)?::(%(hex)s:){4}%(ls32)s' % _subs, - # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 - '((%(hex)s:)?%(hex)s)?::(%(hex)s:){3}%(ls32)s' % _subs, - # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 - '((%(hex)s:){0,2}%(hex)s)?::(%(hex)s:){2}%(ls32)s' % _subs, - # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 - '((%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s' % _subs, - # [ *4( h16 ":" ) h16 ] "::" ls32 - '((%(hex)s:){0,4}%(hex)s)?::%(ls32)s' % _subs, - # [ *5( h16 ":" ) h16 ] "::" h16 - '((%(hex)s:){0,5}%(hex)s)?::%(hex)s' % _subs, - # [ *6( h16 ":" ) h16 ] "::" - '((%(hex)s:){0,6}%(hex)s)?::' % _subs, -] - -IPv6_RE = '(({0})|({1})|({2})|({3})|({4})|({5})|({6})|({7})|({8}))'.format( - *variations -) - -IPv_FUTURE_RE = r'v[0-9A-Fa-f]+\.[%s]+' % ( - UNRESERVED_RE + SUB_DELIMITERS_RE + ':' -) - -# RFC 6874 Zone ID ABNF -ZONE_ID = '(?:[' + UNRESERVED_RE + ']|' + PCT_ENCODED + ')+' - -IPv6_ADDRZ_RFC4007_RE = IPv6_RE + '(?:(?:%25|%)' + ZONE_ID + ')?' -IPv6_ADDRZ_RE = IPv6_RE + '(?:%25' + ZONE_ID + ')?' 
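Substituted into ``URL_PARSING_RE``, the component patterns above compose to the named-group form of the Appendix B expression — roughly:

import re

URL_RE = re.compile(
    r'(?:(?P<scheme>[a-zA-Z][a-zA-Z0-9+.-]*):)?'
    r'(?://(?P<authority>[^/?#]*))?'
    r'(?P<path>[^?#]*)'
    r'(?:\?(?P<query>[^#]*))?'
    r'(?:#(?P<fragment>.*))?'
)

parts = URL_RE.match('http://example.com/a?b=c#d').groupdict()
assert parts == {'scheme': 'http', 'authority': 'example.com',
                 'path': '/a', 'query': 'b=c', 'fragment': 'd'}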
- -IP_LITERAL_RE = r'\[({0}|{1})\]'.format( - IPv6_ADDRZ_RFC4007_RE, - IPv_FUTURE_RE, -) - -# Pattern for matching the host piece of the authority -HOST_RE = HOST_PATTERN = '({0}|{1}|{2})'.format( - REG_NAME, - IPv4_RE, - IP_LITERAL_RE, -) -USERINFO_RE = '^([' + UNRESERVED_RE + SUB_DELIMITERS_RE + ':]|%s)+' % ( - PCT_ENCODED -) -PORT_RE = '[0-9]{1,5}' - -# #################### -# Path Matcher Section -# #################### - -# See http://tools.ietf.org/html/rfc3986#section-3.3 for more information -# about the path patterns defined below. -segments = { - 'segment': PCHAR + '*', - # Non-zero length segment - 'segment-nz': PCHAR + '+', - # Non-zero length segment without ":" - 'segment-nz-nc': PCHAR.replace(':', '') + '+' -} - -# Path types taken from Section 3.3 (linked above) -PATH_EMPTY = '^$' -PATH_ROOTLESS = '%(segment-nz)s(/%(segment)s)*' % segments -PATH_NOSCHEME = '%(segment-nz-nc)s(/%(segment)s)*' % segments -PATH_ABSOLUTE = '/(%s)?' % PATH_ROOTLESS -PATH_ABEMPTY = '(/%(segment)s)*' % segments -PATH_RE = '^(%s|%s|%s|%s|%s)$' % ( - PATH_ABEMPTY, PATH_ABSOLUTE, PATH_NOSCHEME, PATH_ROOTLESS, PATH_EMPTY -) - -FRAGMENT_RE = QUERY_RE = ( - '^([/?:@' + UNRESERVED_RE + SUB_DELIMITERS_RE + ']|%s)*$' % PCT_ENCODED -) - -# ########################## -# Relative reference matcher -# ########################## - -# See http://tools.ietf.org/html/rfc3986#section-4.2 for details -RELATIVE_PART_RE = '(//%s%s|%s|%s|%s)' % ( - COMPONENT_PATTERN_DICT['authority'], - PATH_ABEMPTY, - PATH_ABSOLUTE, - PATH_NOSCHEME, - PATH_EMPTY, -) - -# See http://tools.ietf.org/html/rfc3986#section-3 for definition -HIER_PART_RE = '(//%s%s|%s|%s|%s)' % ( - COMPONENT_PATTERN_DICT['authority'], - PATH_ABEMPTY, - PATH_ABSOLUTE, - PATH_ROOTLESS, - PATH_EMPTY, -) - -# ############### -# IRIs / RFC 3987 -# ############### - -# Only wide-unicode gets the high-ranges of UCSCHAR -if sys.maxunicode > 0xFFFF: # pragma: no cover - IPRIVATE = u'\uE000-\uF8FF\U000F0000-\U000FFFFD\U00100000-\U0010FFFD' - UCSCHAR_RE = ( - u'\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF' - u'\U00010000-\U0001FFFD\U00020000-\U0002FFFD' - u'\U00030000-\U0003FFFD\U00040000-\U0004FFFD' - u'\U00050000-\U0005FFFD\U00060000-\U0006FFFD' - u'\U00070000-\U0007FFFD\U00080000-\U0008FFFD' - u'\U00090000-\U0009FFFD\U000A0000-\U000AFFFD' - u'\U000B0000-\U000BFFFD\U000C0000-\U000CFFFD' - u'\U000D0000-\U000DFFFD\U000E1000-\U000EFFFD' - ) -else: # pragma: no cover - IPRIVATE = u'\uE000-\uF8FF' - UCSCHAR_RE = ( - u'\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF' - ) - -IUNRESERVED_RE = u'A-Za-z0-9\\._~\\-' + UCSCHAR_RE -IPCHAR = u'([' + IUNRESERVED_RE + SUB_DELIMITERS_RE + u':@]|%s)' % PCT_ENCODED - -isegments = { - 'isegment': IPCHAR + u'*', - # Non-zero length segment - 'isegment-nz': IPCHAR + u'+', - # Non-zero length segment without ":" - 'isegment-nz-nc': IPCHAR.replace(':', '') + u'+' -} - -IPATH_ROOTLESS = u'%(isegment-nz)s(/%(isegment)s)*' % isegments -IPATH_NOSCHEME = u'%(isegment-nz-nc)s(/%(isegment)s)*' % isegments -IPATH_ABSOLUTE = u'/(?:%s)?' 
% IPATH_ROOTLESS -IPATH_ABEMPTY = u'(?:/%(isegment)s)*' % isegments -IPATH_RE = u'^(?:%s|%s|%s|%s|%s)$' % ( - IPATH_ABEMPTY, IPATH_ABSOLUTE, IPATH_NOSCHEME, IPATH_ROOTLESS, PATH_EMPTY -) - -IREGULAR_NAME_RE = IREG_NAME = u'(?:{0}|[{1}])*'.format( - u'%[0-9A-Fa-f]{2}', SUB_DELIMITERS_RE + IUNRESERVED_RE -) - -IHOST_RE = IHOST_PATTERN = u'({0}|{1}|{2})'.format( - IREG_NAME, - IPv4_RE, - IP_LITERAL_RE, -) - -IUSERINFO_RE = u'^(?:[' + IUNRESERVED_RE + SUB_DELIMITERS_RE + u':]|%s)+' % ( - PCT_ENCODED -) - -IFRAGMENT_RE = (u'^(?:[/?:@' + IUNRESERVED_RE + SUB_DELIMITERS_RE - + u']|%s)*$' % PCT_ENCODED) -IQUERY_RE = (u'^(?:[/?:@' + IUNRESERVED_RE + SUB_DELIMITERS_RE - + IPRIVATE + u']|%s)*$' % PCT_ENCODED) - -IRELATIVE_PART_RE = u'(//%s%s|%s|%s|%s)' % ( - COMPONENT_PATTERN_DICT['authority'], - IPATH_ABEMPTY, - IPATH_ABSOLUTE, - IPATH_NOSCHEME, - PATH_EMPTY, -) - -IHIER_PART_RE = u'(//%s%s|%s|%s|%s)' % ( - COMPONENT_PATTERN_DICT['authority'], - IPATH_ABEMPTY, - IPATH_ABSOLUTE, - IPATH_ROOTLESS, - PATH_EMPTY, -) diff --git a/pipenv/vendor/urllib3/packages/rfc3986/api.py b/pipenv/vendor/urllib3/packages/rfc3986/api.py deleted file mode 100644 index ddc4a1cd28..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/api.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2014 Rackspace -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Module containing the simple and functional API for rfc3986. - -This module defines functions and provides access to the public attributes -and classes of rfc3986. -""" - -from .iri import IRIReference -from .parseresult import ParseResult -from .uri import URIReference - - -def uri_reference(uri, encoding='utf-8'): - """Parse a URI string into a URIReference. - - This is a convenience function. You could achieve the same end by using - ``URIReference.from_string(uri)``. - - :param str uri: The URI which needs to be parsed into a reference. - :param str encoding: The encoding of the string provided - :returns: A parsed URI - :rtype: :class:`URIReference` - """ - return URIReference.from_string(uri, encoding) - - -def iri_reference(iri, encoding='utf-8'): - """Parse a IRI string into an IRIReference. - - This is a convenience function. You could achieve the same end by using - ``IRIReference.from_string(iri)``. - - :param str iri: The IRI which needs to be parsed into a reference. - :param str encoding: The encoding of the string provided - :returns: A parsed IRI - :rtype: :class:`IRIReference` - """ - return IRIReference.from_string(iri, encoding) - - -def is_valid_uri(uri, encoding='utf-8', **kwargs): - """Determine if the URI given is valid. - - This is a convenience function. You could use either - ``uri_reference(uri).is_valid()`` or - ``URIReference.from_string(uri).is_valid()`` to achieve the same result. - - :param str uri: The URI to be validated. - :param str encoding: The encoding of the string provided - :param bool require_scheme: Set to ``True`` if you wish to require the - presence of the scheme component. 
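The convenience functions above are thin wrappers over the ``from_string`` constructors:

from rfc3986 import iri_reference, uri_reference

ref = uri_reference('https://example.com/path')     # == URIReference.from_string(...)
assert (ref.scheme, ref.authority, ref.path) == ('https', 'example.com', '/path')

iri = iri_reference(u'http://\u0449.example.com/')  # == IRIReference.from_string(...)
assert iri.host == u'\u0449.example.com'            # host stays unicode until .encode()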
- :param bool require_authority: Set to ``True`` if you wish to require the - presence of the authority component. - :param bool require_path: Set to ``True`` if you wish to require the - presence of the path component. - :param bool require_query: Set to ``True`` if you wish to require the - presence of the query component. - :param bool require_fragment: Set to ``True`` if you wish to require the - presence of the fragment component. - :returns: ``True`` if the URI is valid, ``False`` otherwise. - :rtype: bool - """ - return URIReference.from_string(uri, encoding).is_valid(**kwargs) - - -def normalize_uri(uri, encoding='utf-8'): - """Normalize the given URI. - - This is a convenience function. You could use either - ``uri_reference(uri).normalize().unsplit()`` or - ``URIReference.from_string(uri).normalize().unsplit()`` instead. - - :param str uri: The URI to be normalized. - :param str encoding: The encoding of the string provided - :returns: The normalized URI. - :rtype: str - """ - normalized_reference = URIReference.from_string(uri, encoding).normalize() - return normalized_reference.unsplit() - - -def urlparse(uri, encoding='utf-8'): - """Parse a given URI and return a ParseResult. - - This is a partial replacement of the standard library's urlparse function. - - :param str uri: The URI to be parsed. - :param str encoding: The encoding of the string provided. - :returns: A parsed URI - :rtype: :class:`~rfc3986.parseresult.ParseResult` - """ - return ParseResult.from_string(uri, encoding, strict=False) diff --git a/pipenv/vendor/urllib3/packages/rfc3986/builder.py b/pipenv/vendor/urllib3/packages/rfc3986/builder.py deleted file mode 100644 index 7934279995..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/builder.py +++ /dev/null @@ -1,298 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Ian Stapleton Cordasco -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Module containing the logic for the URIBuilder object.""" -from . import compat -from . import normalizers -from . import uri - - -class URIBuilder(object): - """Object to aid in building up a URI Reference from parts. - - .. note:: - - This object should be instantiated by the user, but it's recommended - that it is not provided with arguments. Instead, use the available - method to populate the fields. - - """ - - def __init__(self, scheme=None, userinfo=None, host=None, port=None, - path=None, query=None, fragment=None): - """Initialize our URI builder. 
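``normalize_uri()`` and ``urlparse()`` in use — note that normalization upper-cases percent escapes but does not decode them:

from rfc3986 import normalize_uri, urlparse

assert normalize_uri('HTTP://EXAMPLE.com/%7efoo') == 'http://example.com/%7Efoo'

result = urlparse('https://example.com/a')  # rfc3986's ParseResult, not urllib's
assert (result.scheme, result.path) == ('https', '/a')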
- - :param str scheme: - (optional) - :param str userinfo: - (optional) - :param str host: - (optional) - :param int port: - (optional) - :param str path: - (optional) - :param str query: - (optional) - :param str fragment: - (optional) - """ - self.scheme = scheme - self.userinfo = userinfo - self.host = host - self.port = port - self.path = path - self.query = query - self.fragment = fragment - - def __repr__(self): - """Provide a convenient view of our builder object.""" - formatstr = ('URIBuilder(scheme={b.scheme}, userinfo={b.userinfo}, ' - 'host={b.host}, port={b.port}, path={b.path}, ' - 'query={b.query}, fragment={b.fragment})') - return formatstr.format(b=self) - - def add_scheme(self, scheme): - """Add a scheme to our builder object. - - After normalizing, this will generate a new URIBuilder instance with - the specified scheme and all other attributes the same. - - .. code-block:: python - - >>> URIBuilder().add_scheme('HTTPS') - URIBuilder(scheme='https', userinfo=None, host=None, port=None, - path=None, query=None, fragment=None) - - """ - scheme = normalizers.normalize_scheme(scheme) - return URIBuilder( - scheme=scheme, - userinfo=self.userinfo, - host=self.host, - port=self.port, - path=self.path, - query=self.query, - fragment=self.fragment, - ) - - def add_credentials(self, username, password): - """Add credentials as the userinfo portion of the URI. - - .. code-block:: python - - >>> URIBuilder().add_credentials('root', 's3crete') - URIBuilder(scheme=None, userinfo='root:s3crete', host=None, - port=None, path=None, query=None, fragment=None) - - >>> URIBuilder().add_credentials('root', None) - URIBuilder(scheme=None, userinfo='root', host=None, - port=None, path=None, query=None, fragment=None) - """ - if username is None: - raise ValueError('Username cannot be None') - userinfo = normalizers.normalize_username(username) - - if password is not None: - userinfo = '{}:{}'.format( - userinfo, - normalizers.normalize_password(password), - ) - - return URIBuilder( - scheme=self.scheme, - userinfo=userinfo, - host=self.host, - port=self.port, - path=self.path, - query=self.query, - fragment=self.fragment, - ) - - def add_host(self, host): - """Add hostname to the URI. - - .. code-block:: python - - >>> URIBuilder().add_host('google.com') - URIBuilder(scheme=None, userinfo=None, host='google.com', - port=None, path=None, query=None, fragment=None) - - """ - return URIBuilder( - scheme=self.scheme, - userinfo=self.userinfo, - host=normalizers.normalize_host(host), - port=self.port, - path=self.path, - query=self.query, - fragment=self.fragment, - ) - - def add_port(self, port): - """Add port to the URI. - - .. code-block:: python - - >>> URIBuilder().add_port(80) - URIBuilder(scheme=None, userinfo=None, host=None, port='80', - path=None, query=None, fragment=None) - - >>> URIBuilder().add_port(443) - URIBuilder(scheme=None, userinfo=None, host=None, port='443', - path=None, query=None, fragment=None) - - """ - port_int = int(port) - if port_int < 0: - raise ValueError( - 'ports are not allowed to be negative. You provided {}'.format( - port_int, - ) - ) - if port_int > 65535: - raise ValueError( - 'ports are not allowed to be larger than 65535. ' - 'You provided {}'.format( - port_int, - ) - ) - - return URIBuilder( - scheme=self.scheme, - userinfo=self.userinfo, - host=self.host, - port='{}'.format(port_int), - path=self.path, - query=self.query, - fragment=self.fragment, - ) - - def add_path(self, path): - """Add a path to the URI. - - .. 
code-block:: python - - >>> URIBuilder().add_path('sigmavirus24/rfc3985') - URIBuilder(scheme=None, userinfo=None, host=None, port=None, - path='/sigmavirus24/rfc3986', query=None, fragment=None) - - >>> URIBuilder().add_path('/checkout.php') - URIBuilder(scheme=None, userinfo=None, host=None, port=None, - path='/checkout.php', query=None, fragment=None) - - """ - if not path.startswith('/'): - path = '/{}'.format(path) - - return URIBuilder( - scheme=self.scheme, - userinfo=self.userinfo, - host=self.host, - port=self.port, - path=normalizers.normalize_path(path), - query=self.query, - fragment=self.fragment, - ) - - def add_query_from(self, query_items): - """Generate and add a query a dictionary or list of tuples. - - .. code-block:: python - - >>> URIBuilder().add_query_from({'a': 'b c'}) - URIBuilder(scheme=None, userinfo=None, host=None, port=None, - path=None, query='a=b+c', fragment=None) - - >>> URIBuilder().add_query_from([('a', 'b c')]) - URIBuilder(scheme=None, userinfo=None, host=None, port=None, - path=None, query='a=b+c', fragment=None) - - """ - query = normalizers.normalize_query(compat.urlencode(query_items)) - - return URIBuilder( - scheme=self.scheme, - userinfo=self.userinfo, - host=self.host, - port=self.port, - path=self.path, - query=query, - fragment=self.fragment, - ) - - def add_query(self, query): - """Add a pre-formated query string to the URI. - - .. code-block:: python - - >>> URIBuilder().add_query('a=b&c=d') - URIBuilder(scheme=None, userinfo=None, host=None, port=None, - path=None, query='a=b&c=d', fragment=None) - - """ - return URIBuilder( - scheme=self.scheme, - userinfo=self.userinfo, - host=self.host, - port=self.port, - path=self.path, - query=normalizers.normalize_query(query), - fragment=self.fragment, - ) - - def add_fragment(self, fragment): - """Add a fragment to the URI. - - .. code-block:: python - - >>> URIBuilder().add_fragment('section-2.6.1') - URIBuilder(scheme=None, userinfo=None, host=None, port=None, - path=None, query=None, fragment='section-2.6.1') - - """ - return URIBuilder( - scheme=self.scheme, - userinfo=self.userinfo, - host=self.host, - port=self.port, - path=self.path, - query=self.query, - fragment=normalizers.normalize_fragment(fragment), - ) - - def finalize(self): - """Create a URIReference from our builder. - - .. code-block:: python - - >>> URIBuilder().add_scheme('https').add_host('github.com' - ... ).add_path('sigmavirus24/rfc3986').finalize().unsplit() - 'https://github.com/sigmavirus24/rfc3986' - - >>> URIBuilder().add_scheme('https').add_host('github.com' - ... ).add_path('sigmavirus24/rfc3986').add_credentials( - ... 'sigmavirus24', 'not-re@l').finalize().unsplit() - 'https://sigmavirus24:not-re%40l@github.com/sigmavirus24/rfc3986' - - """ - return uri.URIReference( - self.scheme, - normalizers.normalize_authority( - (self.userinfo, self.host, self.port) - ), - self.path, - self.query, - self.fragment, - ) diff --git a/pipenv/vendor/urllib3/packages/rfc3986/compat.py b/pipenv/vendor/urllib3/packages/rfc3986/compat.py deleted file mode 100644 index 8968c38437..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/compat.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2014 Rackspace -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
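Chaining the builder methods above, in the style of the ``finalize()`` doctest:

from rfc3986.builder import URIBuilder

url = (
    URIBuilder()
    .add_scheme('https')
    .add_credentials('sigmavirus24', 'not-re@l')
    .add_host('github.com')
    .add_port(443)
    .add_path('sigmavirus24/rfc3986')
    .add_query_from({'q': 'builder'})
    .finalize()
    .unsplit()
)
assert url == ('https://sigmavirus24:not-re%40l@github.com:443'
               '/sigmavirus24/rfc3986?q=builder')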
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Compatibility module for Python 2 and 3 support.""" -import sys - -try: - from urllib.parse import quote as urlquote -except ImportError: # Python 2.x - from urllib import quote as urlquote - -try: - from urllib.parse import urlencode -except ImportError: # Python 2.x - from urllib import urlencode - -__all__ = ( - 'to_bytes', - 'to_str', - 'urlquote', - 'urlencode', -) - -PY3 = (3, 0) <= sys.version_info < (4, 0) -PY2 = (2, 6) <= sys.version_info < (2, 8) - - -if PY3: - unicode = str # Python 3.x - - -def to_str(b, encoding='utf-8'): - """Ensure that b is text in the specified encoding.""" - if hasattr(b, 'decode') and not isinstance(b, unicode): - b = b.decode(encoding) - return b - - -def to_bytes(s, encoding='utf-8'): - """Ensure that s is converted to bytes from the encoding.""" - if hasattr(s, 'encode') and not isinstance(s, bytes): - s = s.encode(encoding) - return s diff --git a/pipenv/vendor/urllib3/packages/rfc3986/exceptions.py b/pipenv/vendor/urllib3/packages/rfc3986/exceptions.py deleted file mode 100644 index da8ca7cb1f..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/exceptions.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -"""Exceptions module for rfc3986.""" - -from . import compat - - -class RFC3986Exception(Exception): - """Base class for all rfc3986 exception classes.""" - - pass - - -class InvalidAuthority(RFC3986Exception): - """Exception when the authority string is invalid.""" - - def __init__(self, authority): - """Initialize the exception with the invalid authority.""" - super(InvalidAuthority, self).__init__( - u"The authority ({0}) is not valid.".format( - compat.to_str(authority))) - - -class InvalidPort(RFC3986Exception): - """Exception when the port is invalid.""" - - def __init__(self, port): - """Initialize the exception with the invalid port.""" - super(InvalidPort, self).__init__( - 'The port ("{0}") is not valid.'.format(port)) - - -class ResolutionError(RFC3986Exception): - """Exception to indicate a failure to resolve a URI.""" - - def __init__(self, uri): - """Initialize the error with the failed URI.""" - super(ResolutionError, self).__init__( - "{0} is not an absolute URI.".format(uri.unsplit())) - - -class ValidationError(RFC3986Exception): - """Exception raised during Validation of a URI.""" - - pass - - -class MissingComponentError(ValidationError): - """Exception raised when a required component is missing.""" - - def __init__(self, uri, *component_names): - """Initialize the error with the missing component name.""" - verb = 'was' - if len(component_names) > 1: - verb = 'were' - - self.uri = uri - self.components = sorted(component_names) - components = ', '.join(self.components) - super(MissingComponentError, self).__init__( - "{} {} required but missing".format(components, verb), - uri, - self.components, - ) - - -class UnpermittedComponentError(ValidationError): - """Exception raised when a component has an unpermitted value.""" - - def __init__(self, component_name, component_value, allowed_values): - """Initialize the error with the unpermitted component.""" - super(UnpermittedComponentError, 
self).__init__( - "{} was required to be one of {!r} but was {!r}".format( - component_name, list(sorted(allowed_values)), component_value, - ), - component_name, - component_value, - allowed_values, - ) - self.component_name = component_name - self.component_value = component_value - self.allowed_values = allowed_values - - -class PasswordForbidden(ValidationError): - """Exception raised when a URL has a password in the userinfo section.""" - - def __init__(self, uri): - """Initialize the error with the URI that failed validation.""" - unsplit = getattr(uri, 'unsplit', lambda: uri) - super(PasswordForbidden, self).__init__( - '"{}" contained a password when validation forbade it'.format( - unsplit() - ) - ) - self.uri = uri - - -class InvalidComponentsError(ValidationError): - """Exception raised when one or more components are invalid.""" - - def __init__(self, uri, *component_names): - """Initialize the error with the invalid component name(s).""" - verb = 'was' - if len(component_names) > 1: - verb = 'were' - - self.uri = uri - self.components = sorted(component_names) - components = ', '.join(self.components) - super(InvalidComponentsError, self).__init__( - "{} {} found to be invalid".format(components, verb), - uri, - self.components, - ) - - -class MissingDependencyError(RFC3986Exception): - """Exception raised when an IRI is encoded without the 'idna' module.""" diff --git a/pipenv/vendor/urllib3/packages/rfc3986/iri.py b/pipenv/vendor/urllib3/packages/rfc3986/iri.py deleted file mode 100644 index 9c01fe1cd0..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/iri.py +++ /dev/null @@ -1,147 +0,0 @@ -"""Module containing the implementation of the IRIReference class.""" -# -*- coding: utf-8 -*- -# Copyright (c) 2014 Rackspace -# Copyright (c) 2015 Ian Stapleton Cordasco -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from collections import namedtuple - -from . import compat -from . import exceptions -from . import misc -from . import normalizers -from . import uri - - -try: - import idna -except ImportError: # pragma: no cover - idna = None - - -class IRIReference(namedtuple('IRIReference', misc.URI_COMPONENTS), - uri.URIMixin): - """Immutable object representing a parsed IRI Reference. - - Can be encoded into an URIReference object via the procedure - specified in RFC 3987 Section 3.1 - - .. note:: - The IRI submodule is a new interface and may possibly change in - the future. Check for changes to the interface when upgrading. 
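A sketch of the ``InvalidAuthority`` path above: an IPv4-shaped host with out-of-range octets parses as an authority but fails octet validation:

from rfc3986 import uri_reference
from rfc3986.exceptions import InvalidAuthority

ref = uri_reference('//999.999.999.999')
try:
    ref.authority_info()
except InvalidAuthority as err:
    print(err)  # The authority (999.999.999.999) is not valid.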
- """ - - slots = () - - def __new__(cls, scheme, authority, path, query, fragment, - encoding='utf-8'): - """Create a new IRIReference.""" - ref = super(IRIReference, cls).__new__( - cls, - scheme or None, - authority or None, - path or None, - query, - fragment) - ref.encoding = encoding - return ref - - def __eq__(self, other): - """Compare this reference to another.""" - other_ref = other - if isinstance(other, tuple): - other_ref = self.__class__(*other) - elif not isinstance(other, IRIReference): - try: - other_ref = self.__class__.from_string(other) - except TypeError: - raise TypeError( - 'Unable to compare {0}() to {1}()'.format( - type(self).__name__, type(other).__name__)) - - # See http://tools.ietf.org/html/rfc3986#section-6.2 - return tuple(self) == tuple(other_ref) - - def _match_subauthority(self): - return misc.ISUBAUTHORITY_MATCHER.match(self.authority) - - @classmethod - def from_string(cls, iri_string, encoding='utf-8'): - """Parse a IRI reference from the given unicode IRI string. - - :param str iri_string: Unicode IRI to be parsed into a reference. - :param str encoding: The encoding of the string provided - :returns: :class:`IRIReference` or subclass thereof - """ - iri_string = compat.to_str(iri_string, encoding) - - split_iri = misc.IRI_MATCHER.match(iri_string).groupdict() - return cls( - split_iri['scheme'], split_iri['authority'], - normalizers.encode_component(split_iri['path'], encoding), - normalizers.encode_component(split_iri['query'], encoding), - normalizers.encode_component(split_iri['fragment'], encoding), - encoding, - ) - - def encode(self, idna_encoder=None): # noqa: C901 - """Encode an IRIReference into a URIReference instance. - - If the ``idna`` module is installed or the ``rfc3986[idna]`` - extra is used then unicode characters in the IRI host - component will be encoded with IDNA2008. - - :param idna_encoder: - Function that encodes each part of the host component - If not given will raise an exception if the IRI - contains a host component. - :rtype: uri.URIReference - :returns: A URI reference - """ - authority = self.authority - if authority: - if idna_encoder is None: - if idna is None: # pragma: no cover - raise exceptions.MissingDependencyError( - "Could not import the 'idna' module " - "and the IRI hostname requires encoding" - ) - - def idna_encoder(name): - if any(ord(c) > 128 for c in name): - try: - return idna.encode(name.lower(), - strict=True, - std3_rules=True) - except idna.IDNAError: - raise exceptions.InvalidAuthority(self.authority) - return name - - authority = "" - if self.host: - authority = ".".join([compat.to_str(idna_encoder(part)) - for part in self.host.split(".")]) - - if self.userinfo is not None: - authority = (normalizers.encode_component( - self.userinfo, self.encoding) + '@' + authority) - - if self.port is not None: - authority += ":" + str(self.port) - - return uri.URIReference(self.scheme, - authority, - path=self.path, - query=self.query, - fragment=self.fragment, - encoding=self.encoding) diff --git a/pipenv/vendor/urllib3/packages/rfc3986/misc.py b/pipenv/vendor/urllib3/packages/rfc3986/misc.py deleted file mode 100644 index 00f9f3b94d..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/misc.py +++ /dev/null @@ -1,146 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2014 Rackspace -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
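``IRIReference.encode()`` in use — this assumes the optional third-party ``idna`` distribution is importable; without it (and without an explicit ``idna_encoder``) the call raises ``MissingDependencyError`` as documented above:

from rfc3986 import iri_reference

iri = iri_reference(u'http://\u0441\u0430\u0439\u0442.example.com/')
uri = iri.encode()  # IDNA2008-encodes the non-ASCII host labels
assert uri.host == 'xn--80aswg.example.com'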
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Module containing compiled regular expressions and constants. - -This module contains important constants, patterns, and compiled regular -expressions for parsing and validating URIs and their components. -""" - -import re - -from . import abnf_regexp - -# These are enumerated for the named tuple used as a superclass of -# URIReference -URI_COMPONENTS = ['scheme', 'authority', 'path', 'query', 'fragment'] - -important_characters = { - 'generic_delimiters': abnf_regexp.GENERIC_DELIMITERS, - 'sub_delimiters': abnf_regexp.SUB_DELIMITERS, - # We need to escape the '*' in this case - 're_sub_delimiters': abnf_regexp.SUB_DELIMITERS_RE, - 'unreserved_chars': abnf_regexp.UNRESERVED_CHARS, - # We need to escape the '-' in this case: - 're_unreserved': abnf_regexp.UNRESERVED_RE, -} - -# For details about delimiters and reserved characters, see: -# http://tools.ietf.org/html/rfc3986#section-2.2 -GENERIC_DELIMITERS = abnf_regexp.GENERIC_DELIMITERS_SET -SUB_DELIMITERS = abnf_regexp.SUB_DELIMITERS_SET -RESERVED_CHARS = abnf_regexp.RESERVED_CHARS_SET -# For details about unreserved characters, see: -# http://tools.ietf.org/html/rfc3986#section-2.3 -UNRESERVED_CHARS = abnf_regexp.UNRESERVED_CHARS_SET -NON_PCT_ENCODED = abnf_regexp.NON_PCT_ENCODED_SET - -URI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE) - -SUBAUTHORITY_MATCHER = re.compile(( - '^(?:(?P<userinfo>{0})@)?' # userinfo - '(?P<host>{1})' # host - ':?(?P<port>{2})?$' # port - ).format(abnf_regexp.USERINFO_RE, - abnf_regexp.HOST_PATTERN, - abnf_regexp.PORT_RE)) - - -HOST_MATCHER = re.compile('^' + abnf_regexp.HOST_RE + '$') -IPv4_MATCHER = re.compile('^' + abnf_regexp.IPv4_RE + '$') -IPv6_MATCHER = re.compile(r'^\[' + abnf_regexp.IPv6_ADDRZ_RFC4007_RE + r'\]$') - -# Used by host validator -IPv6_NO_RFC4007_MATCHER = re.compile(r'^\[%s\]$' % ( - abnf_regexp.IPv6_ADDRZ_RE -)) - -# Matcher used to validate path components -PATH_MATCHER = re.compile(abnf_regexp.PATH_RE) - - -# ################################## -# Query and Fragment Matcher Section -# ################################## - -QUERY_MATCHER = re.compile(abnf_regexp.QUERY_RE) - -FRAGMENT_MATCHER = QUERY_MATCHER - -# Scheme validation, see: http://tools.ietf.org/html/rfc3986#section-3.1 -SCHEME_MATCHER = re.compile('^{0}$'.format(abnf_regexp.SCHEME_RE)) - -RELATIVE_REF_MATCHER = re.compile(r'^%s(\?%s)?(#%s)?$' % ( - abnf_regexp.RELATIVE_PART_RE, - abnf_regexp.QUERY_RE, - abnf_regexp.FRAGMENT_RE, -)) - -# See http://tools.ietf.org/html/rfc3986#section-4.3 -ABSOLUTE_URI_MATCHER = re.compile(r'^%s:%s(\?%s)?$' % ( - abnf_regexp.COMPONENT_PATTERN_DICT['scheme'], - abnf_regexp.HIER_PART_RE, - abnf_regexp.QUERY_RE[1:-1], -)) - -# ############### -# IRIs / RFC 3987 -# ############### - -IRI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE, re.UNICODE) - -ISUBAUTHORITY_MATCHER = re.compile(( - u'^(?:(?P<userinfo>{0})@)?' 
# iuserinfo - u'(?P<host>{1})' # ihost - u':?(?P<port>{2})?$' # port - ).format(abnf_regexp.IUSERINFO_RE, - abnf_regexp.IHOST_RE, - abnf_regexp.PORT_RE), re.UNICODE) - - -IHOST_MATCHER = re.compile('^' + abnf_regexp.IHOST_RE + '$', re.UNICODE) - -IPATH_MATCHER = re.compile(abnf_regexp.IPATH_RE, re.UNICODE) - -IQUERY_MATCHER = re.compile(abnf_regexp.IQUERY_RE, re.UNICODE) - -IFRAGMENT_MATCHER = re.compile(abnf_regexp.IFRAGMENT_RE, re.UNICODE) - - -RELATIVE_IRI_MATCHER = re.compile(u'^%s(?:\\?%s)?(?:%s)?$' % ( - abnf_regexp.IRELATIVE_PART_RE, - abnf_regexp.IQUERY_RE, - abnf_regexp.IFRAGMENT_RE -), re.UNICODE) - -ABSOLUTE_IRI_MATCHER = re.compile(u'^%s:%s(?:\\?%s)?$' % ( - abnf_regexp.COMPONENT_PATTERN_DICT['scheme'], - abnf_regexp.IHIER_PART_RE, - abnf_regexp.IQUERY_RE[1:-1] -), re.UNICODE) - - -# Path merger as defined in http://tools.ietf.org/html/rfc3986#section-5.2.3 -def merge_paths(base_uri, relative_path): - """Merge a base URI's path with a relative URI's path.""" - if base_uri.path is None and base_uri.authority is not None: - return '/' + relative_path - else: - path = base_uri.path or '' - index = path.rfind('/') - return path[:index] + '/' + relative_path - - -UseExisting = object() diff --git a/pipenv/vendor/urllib3/packages/rfc3986/normalizers.py b/pipenv/vendor/urllib3/packages/rfc3986/normalizers.py deleted file mode 100644 index 2eb1bb36f7..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/normalizers.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2014 Rackspace -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Module with functions to normalize components.""" -import re - -from . import compat -from . import misc - - -def normalize_scheme(scheme): - """Normalize the scheme component.""" - return scheme.lower() - - -def normalize_authority(authority): - """Normalize an authority tuple to a string.""" - userinfo, host, port = authority - result = '' - if userinfo: - result += normalize_percent_characters(userinfo) + '@' - if host: - result += normalize_host(host) - if port: - result += ':' + port - return result - - -def normalize_username(username): - """Normalize a username to make it safe to include in userinfo.""" - return compat.urlquote(username) - - -def normalize_password(password): - """Normalize a password to make safe for userinfo.""" - return compat.urlquote(password) - - -def normalize_host(host): - """Normalize a host string.""" - if misc.IPv6_MATCHER.match(host): - percent = host.find('%') - if percent != -1: - percent_25 = host.find('%25') - - # Replace RFC 4007 IPv6 Zone ID delimiter '%' with '%25' - # from RFC 6874. 
If the host is '[<IPv6 addr>%25]' then we - # assume RFC 4007 and normalize to '[<IPV6 addr>%2525]' - if percent_25 == -1 or percent < percent_25 or \ - (percent == percent_25 and percent_25 == len(host) - 4): - host = host.replace('%', '%25', 1) - - # Don't normalize the casing of the Zone ID - return host[:percent].lower() + host[percent:] - - return host.lower() - - -def normalize_path(path): - """Normalize the path string.""" - if not path: - return path - - path = normalize_percent_characters(path) - return remove_dot_segments(path) - - -def normalize_query(query): - """Normalize the query string.""" - if not query: - return query - return normalize_percent_characters(query) - - -def normalize_fragment(fragment): - """Normalize the fragment string.""" - if not fragment: - return fragment - return normalize_percent_characters(fragment) - - -PERCENT_MATCHER = re.compile('%[A-Fa-f0-9]{2}') - - -def normalize_percent_characters(s): - """All percent characters should be upper-cased. - - For example, ``"%3afoo%DF%ab"`` should be turned into ``"%3Afoo%DF%AB"``. - """ - matches = set(PERCENT_MATCHER.findall(s)) - for m in matches: - if not m.isupper(): - s = s.replace(m, m.upper()) - return s - - -def remove_dot_segments(s): - """Remove dot segments from the string. - - See also Section 5.2.4 of :rfc:`3986`. - """ - # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code - segments = s.split('/') # Turn the path into a list of segments - output = [] # Initialize the variable to use to store output - - for segment in segments: - # '.' is the current directory, so ignore it, it is superfluous - if segment == '.': - continue - # Anything other than '..', should be appended to the output - elif segment != '..': - output.append(segment) - # In this case segment == '..', if we can, we should pop the last - # element - elif output: - output.pop() - - # If the path starts with '/' and the output is empty or the first string - # is non-empty - if s.startswith('/') and (not output or output[0]): - output.insert(0, '') - - # If the path starts with '/.' or '/..' ensure we add one more empty - # string to add a trailing '/' - if s.endswith(('/.', '/..')): - output.append('') - - return '/'.join(output) - - -def encode_component(uri_component, encoding): - """Encode the specific component in the provided encoding.""" - if uri_component is None: - return uri_component - - # Try to see if the component we're encoding is already percent-encoded - # so we can skip all '%' characters but still encode all others. 
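The two helpers above do the heavy lifting of path normalization:

from rfc3986.normalizers import normalize_percent_characters, remove_dot_segments

assert normalize_percent_characters('%3afoo%DF%ab') == '%3Afoo%DF%AB'
assert remove_dot_segments('/a/b/c/./../../g') == '/a/g'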
- percent_encodings = len(PERCENT_MATCHER.findall( - compat.to_str(uri_component, encoding))) - - uri_bytes = compat.to_bytes(uri_component, encoding) - is_percent_encoded = percent_encodings == uri_bytes.count(b'%') - - encoded_uri = bytearray() - - for i in range(0, len(uri_bytes)): - # Will return a single character bytestring on both Python 2 & 3 - byte = uri_bytes[i:i+1] - byte_ord = ord(byte) - if ((is_percent_encoded and byte == b'%') - or (byte_ord < 128 and byte.decode() in misc.NON_PCT_ENCODED)): - encoded_uri.extend(byte) - continue - encoded_uri.extend('%{0:02x}'.format(byte_ord).encode().upper()) - - return encoded_uri.decode(encoding) diff --git a/pipenv/vendor/urllib3/packages/rfc3986/parseresult.py b/pipenv/vendor/urllib3/packages/rfc3986/parseresult.py deleted file mode 100644 index 0a73456693..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/parseresult.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2015 Ian Stapleton Cordasco -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Module containing the urlparse compatibility logic.""" -from collections import namedtuple - -from . import compat -from . import exceptions -from . import misc -from . import normalizers -from . import uri - -__all__ = ('ParseResult', 'ParseResultBytes') - -PARSED_COMPONENTS = ('scheme', 'userinfo', 'host', 'port', 'path', 'query', - 'fragment') - - -class ParseResultMixin(object): - def _generate_authority(self, attributes): - # I swear I did not align the comparisons below. That's just how they - # happened to align based on pep8 and attribute lengths. - userinfo, host, port = (attributes[p] - for p in ('userinfo', 'host', 'port')) - if (self.userinfo != userinfo or - self.host != host or - self.port != port): - if port: - port = '{0}'.format(port) - return normalizers.normalize_authority( - (compat.to_str(userinfo, self.encoding), - compat.to_str(host, self.encoding), - port) - ) - return self.authority - - def geturl(self): - """Shim to match the standard library method.""" - return self.unsplit() - - @property - def hostname(self): - """Shim to match the standard library.""" - return self.host - - @property - def netloc(self): - """Shim to match the standard library.""" - return self.authority - - @property - def params(self): - """Shim to match the standard library.""" - return self.query - - -class ParseResult(namedtuple('ParseResult', PARSED_COMPONENTS), - ParseResultMixin): - """Implementation of urlparse compatibility class. - - This uses the URIReference logic to handle compatibility with the - urlparse.ParseResult class. 
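The ``ParseResultMixin`` shims above make the result quack like the standard library's:

from rfc3986 import urlparse

result = urlparse('https://user@example.com:8443/a/b?q=1#frag')
assert result.hostname == 'example.com'          # shim for .host
assert result.netloc == 'user@example.com:8443'  # shim for .authority
assert result.port == 8443                       # converted to int
assert result.geturl() == 'https://user@example.com:8443/a/b?q=1#frag'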
- """ - - slots = () - - def __new__(cls, scheme, userinfo, host, port, path, query, fragment, - uri_ref, encoding='utf-8'): - """Create a new ParseResult.""" - parse_result = super(ParseResult, cls).__new__( - cls, - scheme or None, - userinfo or None, - host, - port or None, - path or None, - query, - fragment) - parse_result.encoding = encoding - parse_result.reference = uri_ref - return parse_result - - @classmethod - def from_parts(cls, scheme=None, userinfo=None, host=None, port=None, - path=None, query=None, fragment=None, encoding='utf-8'): - """Create a ParseResult instance from its parts.""" - authority = '' - if userinfo is not None: - authority += userinfo + '@' - if host is not None: - authority += host - if port is not None: - authority += ':{0}'.format(port) - uri_ref = uri.URIReference(scheme=scheme, - authority=authority, - path=path, - query=query, - fragment=fragment, - encoding=encoding).normalize() - userinfo, host, port = authority_from(uri_ref, strict=True) - return cls(scheme=uri_ref.scheme, - userinfo=userinfo, - host=host, - port=port, - path=uri_ref.path, - query=uri_ref.query, - fragment=uri_ref.fragment, - uri_ref=uri_ref, - encoding=encoding) - - @classmethod - def from_string(cls, uri_string, encoding='utf-8', strict=True, - lazy_normalize=True): - """Parse a URI from the given unicode URI string. - - :param str uri_string: Unicode URI to be parsed into a reference. - :param str encoding: The encoding of the string provided - :param bool strict: Parse strictly according to :rfc:`3986` if True. - If False, parse similarly to the standard library's urlparse - function. - :returns: :class:`ParseResult` or subclass thereof - """ - reference = uri.URIReference.from_string(uri_string, encoding) - if not lazy_normalize: - reference = reference.normalize() - userinfo, host, port = authority_from(reference, strict) - - return cls(scheme=reference.scheme, - userinfo=userinfo, - host=host, - port=port, - path=reference.path, - query=reference.query, - fragment=reference.fragment, - uri_ref=reference, - encoding=encoding) - - @property - def authority(self): - """Return the normalized authority.""" - return self.reference.authority - - def copy_with(self, scheme=misc.UseExisting, userinfo=misc.UseExisting, - host=misc.UseExisting, port=misc.UseExisting, - path=misc.UseExisting, query=misc.UseExisting, - fragment=misc.UseExisting): - """Create a copy of this instance replacing with specified parts.""" - attributes = zip(PARSED_COMPONENTS, - (scheme, userinfo, host, port, path, query, fragment)) - attrs_dict = {} - for name, value in attributes: - if value is misc.UseExisting: - value = getattr(self, name) - attrs_dict[name] = value - authority = self._generate_authority(attrs_dict) - ref = self.reference.copy_with(scheme=attrs_dict['scheme'], - authority=authority, - path=attrs_dict['path'], - query=attrs_dict['query'], - fragment=attrs_dict['fragment']) - return ParseResult(uri_ref=ref, encoding=self.encoding, **attrs_dict) - - def encode(self, encoding=None): - """Convert to an instance of ParseResultBytes.""" - encoding = encoding or self.encoding - attrs = dict( - zip(PARSED_COMPONENTS, - (attr.encode(encoding) if hasattr(attr, 'encode') else attr - for attr in self))) - return ParseResultBytes( - uri_ref=self.reference, - encoding=encoding, - **attrs - ) - - def unsplit(self, use_idna=False): - """Create a URI string from the components. - - :returns: The parsed URI reconstituted as a string. 
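``from_parts()`` and ``copy_with()`` above, sketched under the same assumptions:

from rfc3986.parseresult import ParseResult

result = ParseResult.from_parts(scheme='https', host='Example.COM', path='/x')
assert result.host == 'example.com'  # from_parts() normalizes eagerly

moved = result.copy_with(path='/y', query='a=b')
assert moved.geturl() == 'https://example.com/y?a=b'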
- :rtype: str - """ - parse_result = self - if use_idna and self.host: - hostbytes = self.host.encode('idna') - host = hostbytes.decode(self.encoding) - parse_result = self.copy_with(host=host) - return parse_result.reference.unsplit() - - -class ParseResultBytes(namedtuple('ParseResultBytes', PARSED_COMPONENTS), - ParseResultMixin): - """Compatibility shim for the urlparse.ParseResultBytes object.""" - - def __new__(cls, scheme, userinfo, host, port, path, query, fragment, - uri_ref, encoding='utf-8', lazy_normalize=True): - """Create a new ParseResultBytes instance.""" - parse_result = super(ParseResultBytes, cls).__new__( - cls, - scheme or None, - userinfo or None, - host, - port or None, - path or None, - query or None, - fragment or None) - parse_result.encoding = encoding - parse_result.reference = uri_ref - parse_result.lazy_normalize = lazy_normalize - return parse_result - - @classmethod - def from_parts(cls, scheme=None, userinfo=None, host=None, port=None, - path=None, query=None, fragment=None, encoding='utf-8', - lazy_normalize=True): - """Create a ParseResult instance from its parts.""" - authority = '' - if userinfo is not None: - authority += userinfo + '@' - if host is not None: - authority += host - if port is not None: - authority += ':{0}'.format(int(port)) - uri_ref = uri.URIReference(scheme=scheme, - authority=authority, - path=path, - query=query, - fragment=fragment, - encoding=encoding) - if not lazy_normalize: - uri_ref = uri_ref.normalize() - to_bytes = compat.to_bytes - userinfo, host, port = authority_from(uri_ref, strict=True) - return cls(scheme=to_bytes(scheme, encoding), - userinfo=to_bytes(userinfo, encoding), - host=to_bytes(host, encoding), - port=port, - path=to_bytes(path, encoding), - query=to_bytes(query, encoding), - fragment=to_bytes(fragment, encoding), - uri_ref=uri_ref, - encoding=encoding, - lazy_normalize=lazy_normalize) - - @classmethod - def from_string(cls, uri_string, encoding='utf-8', strict=True, - lazy_normalize=True): - """Parse a URI from the given unicode URI string. - - :param str uri_string: Unicode URI to be parsed into a reference. - :param str encoding: The encoding of the string provided - :param bool strict: Parse strictly according to :rfc:`3986` if True. - If False, parse similarly to the standard library's urlparse - function. 
- :returns: :class:`ParseResultBytes` or subclass thereof - """ - reference = uri.URIReference.from_string(uri_string, encoding) - if not lazy_normalize: - reference = reference.normalize() - userinfo, host, port = authority_from(reference, strict) - - to_bytes = compat.to_bytes - return cls(scheme=to_bytes(reference.scheme, encoding), - userinfo=to_bytes(userinfo, encoding), - host=to_bytes(host, encoding), - port=port, - path=to_bytes(reference.path, encoding), - query=to_bytes(reference.query, encoding), - fragment=to_bytes(reference.fragment, encoding), - uri_ref=reference, - encoding=encoding, - lazy_normalize=lazy_normalize) - - @property - def authority(self): - """Return the normalized authority.""" - return self.reference.authority.encode(self.encoding) - - def copy_with(self, scheme=misc.UseExisting, userinfo=misc.UseExisting, - host=misc.UseExisting, port=misc.UseExisting, - path=misc.UseExisting, query=misc.UseExisting, - fragment=misc.UseExisting, lazy_normalize=True): - """Create a copy of this instance replacing with specified parts.""" - attributes = zip(PARSED_COMPONENTS, - (scheme, userinfo, host, port, path, query, fragment)) - attrs_dict = {} - for name, value in attributes: - if value is misc.UseExisting: - value = getattr(self, name) - if not isinstance(value, bytes) and hasattr(value, 'encode'): - value = value.encode(self.encoding) - attrs_dict[name] = value - authority = self._generate_authority(attrs_dict) - to_str = compat.to_str - ref = self.reference.copy_with( - scheme=to_str(attrs_dict['scheme'], self.encoding), - authority=to_str(authority, self.encoding), - path=to_str(attrs_dict['path'], self.encoding), - query=to_str(attrs_dict['query'], self.encoding), - fragment=to_str(attrs_dict['fragment'], self.encoding) - ) - if not lazy_normalize: - ref = ref.normalize() - return ParseResultBytes( - uri_ref=ref, - encoding=self.encoding, - lazy_normalize=lazy_normalize, - **attrs_dict - ) - - def unsplit(self, use_idna=False): - """Create a URI bytes object from the components. - - :returns: The parsed URI reconstituted as a string. 
- :rtype: bytes - """ - parse_result = self - if use_idna and self.host: - # self.host is bytes, to encode to idna, we need to decode it - # first - host = self.host.decode(self.encoding) - hostbytes = host.encode('idna') - parse_result = self.copy_with(host=hostbytes) - if self.lazy_normalize: - parse_result = parse_result.copy_with(lazy_normalize=False) - uri = parse_result.reference.unsplit() - return uri.encode(self.encoding) - - -def split_authority(authority): - # Initialize our expected return values - userinfo = host = port = None - # Initialize an extra var we may need to use - extra_host = None - # Set-up rest in case there is no userinfo portion - rest = authority - - if '@' in authority: - userinfo, rest = authority.rsplit('@', 1) - - # Handle IPv6 host addresses - if rest.startswith('['): - host, rest = rest.split(']', 1) - host += ']' - - if ':' in rest: - extra_host, port = rest.split(':', 1) - elif not host and rest: - host = rest - - if extra_host and not host: - host = extra_host - - return userinfo, host, port - - -def authority_from(reference, strict): - try: - subauthority = reference.authority_info() - except exceptions.InvalidAuthority: - if strict: - raise - userinfo, host, port = split_authority(reference.authority) - else: - # Thanks to Richard Barrell for this idea: - # https://twitter.com/0x2ba22e11/status/617338811975139328 - userinfo, host, port = (subauthority.get(p) - for p in ('userinfo', 'host', 'port')) - - if port: - try: - port = int(port) - except ValueError: - raise exceptions.InvalidPort(port) - return userinfo, host, port diff --git a/pipenv/vendor/urllib3/packages/rfc3986/uri.py b/pipenv/vendor/urllib3/packages/rfc3986/uri.py deleted file mode 100644 index d1d71505e2..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/uri.py +++ /dev/null @@ -1,153 +0,0 @@ -"""Module containing the implementation of the URIReference class.""" -# -*- coding: utf-8 -*- -# Copyright (c) 2014 Rackspace -# Copyright (c) 2015 Ian Stapleton Cordasco -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from collections import namedtuple - -from . import compat -from . import misc -from . import normalizers -from ._mixin import URIMixin - - -class URIReference(namedtuple('URIReference', misc.URI_COMPONENTS), URIMixin): - """Immutable object representing a parsed URI Reference. - - .. note:: - - This class is not intended to be directly instantiated by the user. - - This object exposes attributes for the following components of a - URI: - - - scheme - - authority - - path - - query - - fragment - - .. attribute:: scheme - - The scheme that was parsed for the URI Reference. For example, - ``http``, ``https``, ``smtp``, ``imap``, etc. - - .. attribute:: authority - - Component of the URI that contains the user information, host, - and port sub-components. For example, - ``google.com``, ``127.0.0.1:5000``, ``username@[::1]``, - ``username:password@example.com:443``, etc. - - .. attribute:: path - - The path that was parsed for the given URI Reference. 
For example, - ``/``, ``/index.php``, etc. - - .. attribute:: query - - The query component for a given URI Reference. For example, ``a=b``, - ``a=b%20c``, ``a=b+c``, ``a=b,c=d,e=%20f``, etc. - - .. attribute:: fragment - - The fragment component of a URI. For example, ``section-3.1``. - - This class also provides extra attributes for easier access to information - like the subcomponents of the authority component. - - .. attribute:: userinfo - - The user information parsed from the authority. - - .. attribute:: host - - The hostname, IPv4, or IPv6 adddres parsed from the authority. - - .. attribute:: port - - The port parsed from the authority. - """ - - slots = () - - def __new__(cls, scheme, authority, path, query, fragment, - encoding='utf-8'): - """Create a new URIReference.""" - ref = super(URIReference, cls).__new__( - cls, - scheme or None, - authority or None, - path or None, - query, - fragment) - ref.encoding = encoding - return ref - - __hash__ = tuple.__hash__ - - def __eq__(self, other): - """Compare this reference to another.""" - other_ref = other - if isinstance(other, tuple): - other_ref = URIReference(*other) - elif not isinstance(other, URIReference): - try: - other_ref = URIReference.from_string(other) - except TypeError: - raise TypeError( - 'Unable to compare URIReference() to {0}()'.format( - type(other).__name__)) - - # See http://tools.ietf.org/html/rfc3986#section-6.2 - naive_equality = tuple(self) == tuple(other_ref) - return naive_equality or self.normalized_equality(other_ref) - - def normalize(self): - """Normalize this reference as described in Section 6.2.2. - - This is not an in-place normalization. Instead this creates a new - URIReference. - - :returns: A new reference object with normalized components. - :rtype: URIReference - """ - # See http://tools.ietf.org/html/rfc3986#section-6.2.2 for logic in - # this method. - return URIReference(normalizers.normalize_scheme(self.scheme or ''), - normalizers.normalize_authority( - (self.userinfo, self.host, self.port)), - normalizers.normalize_path(self.path or ''), - normalizers.normalize_query(self.query), - normalizers.normalize_fragment(self.fragment), - self.encoding) - - @classmethod - def from_string(cls, uri_string, encoding='utf-8'): - """Parse a URI reference from the given unicode URI string. - - :param str uri_string: Unicode URI to be parsed into a reference. - :param str encoding: The encoding of the string provided - :returns: :class:`URIReference` or subclass thereof - """ - uri_string = compat.to_str(uri_string, encoding) - - split_uri = misc.URI_MATCHER.match(uri_string).groupdict() - return cls( - split_uri['scheme'], split_uri['authority'], - normalizers.encode_component(split_uri['path'], encoding), - normalizers.encode_component(split_uri['query'], encoding), - normalizers.encode_component(split_uri['fragment'], encoding), - encoding, - ) diff --git a/pipenv/vendor/urllib3/packages/rfc3986/validators.py b/pipenv/vendor/urllib3/packages/rfc3986/validators.py deleted file mode 100644 index 7fc97215b1..0000000000 --- a/pipenv/vendor/urllib3/packages/rfc3986/validators.py +++ /dev/null @@ -1,450 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Ian Stapleton Cordasco -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Module containing the validation logic for rfc3986.""" -from . import exceptions -from . import misc -from . import normalizers - - -class Validator(object): - """Object used to configure validation of all objects in rfc3986. - - .. versionadded:: 1.0 - - Example usage:: - - >>> from rfc3986 import api, validators - >>> uri = api.uri_reference('https://github.com/') - >>> validator = validators.Validator().require_presence_of( - ... 'scheme', 'host', 'path', - ... ).allow_schemes( - ... 'http', 'https', - ... ).allow_hosts( - ... '127.0.0.1', 'github.com', - ... ) - >>> validator.validate(uri) - >>> invalid_uri = rfc3986.uri_reference('imap://mail.google.com') - >>> validator.validate(invalid_uri) - Traceback (most recent call last): - ... - rfc3986.exceptions.MissingComponentError: ('path was required but - missing', URIReference(scheme=u'imap', authority=u'mail.google.com', - path=None, query=None, fragment=None), ['path']) - - """ - - COMPONENT_NAMES = frozenset([ - 'scheme', - 'userinfo', - 'host', - 'port', - 'path', - 'query', - 'fragment', - ]) - - def __init__(self): - """Initialize our default validations.""" - self.allowed_schemes = set() - self.allowed_hosts = set() - self.allowed_ports = set() - self.allow_password = True - self.required_components = { - 'scheme': False, - 'userinfo': False, - 'host': False, - 'port': False, - 'path': False, - 'query': False, - 'fragment': False, - } - self.validated_components = self.required_components.copy() - - def allow_schemes(self, *schemes): - """Require the scheme to be one of the provided schemes. - - .. versionadded:: 1.0 - - :param schemes: - Schemes, without ``://`` that are allowed. - :returns: - The validator instance. - :rtype: - Validator - """ - for scheme in schemes: - self.allowed_schemes.add(normalizers.normalize_scheme(scheme)) - return self - - def allow_hosts(self, *hosts): - """Require the host to be one of the provided hosts. - - .. versionadded:: 1.0 - - :param hosts: - Hosts that are allowed. - :returns: - The validator instance. - :rtype: - Validator - """ - for host in hosts: - self.allowed_hosts.add(normalizers.normalize_host(host)) - return self - - def allow_ports(self, *ports): - """Require the port to be one of the provided ports. - - .. versionadded:: 1.0 - - :param ports: - Ports that are allowed. - :returns: - The validator instance. - :rtype: - Validator - """ - for port in ports: - port_int = int(port, base=10) - if 0 <= port_int <= 65535: - self.allowed_ports.add(port) - return self - - def allow_use_of_password(self): - """Allow passwords to be present in the URI. - - .. versionadded:: 1.0 - - :returns: - The validator instance. - :rtype: - Validator - """ - self.allow_password = True - return self - - def forbid_use_of_password(self): - """Prevent passwords from being included in the URI. - - .. versionadded:: 1.0 - - :returns: - The validator instance. - :rtype: - Validator - """ - self.allow_password = False - return self - - def check_validity_of(self, *components): - """Check the validity of the components provided. - - This can be specified repeatedly. - - .. 
versionadded:: 1.1 - - :param components: - Names of components from :attr:`Validator.COMPONENT_NAMES`. - :returns: - The validator instance. - :rtype: - Validator - """ - components = [c.lower() for c in components] - for component in components: - if component not in self.COMPONENT_NAMES: - raise ValueError( - '"{}" is not a valid component'.format(component) - ) - self.validated_components.update({ - component: True for component in components - }) - return self - - def require_presence_of(self, *components): - """Require the components provided. - - This can be specified repeatedly. - - .. versionadded:: 1.0 - - :param components: - Names of components from :attr:`Validator.COMPONENT_NAMES`. - :returns: - The validator instance. - :rtype: - Validator - """ - components = [c.lower() for c in components] - for component in components: - if component not in self.COMPONENT_NAMES: - raise ValueError( - '"{}" is not a valid component'.format(component) - ) - self.required_components.update({ - component: True for component in components - }) - return self - - def validate(self, uri): - """Check a URI for conditions specified on this validator. - - .. versionadded:: 1.0 - - :param uri: - Parsed URI to validate. - :type uri: - rfc3986.uri.URIReference - :raises MissingComponentError: - When a required component is missing. - :raises UnpermittedComponentError: - When a component is not one of those allowed. - :raises PasswordForbidden: - When a password is present in the userinfo component but is - not permitted by configuration. - :raises InvalidComponentsError: - When a component was found to be invalid. - """ - if not self.allow_password: - check_password(uri) - - required_components = [ - component - for component, required in self.required_components.items() - if required - ] - validated_components = [ - component - for component, required in self.validated_components.items() - if required - ] - if required_components: - ensure_required_components_exist(uri, required_components) - if validated_components: - ensure_components_are_valid(uri, validated_components) - - ensure_one_of(self.allowed_schemes, uri, 'scheme') - ensure_one_of(self.allowed_hosts, uri, 'host') - ensure_one_of(self.allowed_ports, uri, 'port') - - -def check_password(uri): - """Assert that there is no password present in the uri.""" - userinfo = uri.userinfo - if not userinfo: - return - credentials = userinfo.split(':', 1) - if len(credentials) <= 1: - return - raise exceptions.PasswordForbidden(uri) - - -def ensure_one_of(allowed_values, uri, attribute): - """Assert that the uri's attribute is one of the allowed values.""" - value = getattr(uri, attribute) - if value is not None and allowed_values and value not in allowed_values: - raise exceptions.UnpermittedComponentError( - attribute, value, allowed_values, - ) - - -def ensure_required_components_exist(uri, required_components): - """Assert that all required components are present in the URI.""" - missing_components = sorted([ - component - for component in required_components - if getattr(uri, component) is None - ]) - if missing_components: - raise exceptions.MissingComponentError(uri, *missing_components) - - -def is_valid(value, matcher, require): - """Determine if a value is valid based on the provided matcher. - - :param str value: - Value to validate. - :param matcher: - Compiled regular expression to use to validate the value. - :param require: - Whether or not the value is required. 
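The fluent ``Validator`` configuration above composes allow-lists and required components before a single ``validate()`` call. A short sketch against the standalone ``rfc3986`` package (the import paths below are the upstream ones, not the vendored copy being deleted)::

    from rfc3986 import uri_reference
    from rfc3986.exceptions import UnpermittedComponentError
    from rfc3986.validators import Validator

    validator = (
        Validator()
        .allow_schemes("https")
        .require_presence_of("scheme", "host")
        .forbid_use_of_password()
        .check_validity_of("host", "path")
    )

    # Passes every configured check.
    validator.validate(uri_reference("https://github.com/pypa/pipenv"))

    try:
        validator.validate(uri_reference("ftp://github.com/"))
    except UnpermittedComponentError:
        print("scheme 'ftp' is not in the allowed set")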
- """ - if require: - return (value is not None - and matcher.match(value)) - - # require is False and value is not None - return value is None or matcher.match(value) - - -def authority_is_valid(authority, host=None, require=False): - """Determine if the authority string is valid. - - :param str authority: - The authority to validate. - :param str host: - (optional) The host portion of the authority to validate. - :param bool require: - (optional) Specify if authority must not be None. - :returns: - ``True`` if valid, ``False`` otherwise - :rtype: - bool - """ - validated = is_valid(authority, misc.SUBAUTHORITY_MATCHER, require) - if validated and host is not None: - return host_is_valid(host, require) - return validated - - -def host_is_valid(host, require=False): - """Determine if the host string is valid. - - :param str host: - The host to validate. - :param bool require: - (optional) Specify if host must not be None. - :returns: - ``True`` if valid, ``False`` otherwise - :rtype: - bool - """ - validated = is_valid(host, misc.HOST_MATCHER, require) - if validated and host is not None and misc.IPv4_MATCHER.match(host): - return valid_ipv4_host_address(host) - elif validated and host is not None and misc.IPv6_MATCHER.match(host): - return misc.IPv6_NO_RFC4007_MATCHER.match(host) is not None - return validated - - -def scheme_is_valid(scheme, require=False): - """Determine if the scheme is valid. - - :param str scheme: - The scheme string to validate. - :param bool require: - (optional) Set to ``True`` to require the presence of a scheme. - :returns: - ``True`` if the scheme is valid. ``False`` otherwise. - :rtype: - bool - """ - return is_valid(scheme, misc.SCHEME_MATCHER, require) - - -def path_is_valid(path, require=False): - """Determine if the path component is valid. - - :param str path: - The path string to validate. - :param bool require: - (optional) Set to ``True`` to require the presence of a path. - :returns: - ``True`` if the path is valid. ``False`` otherwise. - :rtype: - bool - """ - return is_valid(path, misc.PATH_MATCHER, require) - - -def query_is_valid(query, require=False): - """Determine if the query component is valid. - - :param str query: - The query string to validate. - :param bool require: - (optional) Set to ``True`` to require the presence of a query. - :returns: - ``True`` if the query is valid. ``False`` otherwise. - :rtype: - bool - """ - return is_valid(query, misc.QUERY_MATCHER, require) - - -def fragment_is_valid(fragment, require=False): - """Determine if the fragment component is valid. - - :param str fragment: - The fragment string to validate. - :param bool require: - (optional) Set to ``True`` to require the presence of a fragment. - :returns: - ``True`` if the fragment is valid. ``False`` otherwise. - :rtype: - bool - """ - return is_valid(fragment, misc.FRAGMENT_MATCHER, require) - - -def valid_ipv4_host_address(host): - """Determine if the given host is a valid IPv4 address.""" - # If the host exists, and it might be IPv4, check each byte in the - # address. 
- return all([0 <= int(byte, base=10) <= 255 for byte in host.split('.')]) - - -_COMPONENT_VALIDATORS = { - 'scheme': scheme_is_valid, - 'path': path_is_valid, - 'query': query_is_valid, - 'fragment': fragment_is_valid, -} - -_SUBAUTHORITY_VALIDATORS = set(['userinfo', 'host', 'port']) - - -def subauthority_component_is_valid(uri, component): - """Determine if the userinfo, host, and port are valid.""" - try: - subauthority_dict = uri.authority_info() - except exceptions.InvalidAuthority: - return False - - # If we can parse the authority into sub-components and we're not - # validating the port, we can assume it's valid. - if component == 'host': - return host_is_valid(subauthority_dict['host']) - elif component != 'port': - return True - - try: - port = int(subauthority_dict['port']) - except TypeError: - # If the port wasn't provided it'll be None and int(None) raises a - # TypeError - return True - - return (0 <= port <= 65535) - - -def ensure_components_are_valid(uri, validated_components): - """Assert that all components are valid in the URI.""" - invalid_components = set([]) - for component in validated_components: - if component in _SUBAUTHORITY_VALIDATORS: - if not subauthority_component_is_valid(uri, component): - invalid_components.add(component) - # Python's peephole optimizer means that while this continue *is* - # actually executed, coverage.py cannot detect that. See also, - # https://bitbucket.org/ned/coveragepy/issues/198/continue-marked-as-not-covered - continue # nocov: Python 2.7, 3.3, 3.4 - - validator = _COMPONENT_VALIDATORS[component] - if not validator(getattr(uri, component)): - invalid_components.add(component) - - if invalid_components: - raise exceptions.InvalidComponentsError(uri, *invalid_components) diff --git a/pipenv/vendor/urllib3/packages/six.py b/pipenv/vendor/urllib3/packages/six.py index 190c0239cd..314424099f 100644 --- a/pipenv/vendor/urllib3/packages/six.py +++ b/pipenv/vendor/urllib3/packages/six.py @@ -1,6 +1,4 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2015 Benjamin Peterson +# Copyright (c) 2010-2019 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -20,6 +18,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +"""Utilities for writing code that runs on Python 2 and 3""" + from __future__ import absolute_import import functools @@ -29,7 +29,7 @@ import types __author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.10.0" +__version__ = "1.12.0" # Useful for very coarse version differentiation. @@ -38,15 +38,15 @@ PY34 = sys.version_info[0:2] >= (3, 4) if PY3: - string_types = str, - integer_types = int, - class_types = type, + string_types = (str,) + integer_types = (int,) + class_types = (type,) text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: - string_types = basestring, + string_types = (basestring,) integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode @@ -58,9 +58,9 @@ else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): - def __len__(self): return 1 << 31 + try: len(X()) except OverflowError: @@ -84,7 +84,6 @@ def _import_module(name): class _LazyDescr(object): - def __init__(self, name): self.name = name @@ -101,7 +100,6 @@ def __get__(self, obj, tp): class MovedModule(_LazyDescr): - def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: @@ -122,7 +120,6 @@ def __getattr__(self, attr): class _LazyModule(types.ModuleType): - def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ @@ -137,7 +134,6 @@ def __dir__(self): class MovedAttribute(_LazyDescr): - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: @@ -221,28 +217,36 @@ def get_code(self, fullname): Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None + get_source = get_code # same as get_code + _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" + __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute( + "filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse" + ), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute( + "reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload" + ), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), @@ -251,7 +255,9 @@ class _MovedItems(_LazyModule): MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedAttribute( + "zip_longest", "itertools", "itertools", "izip_longest", "zip_longest" + ), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), @@ -262,10 +268,13 @@ class _MovedItems(_LazyModule): MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule( + "email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart" + ), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", 
"email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), @@ -283,15 +292,12 @@ class _MovedItems(_LazyModule): MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), + MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), @@ -301,9 +307,7 @@ class _MovedItems(_LazyModule): ] # Add windows specific modules. if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] + _moved_attributes += [MovedModule("winreg", "_winreg")] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) @@ -337,10 +341,14 @@ class Module_six_moves_urllib_parse(_LazyModule): MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute( + "unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes" + ), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), @@ -353,8 +361,11 @@ class Module_six_moves_urllib_parse(_LazyModule): Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") +_importer._add_module( + Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", + "moves.urllib.parse", +) class Module_six_moves_urllib_error(_LazyModule): @@ -373,8 +384,11 @@ class Module_six_moves_urllib_error(_LazyModule): Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") +_importer._add_module( + Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", + "moves.urllib.error", +) class Module_six_moves_urllib_request(_LazyModule): @@ -416,6 +430,8 @@ class Module_six_moves_urllib_request(_LazyModule): MovedAttribute("URLopener", 
"urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) @@ -423,8 +439,11 @@ class Module_six_moves_urllib_request(_LazyModule): Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") +_importer._add_module( + Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", + "moves.urllib.request", +) class Module_six_moves_urllib_response(_LazyModule): @@ -444,8 +463,11 @@ class Module_six_moves_urllib_response(_LazyModule): Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") +_importer._add_module( + Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", + "moves.urllib.response", +) class Module_six_moves_urllib_robotparser(_LazyModule): @@ -454,21 +476,27 @@ class Module_six_moves_urllib_robotparser(_LazyModule): _urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser") ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes +Module_six_moves_urllib_robotparser._moved_attributes = ( + _urllib_robotparser_moved_attributes +) -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") +_importer._add_module( + Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", + "moves.urllib.robotparser", +) class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") @@ -477,10 +505,12 @@ class Module_six_moves_urllib(types.ModuleType): robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] + return ["parse", "error", "request", "response", "robotparser"] + -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") +_importer._add_module( + Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib" +) def add_move(move): @@ -520,19 +550,24 @@ def remove_move(name): try: advance_iterator = next except NameError: + def advance_iterator(it): return it.next() + + next = advance_iterator try: callable = callable except NameError: + def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: + def get_unbound_function(unbound): return unbound @@ -543,6 +578,7 @@ def create_unbound_method(func, cls): Iterator = object else: + def 
get_unbound_function(unbound): return unbound.im_func @@ -553,13 +589,13 @@ def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): - def next(self): return type(self).__next__(self) callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") +_add_doc( + get_unbound_function, """Get the function out of a possibly unbound function""" +) get_method_function = operator.attrgetter(_meth_func) @@ -571,6 +607,7 @@ def next(self): if PY3: + def iterkeys(d, **kw): return iter(d.keys(**kw)) @@ -589,6 +626,7 @@ def iterlists(d, **kw): viewitems = operator.methodcaller("items") else: + def iterkeys(d, **kw): return d.iterkeys(**kw) @@ -609,28 +647,33 @@ def iterlists(d, **kw): _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") +_add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc( + iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary." +) if PY3: + def b(s): return s.encode("latin-1") def u(s): return s + unichr = chr import struct + int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io + StringIO = io.StringIO BytesIO = io.BytesIO + del io _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" @@ -639,12 +682,15 @@ def u(s): _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: + def b(s): return s + # Workaround for standalone backslash def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + return unicode(s.replace(r"\\", r"\\\\"), "unicode_escape") + unichr = unichr int2byte = chr @@ -653,8 +699,10 @@ def byte2int(bs): def indexbytes(buf, i): return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) import StringIO + StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" @@ -679,13 +727,19 @@ def assertRegex(self, *args, **kwargs): exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + else: + def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: @@ -698,28 +752,45 @@ def exec_(_code_, _globs_=None, _locs_=None): _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") + exec_( + """def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""" + ) if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value -""") + exec_( + """def raise_from(value, from_value): + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None +""" + ) elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - raise value from 
from_value -""") + exec_( + """def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""" + ) else: + def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: + def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) @@ -730,14 +801,17 @@ def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. - if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): + if ( + isinstance(fp, file) + and isinstance(data, unicode) + and fp.encoding is not None + ): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) + want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: @@ -773,6 +847,8 @@ def write(data): write(sep) write(arg) write(end) + + if sys.version_info[:2] < (3, 3): _print = print_ @@ -783,16 +859,24 @@ def print_(*args, **kwargs): if flush and fp is not None: fp.flush() + _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): + + def wraps( + wrapped, + assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES, + ): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f + return wrapper + + else: wraps = functools.wraps @@ -802,29 +886,95 @@ def with_metaclass(meta, *bases): # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. - class metaclass(meta): - + class metaclass(type): def __new__(cls, name, this_bases, d): return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + + return type.__new__(metaclass, "temporary_class", (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') + slots = orig_vars.get("__slots__") if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) + orig_vars.pop("__dict__", None) + orig_vars.pop("__weakref__", None) + if hasattr(cls, "__qualname__"): + orig_vars["__qualname__"] = cls.__qualname__ return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper +def ensure_binary(s, encoding="utf-8", errors="strict"): + """Coerce **s** to six.binary_type. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> encoded to `bytes` + - `bytes` -> `bytes` + """ + if isinstance(s, text_type): + return s.encode(encoding, errors) + elif isinstance(s, binary_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + +def ensure_str(s, encoding="utf-8", errors="strict"): + """Coerce *s* to `str`. 
+ + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if not isinstance(s, (text_type, binary_type)): + raise TypeError("not expecting type '%s'" % type(s)) + if PY2 and isinstance(s, text_type): + s = s.encode(encoding, errors) + elif PY3 and isinstance(s, binary_type): + s = s.decode(encoding, errors) + return s + + +def ensure_text(s, encoding="utf-8", errors="strict"): + """Coerce *s* to six.text_type. + + For Python 2: + - `unicode` -> `unicode` + - `str` -> `unicode` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if isinstance(s, binary_type): + return s.decode(encoding, errors) + elif isinstance(s, text_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. @@ -834,12 +984,13 @@ def python_2_unicode_compatible(klass): returning text and apply this decorator to the class. """ if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) + if "__str__" not in klass.__dict__: + raise ValueError( + "@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % klass.__name__ + ) klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + klass.__str__ = lambda self: self.__unicode__().encode("utf-8") return klass @@ -859,8 +1010,10 @@ def python_2_unicode_compatible(klass): # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): + if ( + type(importer).__name__ == "_SixMetaPathImporter" + and importer.name == __name__ + ): del sys.meta_path[i] break del i, importer diff --git a/pipenv/vendor/urllib3/packages/ssl_match_hostname/__init__.py b/pipenv/vendor/urllib3/packages/ssl_match_hostname/__init__.py index d6594eb264..75b6bb1cf0 100644 --- a/pipenv/vendor/urllib3/packages/ssl_match_hostname/__init__.py +++ b/pipenv/vendor/urllib3/packages/ssl_match_hostname/__init__.py @@ -16,4 +16,4 @@ from ._implementation import CertificateError, match_hostname # Not needed, but documenting what we provide. -__all__ = ('CertificateError', 'match_hostname') +__all__ = ("CertificateError", "match_hostname") diff --git a/pipenv/vendor/urllib3/packages/ssl_match_hostname/_implementation.py b/pipenv/vendor/urllib3/packages/ssl_match_hostname/_implementation.py index d6e66c0196..689208d3c6 100644 --- a/pipenv/vendor/urllib3/packages/ssl_match_hostname/_implementation.py +++ b/pipenv/vendor/urllib3/packages/ssl_match_hostname/_implementation.py @@ -15,7 +15,7 @@ except ImportError: ipaddress = None -__version__ = '3.5.0.1' +__version__ = "3.5.0.1" class CertificateError(ValueError): @@ -33,18 +33,19 @@ def _dnsname_match(dn, hostname, max_wildcards=1): # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') - parts = dn.split(r'.') + parts = dn.split(r".") leftmost = parts[0] remainder = parts[1:] - wildcards = leftmost.count('*') + wildcards = leftmost.count("*") if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. 
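The vendored ``six`` upgrade above brings in the ``ensure_binary`` / ``ensure_str`` / ``ensure_text`` coercion helpers added upstream in six 1.12. A minimal demonstration; ``pipenv.vendor.urllib3.packages.six`` is the vendored path from this diff, and a plain ``import six`` behaves the same::

    from pipenv.vendor.urllib3.packages import six

    assert six.ensure_text(b"caf\xc3\xa9") == u"caf\xe9"
    assert six.ensure_binary(u"caf\xe9") == b"caf\xc3\xa9"
    # ensure_str() always returns the native str type: bytes stay str on
    # Python 2, while bytes are decoded to text on Python 3.
    assert isinstance(six.ensure_str(b"pipenv"), str)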
A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) + "too many wildcards in certificate DNS name: " + repr(dn) + ) # speed up common case w/o wildcards if not wildcards: @@ -53,11 +54,11 @@ def _dnsname_match(dn, hostname, max_wildcards=1): # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. - if leftmost == '*': + if leftmost == "*": # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + pats.append("[^.]+") + elif leftmost.startswith("xn--") or hostname.startswith("xn--"): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or @@ -65,21 +66,22 @@ def _dnsname_match(dn, hostname, max_wildcards=1): pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + pats.append(re.escape(leftmost).replace(r"\*", "[^.]*")) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE) return pat.match(hostname) def _to_unicode(obj): if isinstance(obj, str) and sys.version_info < (3,): - obj = unicode(obj, encoding='ascii', errors='strict') + obj = unicode(obj, encoding="ascii", errors="strict") return obj + def _ipaddress_match(ipname, host_ip): """Exact matching of IP addresses. @@ -101,9 +103,11 @@ def match_hostname(cert, hostname): returns nothing. """ if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") + raise ValueError( + "empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED" + ) try: # Divergence from upstream: ipaddress can't handle byte str host_ip = ipaddress.ip_address(_to_unicode(hostname)) @@ -122,35 +126,35 @@ def match_hostname(cert, hostname): else: raise dnsnames = [] - san = cert.get('subjectAltName', ()) + san = cert.get("subjectAltName", ()) for key, value in san: - if key == 'DNS': + if key == "DNS": if host_ip is None and _dnsname_match(value, hostname): return dnsnames.append(value) - elif key == 'IP Address': + elif key == "IP Address": if host_ip is not None and _ipaddress_match(value, host_ip): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName - for sub in cert.get('subject', ()): + for sub in cert.get("subject", ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. 
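The reformatted ``match_hostname`` / ``_dnsname_match`` pair above implements RFC 6125 wildcard matching against a certificate's ``subjectAltName`` entries. A sketch of the public entry point, using a hand-built cert dict purely for illustration::

    from pipenv.vendor.urllib3.packages.ssl_match_hostname import (
        CertificateError,
        match_hostname,
    )

    cert = {"subjectAltName": (("DNS", "*.example.com"), ("DNS", "example.com"))}

    match_hostname(cert, "www.example.com")  # wildcard matches; returns None

    try:
        match_hostname(cert, "www.attacker.net")
    except CertificateError as exc:
        print(exc)  # hostname doesn't match either of the SAN entries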
- if key == 'commonName': + if key == "commonName": if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) + raise CertificateError( + "hostname %r " + "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames))) + ) elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) + raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0])) else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") + raise CertificateError( + "no appropriate commonName or subjectAltName fields were found" + ) diff --git a/pipenv/vendor/urllib3/poolmanager.py b/pipenv/vendor/urllib3/poolmanager.py index a6ade6e905..242a2f8203 100644 --- a/pipenv/vendor/urllib3/poolmanager.py +++ b/pipenv/vendor/urllib3/poolmanager.py @@ -14,48 +14,55 @@ from .util.retry import Retry -__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] +__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) -SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', - 'ssl_version', 'ca_cert_dir', 'ssl_context', - 'key_password') +SSL_KEYWORDS = ( + "key_file", + "cert_file", + "cert_reqs", + "ca_certs", + "ssl_version", + "ca_cert_dir", + "ssl_context", + "key_password", +) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( - 'key_scheme', # str - 'key_host', # str - 'key_port', # int - 'key_timeout', # int or float or Timeout - 'key_retries', # int or Retry - 'key_strict', # bool - 'key_block', # bool - 'key_source_address', # str - 'key_key_file', # str - 'key_key_password', # str - 'key_cert_file', # str - 'key_cert_reqs', # str - 'key_ca_certs', # str - 'key_ssl_version', # str - 'key_ca_cert_dir', # str - 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext - 'key_maxsize', # int - 'key_headers', # dict - 'key__proxy', # parsed proxy url - 'key__proxy_headers', # dict - 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples - 'key__socks_options', # dict - 'key_assert_hostname', # bool or string - 'key_assert_fingerprint', # str - 'key_server_hostname', # str + "key_scheme", # str + "key_host", # str + "key_port", # int + "key_timeout", # int or float or Timeout + "key_retries", # int or Retry + "key_strict", # bool + "key_block", # bool + "key_source_address", # str + "key_key_file", # str + "key_key_password", # str + "key_cert_file", # str + "key_cert_reqs", # str + "key_ca_certs", # str + "key_ssl_version", # str + "key_ca_cert_dir", # str + "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext + "key_maxsize", # int + "key_headers", # dict + "key__proxy", # parsed proxy url + "key__proxy_headers", # dict + "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples + "key__socks_options", # dict + "key_assert_hostname", # bool or string + "key_assert_fingerprint", # str + "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. 
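``_key_fields`` above enumerates everything that can distinguish one connection pool from another, and ``_default_key_normalizer`` folds a request context into a hashable ``PoolKey``. A sketch using that private helper (internal API, shown only to illustrate the normalization; not something callers should rely on)::

    import functools

    from pipenv.vendor.urllib3.poolmanager import PoolKey, _default_key_normalizer

    normalize = functools.partial(_default_key_normalizer, PoolKey)

    # Scheme and host are lowercased before the namedtuple is built, so both
    # contexts map to the same pool key (and therefore the same pool).
    assert normalize({"scheme": "HTTP", "host": "Example.COM", "port": 80}) == \
        normalize({"scheme": "http", "host": "example.com", "port": 80})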
-PoolKey = collections.namedtuple('PoolKey', _key_fields) +PoolKey = collections.namedtuple("PoolKey", _key_fields) def _default_key_normalizer(key_class, request_context): @@ -80,24 +87,24 @@ def _default_key_normalizer(key_class, request_context): """ # Since we mutate the dictionary, make a copy first context = request_context.copy() - context['scheme'] = context['scheme'].lower() - context['host'] = context['host'].lower() + context["scheme"] = context["scheme"].lower() + context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets - for key in ('headers', '_proxy_headers', '_socks_options'): + for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. - socket_opts = context.get('socket_options') + socket_opts = context.get("socket_options") if socket_opts is not None: - context['socket_options'] = tuple(socket_opts) + context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): - context['key_' + key] = context.pop(key) + context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: @@ -112,14 +119,11 @@ def _default_key_normalizer(key_class, request_context): #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { - 'http': functools.partial(_default_key_normalizer, PoolKey), - 'https': functools.partial(_default_key_normalizer, PoolKey), + "http": functools.partial(_default_key_normalizer, PoolKey), + "https": functools.partial(_default_key_normalizer, PoolKey), } -pool_classes_by_scheme = { - 'http': HTTPConnectionPool, - 'https': HTTPSConnectionPool, -} +pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): @@ -155,8 +159,7 @@ class PoolManager(RequestMethods): def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw - self.pools = RecentlyUsedContainer(num_pools, - dispose_func=lambda p: p.close()) + self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) # Locally set the pool classes and keys so other PoolManagers can # override them. @@ -189,10 +192,10 @@ def _new_pool(self, scheme, host, port, request_context=None): # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. - for key in ('scheme', 'host', 'port'): + for key in ("scheme", "host", "port"): request_context.pop(key, None) - if scheme == 'http': + if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) @@ -207,7 +210,7 @@ def clear(self): """ self.pools.clear() - def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): + def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`ConnectionPool` based on the host, port, and scheme. 
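``PoolManager`` itself is the user-facing piece: it keeps an LRU container of at most ``num_pools`` connection pools and hands out pools keyed as above. A typical session, assuming the vendored import path (a plain ``import urllib3`` is equivalent)::

    from pipenv.vendor import urllib3

    http = urllib3.PoolManager(num_pools=10, maxsize=4)

    r = http.request("GET", "https://pypi.org/simple/")
    print(r.status)

    # Subsequent requests to the same scheme/host/port reuse one pool.
    pool = http.connection_from_host("pypi.org", port=443, scheme="https")
    print(type(pool).__name__)  # HTTPSConnectionPool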
@@ -222,11 +225,11 @@ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None) raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) - request_context['scheme'] = scheme or 'http' + request_context["scheme"] = scheme or "http" if not port: - port = port_by_scheme.get(request_context['scheme'].lower(), 80) - request_context['port'] = port - request_context['host'] = host + port = port_by_scheme.get(request_context["scheme"].lower(), 80) + request_context["port"] = port + request_context["host"] = host return self.connection_from_context(request_context) @@ -237,7 +240,7 @@ def connection_from_context(self, request_context): ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ - scheme = request_context['scheme'].lower() + scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme[scheme] pool_key = pool_key_constructor(request_context) @@ -259,9 +262,9 @@ def connection_from_pool_key(self, pool_key, request_context=None): return pool # Make a fresh ConnectionPool of the desired type - scheme = request_context['scheme'] - host = request_context['host'] - port = request_context['port'] + scheme = request_context["scheme"] + host = request_context["host"] + port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool @@ -279,8 +282,9 @@ def connection_from_url(self, url, pool_kwargs=None): not used. """ u = parse_url(url) - return self.connection_from_host(u.host, port=u.port, scheme=u.scheme, - pool_kwargs=pool_kwargs) + return self.connection_from_host( + u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs + ) def _merge_pool_kwargs(self, override): """ @@ -314,11 +318,11 @@ def urlopen(self, method, url, redirect=True, **kw): u = parse_url(url) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) - kw['assert_same_host'] = False - kw['redirect'] = False + kw["assert_same_host"] = False + kw["redirect"] = False - if 'headers' not in kw: - kw['headers'] = self.headers.copy() + if "headers" not in kw: + kw["headers"] = self.headers.copy() if self.proxy is not None and u.scheme == "http": response = conn.urlopen(method, url, **kw) @@ -334,21 +338,22 @@ def urlopen(self, method, url, redirect=True, **kw): # RFC 7231, Section 6.4.4 if response.status == 303: - method = 'GET' + method = "GET" - retries = kw.get('retries') + retries = kw.get("retries") if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) # Strip headers marked as unsafe to forward to the redirected location. # Check remove_headers_on_redirect to avoid a potential network call within # conn.is_same_host() which may use socket.gethostbyname() in the future. 
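The ``urlopen`` hunk above is where cross-host redirect hygiene happens: headers listed in ``Retry.remove_headers_on_redirect`` are stripped before the request is re-issued against a different host. A sketch, with a placeholder token value::

    from pipenv.vendor import urllib3
    from pipenv.vendor.urllib3.util.retry import Retry

    retries = Retry(redirect=3, remove_headers_on_redirect=["Authorization"])

    http = urllib3.PoolManager()
    # If the response redirects off-host, the Authorization header is
    # dropped before the redirected request is sent.
    http.request(
        "GET",
        "https://example.com/login",
        headers={"Authorization": "Bearer not-a-real-token"},
        retries=retries,
    )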
- if (retries.remove_headers_on_redirect - and not conn.is_same_host(redirect_location)): - headers = list(six.iterkeys(kw['headers'])) + if retries.remove_headers_on_redirect and not conn.is_same_host( + redirect_location + ): + headers = list(six.iterkeys(kw["headers"])) for header in headers: if header.lower() in retries.remove_headers_on_redirect: - kw['headers'].pop(header, None) + kw["headers"].pop(header, None) try: retries = retries.increment(method, url, response=response, _pool=conn) @@ -357,8 +362,8 @@ def urlopen(self, method, url, redirect=True, **kw): raise return response - kw['retries'] = retries - kw['redirect'] = redirect + kw["retries"] = retries + kw["redirect"] = redirect log.info("Redirecting %s -> %s", url, redirect_location) return self.urlopen(method, redirect_location, **kw) @@ -391,12 +396,21 @@ class ProxyManager(PoolManager): """ - def __init__(self, proxy_url, num_pools=10, headers=None, - proxy_headers=None, **connection_pool_kw): + def __init__( + self, + proxy_url, + num_pools=10, + headers=None, + proxy_headers=None, + **connection_pool_kw + ): if isinstance(proxy_url, HTTPConnectionPool): - proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, - proxy_url.port) + proxy_url = "%s://%s:%i" % ( + proxy_url.scheme, + proxy_url.host, + proxy_url.port, + ) proxy = parse_url(proxy_url) if not proxy.port: port = port_by_scheme.get(proxy.scheme, 80) @@ -408,30 +422,31 @@ def __init__(self, proxy_url, num_pools=10, headers=None, self.proxy = proxy self.proxy_headers = proxy_headers or {} - connection_pool_kw['_proxy'] = self.proxy - connection_pool_kw['_proxy_headers'] = self.proxy_headers + connection_pool_kw["_proxy"] = self.proxy + connection_pool_kw["_proxy_headers"] = self.proxy_headers - super(ProxyManager, self).__init__( - num_pools, headers, **connection_pool_kw) + super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw) - def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): + def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): if scheme == "https": return super(ProxyManager, self).connection_from_host( - host, port, scheme, pool_kwargs=pool_kwargs) + host, port, scheme, pool_kwargs=pool_kwargs + ) return super(ProxyManager, self).connection_from_host( - self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs) + self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs + ) def _set_proxy_headers(self, url, headers=None): """ Sets headers needed by proxies: specifically, the Accept and Host headers. Only sets headers not provided by the user. """ - headers_ = {'Accept': '*/*'} + headers_ = {"Accept": "*/*"} netloc = parse_url(url).netloc if netloc: - headers_['Host'] = netloc + headers_["Host"] = netloc if headers: headers_.update(headers) @@ -445,8 +460,8 @@ def urlopen(self, method, url, redirect=True, **kw): # For proxied HTTPS requests, httplib sets the necessary headers # on the CONNECT to the proxy. For HTTP, we'll definitely # need to set 'Host' at the very least. 
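``ProxyManager`` reuses the machinery above but routes plain-HTTP traffic through a pool for the proxy itself, while HTTPS targets still get their own pool (the proxy only sees the CONNECT). A sketch with an assumed local proxy address and placeholder credentials::

    from pipenv.vendor.urllib3 import ProxyManager

    proxy = ProxyManager(
        "http://localhost:3128/",
        proxy_headers={"Proxy-Authorization": "Basic bm90OnJlYWw="},
    )

    # Accept and Host headers are filled in by _set_proxy_headers().
    r = proxy.request("GET", "http://example.com/")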
- headers = kw.get('headers', self.headers) - kw['headers'] = self._set_proxy_headers(url, headers) + headers = kw.get("headers", self.headers) + kw["headers"] = self._set_proxy_headers(url, headers) return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) diff --git a/pipenv/vendor/urllib3/request.py b/pipenv/vendor/urllib3/request.py index 8f2f44bb21..55f160bbf1 100644 --- a/pipenv/vendor/urllib3/request.py +++ b/pipenv/vendor/urllib3/request.py @@ -4,7 +4,7 @@ from .packages.six.moves.urllib.parse import urlencode -__all__ = ['RequestMethods'] +__all__ = ["RequestMethods"] class RequestMethods(object): @@ -36,16 +36,25 @@ class RequestMethods(object): explicitly. """ - _encode_url_methods = {'DELETE', 'GET', 'HEAD', 'OPTIONS'} + _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"} def __init__(self, headers=None): self.headers = headers or {} - def urlopen(self, method, url, body=None, headers=None, - encode_multipart=True, multipart_boundary=None, - **kw): # Abstract - raise NotImplementedError("Classes extending RequestMethods must implement " - "their own ``urlopen`` method.") + def urlopen( + self, + method, + url, + body=None, + headers=None, + encode_multipart=True, + multipart_boundary=None, + **kw + ): # Abstract + raise NotImplementedError( + "Classes extending RequestMethods must implement " + "their own ``urlopen`` method." + ) def request(self, method, url, fields=None, headers=None, **urlopen_kw): """ @@ -60,19 +69,18 @@ def request(self, method, url, fields=None, headers=None, **urlopen_kw): """ method = method.upper() - urlopen_kw['request_url'] = url + urlopen_kw["request_url"] = url if method in self._encode_url_methods: - return self.request_encode_url(method, url, fields=fields, - headers=headers, - **urlopen_kw) + return self.request_encode_url( + method, url, fields=fields, headers=headers, **urlopen_kw + ) else: - return self.request_encode_body(method, url, fields=fields, - headers=headers, - **urlopen_kw) + return self.request_encode_body( + method, url, fields=fields, headers=headers, **urlopen_kw + ) - def request_encode_url(self, method, url, fields=None, headers=None, - **urlopen_kw): + def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the url. This is useful for request methods like GET, HEAD, DELETE, etc. @@ -80,17 +88,24 @@ def request_encode_url(self, method, url, fields=None, headers=None, if headers is None: headers = self.headers - extra_kw = {'headers': headers} + extra_kw = {"headers": headers} extra_kw.update(urlopen_kw) if fields: - url += '?' + urlencode(fields) + url += "?" + urlencode(fields) return self.urlopen(method, url, **extra_kw) - def request_encode_body(self, method, url, fields=None, headers=None, - encode_multipart=True, multipart_boundary=None, - **urlopen_kw): + def request_encode_body( + self, + method, + url, + fields=None, + headers=None, + encode_multipart=True, + multipart_boundary=None, + **urlopen_kw + ): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the body. This is useful for request methods like POST, PUT, PATCH, etc. 
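``RequestMethods.request`` above dispatches on the verb: ``GET``-style methods urlencode ``fields`` into the query string, while body methods encode them as a multipart (or urlencoded) request body. For example, against an arbitrary echo endpoint::

    from pipenv.vendor import urllib3

    http = urllib3.PoolManager()

    # DELETE/GET/HEAD/OPTIONS: fields become the query string.
    http.request("GET", "https://httpbin.org/get", fields={"q": "pipenv"})

    # Everything else: fields become the body, multipart by default.
    http.request(
        "POST",
        "https://httpbin.org/post",
        fields={"name": "pipenv"},
        encode_multipart=False,  # application/x-www-form-urlencoded instead
    )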
@@ -129,22 +144,28 @@ def request_encode_body(self, method, url, fields=None, headers=None, if headers is None: headers = self.headers - extra_kw = {'headers': {}} + extra_kw = {"headers": {}} if fields: - if 'body' in urlopen_kw: + if "body" in urlopen_kw: raise TypeError( - "request got values for both 'fields' and 'body', can only specify one.") + "request got values for both 'fields' and 'body', can only specify one." + ) if encode_multipart: - body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) + body, content_type = encode_multipart_formdata( + fields, boundary=multipart_boundary + ) else: - body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' + body, content_type = ( + urlencode(fields), + "application/x-www-form-urlencoded", + ) - extra_kw['body'] = body - extra_kw['headers'] = {'Content-Type': content_type} + extra_kw["body"] = body + extra_kw["headers"] = {"Content-Type": content_type} - extra_kw['headers'].update(headers) + extra_kw["headers"].update(headers) extra_kw.update(urlopen_kw) return self.urlopen(method, url, **extra_kw) diff --git a/pipenv/vendor/urllib3/response.py b/pipenv/vendor/urllib3/response.py index 4f857932c5..adc321e713 100644 --- a/pipenv/vendor/urllib3/response.py +++ b/pipenv/vendor/urllib3/response.py @@ -13,8 +13,13 @@ from ._collections import HTTPHeaderDict from .exceptions import ( - BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError, - ResponseNotChunked, IncompleteRead, InvalidHeader + BodyNotHttplibCompatible, + ProtocolError, + DecodeError, + ReadTimeoutError, + ResponseNotChunked, + IncompleteRead, + InvalidHeader, ) from .packages.six import string_types as basestring, PY3 from .packages.six.moves import http_client as httplib @@ -25,10 +30,9 @@ class DeflateDecoder(object): - def __init__(self): self._first_try = True - self._data = b'' + self._data = b"" self._obj = zlib.decompressobj() def __getattr__(self, name): @@ -65,7 +69,6 @@ class GzipDecoderState(object): class GzipDecoder(object): - def __init__(self): self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) self._state = GzipDecoderState.FIRST_MEMBER @@ -96,6 +99,7 @@ def decompress(self, data): if brotli is not None: + class BrotliDecoder(object): # Supports both 'brotlipy' and 'Brotli' packages # since they share an import name. The top branches @@ -104,14 +108,14 @@ def __init__(self): self._obj = brotli.Decompressor() def decompress(self, data): - if hasattr(self._obj, 'decompress'): + if hasattr(self._obj, "decompress"): return self._obj.decompress(data) return self._obj.process(data) def flush(self): - if hasattr(self._obj, 'flush'): + if hasattr(self._obj, "flush"): return self._obj.flush() - return b'' + return b"" class MultiDecoder(object): @@ -124,7 +128,7 @@ class MultiDecoder(object): """ def __init__(self, modes): - self._decoders = [_get_decoder(m.strip()) for m in modes.split(',')] + self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")] def flush(self): return self._decoders[0].flush() @@ -136,13 +140,13 @@ def decompress(self, data): def _get_decoder(mode): - if ',' in mode: + if "," in mode: return MultiDecoder(mode) - if mode == 'gzip': + if mode == "gzip": return GzipDecoder() - if brotli is not None and mode == 'br': + if brotli is not None and mode == "br": return BrotliDecoder() return DeflateDecoder() @@ -181,16 +185,31 @@ class is also compatible with the Python standard library's :mod:`io` value of Content-Length header, if present. Otherwise, raise error. 
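The decoder classes reformatted above are what back ``decode_content=True``: ``_get_decoder`` picks one per ``Content-Encoding`` token, and ``MultiDecoder`` chains them in reverse header order. They can be exercised directly (``_get_decoder`` is internal; shown only for illustration)::

    import zlib

    from pipenv.vendor.urllib3.response import _get_decoder

    payload = zlib.compress(b"hello pipenv")  # i.e. 'deflate' encoding
    decoder = _get_decoder("deflate")
    assert decoder.decompress(payload) + decoder.flush() == b"hello pipenv"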
""" - CONTENT_DECODERS = ['gzip', 'deflate'] + CONTENT_DECODERS = ["gzip", "deflate"] if brotli is not None: - CONTENT_DECODERS += ['br'] + CONTENT_DECODERS += ["br"] REDIRECT_STATUSES = [301, 302, 303, 307, 308] - def __init__(self, body='', headers=None, status=0, version=0, reason=None, - strict=0, preload_content=True, decode_content=True, - original_response=None, pool=None, connection=None, msg=None, - retries=None, enforce_content_length=False, - request_method=None, request_url=None): + def __init__( + self, + body="", + headers=None, + status=0, + version=0, + reason=None, + strict=0, + preload_content=True, + decode_content=True, + original_response=None, + pool=None, + connection=None, + msg=None, + retries=None, + enforce_content_length=False, + request_method=None, + request_url=None, + auto_close=True, + ): if isinstance(headers, HTTPHeaderDict): self.headers = headers @@ -203,6 +222,7 @@ def __init__(self, body='', headers=None, status=0, version=0, reason=None, self.decode_content = decode_content self.retries = retries self.enforce_content_length = enforce_content_length + self.auto_close = auto_close self._decoder = None self._body = None @@ -218,13 +238,13 @@ def __init__(self, body='', headers=None, status=0, version=0, reason=None, self._pool = pool self._connection = connection - if hasattr(body, 'read'): + if hasattr(body, "read"): self._fp = body # Are we using the chunked-style of transfer encoding? self.chunked = False self.chunk_left = None - tr_enc = self.headers.get('transfer-encoding', '').lower() + tr_enc = self.headers.get("transfer-encoding", "").lower() # Don't incur the penalty of creating a list and then discarding it encodings = (enc.strip() for enc in tr_enc.split(",")) if "chunked" in encodings: @@ -246,7 +266,7 @@ def get_redirect_location(self): location. ``False`` if not a redirect status code. """ if self.status in self.REDIRECT_STATUSES: - return self.headers.get('location') + return self.headers.get("location") return False @@ -285,18 +305,20 @@ def _init_length(self, request_method): """ Set initial length value for Response content if available. """ - length = self.headers.get('content-length') + length = self.headers.get("content-length") if length is not None: if self.chunked: # This Response will fail with an IncompleteRead if it can't be # received as chunked. This method falls back to attempt reading # the response before raising an exception. - log.warning("Received response with both Content-Length and " - "Transfer-Encoding set. This is expressly forbidden " - "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " - "attempting to process response as Transfer-Encoding: " - "chunked.") + log.warning( + "Received response with both Content-Length and " + "Transfer-Encoding set. This is expressly forbidden " + "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " + "attempting to process response as Transfer-Encoding: " + "chunked." + ) return None try: @@ -305,10 +327,12 @@ def _init_length(self, request_method): # (e.g. Content-Length: 42, 42). This line ensures the values # are all valid ints and that as long as the `set` length is 1, # all values are the same. Otherwise, the header is invalid. 
- lengths = set([int(val) for val in length.split(',')]) + lengths = set([int(val) for val in length.split(",")]) if len(lengths) > 1: - raise InvalidHeader("Content-Length contained multiple " - "unmatching values (%s)" % length) + raise InvalidHeader( + "Content-Length contained multiple " + "unmatching values (%s)" % length + ) length = lengths.pop() except ValueError: length = None @@ -324,7 +348,7 @@ def _init_length(self, request_method): status = 0 # Check for responses that shouldn't include a body - if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD': + if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD": length = 0 return length @@ -335,14 +359,16 @@ def _init_decoder(self): """ # Note: content-encoding value should be case-insensitive, per RFC 7230 # Section 3.2 - content_encoding = self.headers.get('content-encoding', '').lower() + content_encoding = self.headers.get("content-encoding", "").lower() if self._decoder is None: if content_encoding in self.CONTENT_DECODERS: self._decoder = _get_decoder(content_encoding) - elif ',' in content_encoding: + elif "," in content_encoding: encodings = [ - e.strip() for e in content_encoding.split(',') - if e.strip() in self.CONTENT_DECODERS] + e.strip() + for e in content_encoding.split(",") + if e.strip() in self.CONTENT_DECODERS + ] if len(encodings): self._decoder = _get_decoder(content_encoding) @@ -361,10 +387,12 @@ def _decode(self, data, decode_content, flush_decoder): if self._decoder: data = self._decoder.decompress(data) except self.DECODER_ERROR_CLASSES as e: - content_encoding = self.headers.get('content-encoding', '').lower() + content_encoding = self.headers.get("content-encoding", "").lower() raise DecodeError( "Received response with content-encoding: %s, but " - "failed to decode it." % content_encoding, e) + "failed to decode it." % content_encoding, + e, + ) if flush_decoder: data += self._flush_decoder() @@ -376,10 +404,10 @@ def _flush_decoder(self): being used. """ if self._decoder: - buf = self._decoder.decompress(b'') + buf = self._decoder.decompress(b"") return buf + self._decoder.flush() - return b'' + return b"" @contextmanager def _error_catcher(self): @@ -399,20 +427,20 @@ def _error_catcher(self): except SocketTimeout: # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but # there is yet no clean way to get at it from this context. - raise ReadTimeoutError(self._pool, None, 'Read timed out.') + raise ReadTimeoutError(self._pool, None, "Read timed out.") except BaseSSLError as e: # FIXME: Is there a better way to differentiate between SSLErrors? - if 'read operation timed out' not in str(e): # Defensive: + if "read operation timed out" not in str(e): # Defensive: # This shouldn't happen but just in case we're missing an edge # case, let's avoid swallowing SSL errors. raise - raise ReadTimeoutError(self._pool, None, 'Read timed out.') + raise ReadTimeoutError(self._pool, None, "Read timed out.") except (HTTPException, SocketError) as e: # This includes IncompleteRead. - raise ProtocolError('Connection broken: %r' % e, e) + raise ProtocolError("Connection broken: %r" % e, e) # If no exception is thrown, we should avoid cleaning up # unnecessarily. 
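``_error_catcher`` above is what converts socket-level failures into urllib3's own exception types, so callers only ever catch the latter; for example (URL is a placeholder)::

    import urllib3
    from urllib3.exceptions import ProtocolError, ReadTimeoutError

    http = urllib3.PoolManager()
    r = http.request("GET", "http://example.com/slow", preload_content=False)
    try:
        body = r.read()
    except ReadTimeoutError:
        print("read timed out")     # raised in place of socket.timeout
    except ProtocolError:
        print("connection broken")  # wraps httplib and socket errors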
@@ -467,17 +495,19 @@ def read(self, amt=None, decode_content=None, cache_content=False): return flush_decoder = False - data = None + fp_closed = getattr(self._fp, "closed", False) with self._error_catcher(): if amt is None: # cStringIO doesn't like amt=None - data = self._fp.read() + data = self._fp.read() if not fp_closed else b"" flush_decoder = True else: cache_content = False - data = self._fp.read(amt) - if amt != 0 and not data: # Platform-specific: Buggy versions of Python. + data = self._fp.read(amt) if not fp_closed else b"" + if ( + amt != 0 and not data + ): # Platform-specific: Buggy versions of Python. # Close the connection when no data is returned # # This is redundant to what httplib/http.client _should_ @@ -487,7 +517,10 @@ def read(self, amt=None, decode_content=None, cache_content=False): # no harm in redundantly calling close. self._fp.close() flush_decoder = True - if self.enforce_content_length and self.length_remaining not in (0, None): + if self.enforce_content_length and self.length_remaining not in ( + 0, + None, + ): # This is an edge case that httplib failed to cover due # to concerns of backward compatibility. We're # addressing it here to make sure IncompleteRead is @@ -507,7 +540,7 @@ def read(self, amt=None, decode_content=None, cache_content=False): return data - def stream(self, amt=2**16, decode_content=None): + def stream(self, amt=2 ** 16, decode_content=None): """ A generator wrapper for the read() method. A call will block until ``amt`` bytes have been read from the connection or until the @@ -552,15 +585,17 @@ def from_httplib(ResponseCls, r, **response_kw): headers = HTTPHeaderDict.from_httplib(headers) # HTTPResponse objects in Python 3 don't have a .strict attribute - strict = getattr(r, 'strict', 0) - resp = ResponseCls(body=r, - headers=headers, - status=r.status, - version=r.version, - reason=r.reason, - strict=strict, - original_response=r, - **response_kw) + strict = getattr(r, "strict", 0) + resp = ResponseCls( + body=r, + headers=headers, + status=r.status, + version=r.version, + reason=r.reason, + strict=strict, + original_response=r, + **response_kw + ) return resp # Backwards-compatibility methods for httplib.HTTPResponse @@ -582,13 +617,18 @@ def close(self): if self._connection: self._connection.close() + if not self.auto_close: + io.IOBase.close(self) + @property def closed(self): - if self._fp is None: + if not self.auto_close: + return io.IOBase.closed.__get__(self) + elif self._fp is None: return True - elif hasattr(self._fp, 'isclosed'): + elif hasattr(self._fp, "isclosed"): return self._fp.isclosed() - elif hasattr(self._fp, 'closed'): + elif hasattr(self._fp, "closed"): return self._fp.closed else: return True @@ -599,11 +639,17 @@ def fileno(self): elif hasattr(self._fp, "fileno"): return self._fp.fileno() else: - raise IOError("The file-like object this HTTPResponse is wrapped " - "around has no file descriptor") + raise IOError( + "The file-like object this HTTPResponse is wrapped " + "around has no file descriptor" + ) def flush(self): - if self._fp is not None and hasattr(self._fp, 'flush'): + if ( + self._fp is not None + and hasattr(self._fp, "flush") + and not getattr(self._fp, "closed", False) + ): return self._fp.flush() def readable(self): @@ -616,7 +662,7 @@ def readinto(self, b): if len(temp) == 0: return 0 else: - b[:len(temp)] = temp + b[: len(temp)] = temp return len(temp) def supports_chunked_reads(self): @@ -626,7 +672,7 @@ def supports_chunked_reads(self): attribute. 
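For the ``stream()`` wrapper reformatted above, the usual consumption pattern is the following sketch, again with a placeholder URL::

    import urllib3

    http = urllib3.PoolManager()
    r = http.request("GET", "http://example.com/large", preload_content=False)
    chunks = []
    # stream() yields decoded chunks of up to amt bytes until the body
    # is exhausted; the connection is released explicitly afterwards.
    for chunk in r.stream(2 ** 16):
        chunks.append(chunk)
    r.release_conn()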
If it is present we assume it returns raw chunks as processed by read_chunked(). """ - return hasattr(self._fp, 'fp') + return hasattr(self._fp, "fp") def _update_chunk_length(self): # First, we'll figure out length of a chunk and then @@ -634,7 +680,7 @@ def _update_chunk_length(self): if self.chunk_left is not None: return line = self._fp.fp.readline() - line = line.split(b';', 1)[0] + line = line.split(b";", 1)[0] try: self.chunk_left = int(line, 16) except ValueError: @@ -683,11 +729,13 @@ def read_chunked(self, amt=None, decode_content=None): if not self.chunked: raise ResponseNotChunked( "Response is not chunked. " - "Header 'transfer-encoding: chunked' is missing.") + "Header 'transfer-encoding: chunked' is missing." + ) if not self.supports_chunked_reads(): raise BodyNotHttplibCompatible( "Body should be httplib.HTTPResponse like. " - "It should have have an fp attribute which returns raw chunks.") + "It should have have an fp attribute which returns raw chunks." + ) with self._error_catcher(): # Don't bother reading the body of a HEAD request. @@ -705,8 +753,9 @@ def read_chunked(self, amt=None, decode_content=None): if self.chunk_left == 0: break chunk = self._handle_chunk(amt) - decoded = self._decode(chunk, decode_content=decode_content, - flush_decoder=False) + decoded = self._decode( + chunk, decode_content=decode_content, flush_decoder=False + ) if decoded: yield decoded @@ -724,7 +773,7 @@ def read_chunked(self, amt=None, decode_content=None): if not line: # Some sites may not end with '\r\n'. break - if line == b'\r\n': + if line == b"\r\n": break # We read everything; close the "file". diff --git a/pipenv/vendor/urllib3/util/__init__.py b/pipenv/vendor/urllib3/util/__init__.py index 2914bb468b..a96c73a9d8 100644 --- a/pipenv/vendor/urllib3/util/__init__.py +++ b/pipenv/vendor/urllib3/util/__init__.py @@ -1,4 +1,5 @@ from __future__ import absolute_import + # For backwards compatibility, provide imports that used to be here. 
from .connection import is_connection_dropped from .request import make_headers @@ -14,43 +15,32 @@ ssl_wrap_socket, PROTOCOL_TLS, ) -from .timeout import ( - current_time, - Timeout, -) +from .timeout import current_time, Timeout from .retry import Retry -from .url import ( - get_host, - parse_url, - split_first, - Url, -) -from .wait import ( - wait_for_read, - wait_for_write -) +from .url import get_host, parse_url, split_first, Url +from .wait import wait_for_read, wait_for_write __all__ = ( - 'HAS_SNI', - 'IS_PYOPENSSL', - 'IS_SECURETRANSPORT', - 'SSLContext', - 'PROTOCOL_TLS', - 'Retry', - 'Timeout', - 'Url', - 'assert_fingerprint', - 'current_time', - 'is_connection_dropped', - 'is_fp_closed', - 'get_host', - 'parse_url', - 'make_headers', - 'resolve_cert_reqs', - 'resolve_ssl_version', - 'split_first', - 'ssl_wrap_socket', - 'wait_for_read', - 'wait_for_write' + "HAS_SNI", + "IS_PYOPENSSL", + "IS_SECURETRANSPORT", + "SSLContext", + "PROTOCOL_TLS", + "Retry", + "Timeout", + "Url", + "assert_fingerprint", + "current_time", + "is_connection_dropped", + "is_fp_closed", + "get_host", + "parse_url", + "make_headers", + "resolve_cert_reqs", + "resolve_ssl_version", + "split_first", + "ssl_wrap_socket", + "wait_for_read", + "wait_for_write", ) diff --git a/pipenv/vendor/urllib3/util/connection.py b/pipenv/vendor/urllib3/util/connection.py index 5ad70b2f1c..86f0a3b00e 100644 --- a/pipenv/vendor/urllib3/util/connection.py +++ b/pipenv/vendor/urllib3/util/connection.py @@ -14,7 +14,7 @@ def is_connection_dropped(conn): # Platform-specific Note: For platforms like AppEngine, this will always return ``False`` to let the platform handle connection recycling transparently for us. """ - sock = getattr(conn, 'sock', False) + sock = getattr(conn, "sock", False) if sock is False: # Platform-specific: AppEngine return False if sock is None: # Connection already closed (such as by httplib). @@ -30,8 +30,12 @@ def is_connection_dropped(conn): # Platform-specific # library test suite. Added to its signature is only `socket_options`. # One additional modification is that we avoid binding to IPv6 servers # discovered in DNS if the system doesn't have IPv6 functionality. -def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, socket_options=None): +def create_connection( + address, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, + socket_options=None, +): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, @@ -45,8 +49,8 @@ def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, """ host, port = address - if host.startswith('['): - host = host.strip('[]') + if host.startswith("["): + host = host.strip("[]") err = None # Using the value from allowed_gai_family() in the context of getaddrinfo lets @@ -117,7 +121,7 @@ def _has_ipv6(host): # has_ipv6 returns true if cPython was compiled with IPv6 support. # It does not tell us if the system has IPv6 support enabled. To # determine that we must bind to an IPv6 address. 
- # https://github.com/shazow/urllib3/pull/611 + # https://github.com/urllib3/urllib3/pull/611 # https://bugs.python.org/issue658327 try: sock = socket.socket(socket.AF_INET6) @@ -131,4 +135,4 @@ def _has_ipv6(host): return has_ipv6 -HAS_IPV6 = _has_ipv6('::1') +HAS_IPV6 = _has_ipv6("::1") diff --git a/pipenv/vendor/urllib3/util/request.py b/pipenv/vendor/urllib3/util/request.py index 280b8530c6..3b7bb54daf 100644 --- a/pipenv/vendor/urllib3/util/request.py +++ b/pipenv/vendor/urllib3/util/request.py @@ -4,19 +4,25 @@ from ..packages.six import b, integer_types from ..exceptions import UnrewindableBodyError -ACCEPT_ENCODING = 'gzip,deflate' +ACCEPT_ENCODING = "gzip,deflate" try: import brotli as _unused_module_brotli # noqa: F401 except ImportError: pass else: - ACCEPT_ENCODING += ',br' + ACCEPT_ENCODING += ",br" _FAILEDTELL = object() -def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, - basic_auth=None, proxy_basic_auth=None, disable_cache=None): +def make_headers( + keep_alive=None, + accept_encoding=None, + user_agent=None, + basic_auth=None, + proxy_basic_auth=None, + disable_cache=None, +): """ Shortcuts for generating request headers. @@ -56,27 +62,27 @@ def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, if isinstance(accept_encoding, str): pass elif isinstance(accept_encoding, list): - accept_encoding = ','.join(accept_encoding) + accept_encoding = ",".join(accept_encoding) else: accept_encoding = ACCEPT_ENCODING - headers['accept-encoding'] = accept_encoding + headers["accept-encoding"] = accept_encoding if user_agent: - headers['user-agent'] = user_agent + headers["user-agent"] = user_agent if keep_alive: - headers['connection'] = 'keep-alive' + headers["connection"] = "keep-alive" if basic_auth: - headers['authorization'] = 'Basic ' + \ - b64encode(b(basic_auth)).decode('utf-8') + headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8") if proxy_basic_auth: - headers['proxy-authorization'] = 'Basic ' + \ - b64encode(b(proxy_basic_auth)).decode('utf-8') + headers["proxy-authorization"] = "Basic " + b64encode( + b(proxy_basic_auth) + ).decode("utf-8") if disable_cache: - headers['cache-control'] = 'no-cache' + headers["cache-control"] = "no-cache" return headers @@ -88,7 +94,7 @@ def set_file_position(body, pos): """ if pos is not None: rewind_body(body, pos) - elif getattr(body, 'tell', None) is not None: + elif getattr(body, "tell", None) is not None: try: pos = body.tell() except (IOError, OSError): @@ -110,16 +116,20 @@ def rewind_body(body, body_pos): :param int pos: Position to seek to in file. """ - body_seek = getattr(body, 'seek', None) + body_seek = getattr(body, "seek", None) if body_seek is not None and isinstance(body_pos, integer_types): try: body_seek(body_pos) except (IOError, OSError): - raise UnrewindableBodyError("An error occurred when rewinding request " - "body for redirect/retry.") + raise UnrewindableBodyError( + "An error occurred when rewinding request body for redirect/retry." + ) elif body_pos is _FAILEDTELL: - raise UnrewindableBodyError("Unable to record file position for rewinding " - "request body during a redirect/retry.") + raise UnrewindableBodyError( + "Unable to record file position for rewinding " + "request body during a redirect/retry." + ) else: - raise ValueError("body_pos must be of type integer, " - "instead it was %s." % type(body_pos)) + raise ValueError( + "body_pos must be of type integer, instead it was %s." 
% type(body_pos) + ) diff --git a/pipenv/vendor/urllib3/util/response.py b/pipenv/vendor/urllib3/util/response.py index 3d5486485a..715868dd10 100644 --- a/pipenv/vendor/urllib3/util/response.py +++ b/pipenv/vendor/urllib3/util/response.py @@ -52,11 +52,10 @@ def assert_header_parsing(headers): # This will fail silently if we pass in the wrong kind of parameter. # To make debugging easier add an explicit check. if not isinstance(headers, httplib.HTTPMessage): - raise TypeError('expected httplib.Message, got {0}.'.format( - type(headers))) + raise TypeError("expected httplib.Message, got {0}.".format(type(headers))) - defects = getattr(headers, 'defects', None) - get_payload = getattr(headers, 'get_payload', None) + defects = getattr(headers, "defects", None) + get_payload = getattr(headers, "get_payload", None) unparsed_data = None if get_payload: @@ -84,4 +83,4 @@ def is_response_to_head(response): method = response._method if isinstance(method, int): # Platform-specific: Appengine return method == 3 - return method.upper() == 'HEAD' + return method.upper() == "HEAD" diff --git a/pipenv/vendor/urllib3/util/retry.py b/pipenv/vendor/urllib3/util/retry.py index 02429ee8e4..5a049fe65e 100644 --- a/pipenv/vendor/urllib3/util/retry.py +++ b/pipenv/vendor/urllib3/util/retry.py @@ -21,8 +21,9 @@ # Data structure for representing the metadata of requests that result in a retry. -RequestHistory = namedtuple('RequestHistory', ["method", "url", "error", - "status", "redirect_location"]) +RequestHistory = namedtuple( + "RequestHistory", ["method", "url", "error", "status", "redirect_location"] +) class Retry(object): @@ -146,21 +147,33 @@ class Retry(object): request. """ - DEFAULT_METHOD_WHITELIST = frozenset([ - 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']) + DEFAULT_METHOD_WHITELIST = frozenset( + ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"] + ) RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) - DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization']) + DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(["Authorization"]) #: Maximum backoff time. 
BACKOFF_MAX = 120 - def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, - method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, - backoff_factor=0, raise_on_redirect=True, raise_on_status=True, - history=None, respect_retry_after_header=True, - remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST): + def __init__( + self, + total=10, + connect=None, + read=None, + redirect=None, + status=None, + method_whitelist=DEFAULT_METHOD_WHITELIST, + status_forcelist=None, + backoff_factor=0, + raise_on_redirect=True, + raise_on_status=True, + history=None, + respect_retry_after_header=True, + remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST, + ): self.total = total self.connect = connect @@ -179,20 +192,25 @@ def __init__(self, total=10, connect=None, read=None, redirect=None, status=None self.raise_on_status = raise_on_status self.history = history or tuple() self.respect_retry_after_header = respect_retry_after_header - self.remove_headers_on_redirect = frozenset([ - h.lower() for h in remove_headers_on_redirect]) + self.remove_headers_on_redirect = frozenset( + [h.lower() for h in remove_headers_on_redirect] + ) def new(self, **kw): params = dict( total=self.total, - connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, + connect=self.connect, + read=self.read, + redirect=self.redirect, + status=self.status, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.backoff_factor, raise_on_redirect=self.raise_on_redirect, raise_on_status=self.raise_on_status, history=self.history, - remove_headers_on_redirect=self.remove_headers_on_redirect + remove_headers_on_redirect=self.remove_headers_on_redirect, + respect_retry_after_header=self.respect_retry_after_header, ) params.update(kw) return type(self)(**params) @@ -217,8 +235,11 @@ def get_backoff_time(self): :rtype: float """ # We want to consider only the last consecutive errors sequence (Ignore redirects). - consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None, - reversed(self.history)))) + consecutive_errors_len = len( + list( + takewhile(lambda x: x.redirect_location is None, reversed(self.history)) + ) + ) if consecutive_errors_len <= 1: return 0 @@ -274,7 +295,7 @@ def sleep(self, response=None): this method will return immediately. """ - if response: + if self.respect_retry_after_header and response: slept = self.sleep_for_retry(response) if slept: return @@ -315,8 +336,12 @@ def is_retry(self, method, status_code, has_retry_after=False): if self.status_forcelist and status_code in self.status_forcelist: return True - return (self.total and self.respect_retry_after_header and - has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES)) + return ( + self.total + and self.respect_retry_after_header + and has_retry_after + and (status_code in self.RETRY_AFTER_STATUS_CODES) + ) def is_exhausted(self): """ Are we out of retries? """ @@ -327,8 +352,15 @@ def is_exhausted(self): return min(retry_counts) < 0 - def increment(self, method=None, url=None, response=None, error=None, - _pool=None, _stacktrace=None): + def increment( + self, + method=None, + url=None, + response=None, + error=None, + _pool=None, + _stacktrace=None, + ): """ Return a new Retry object with incremented retry counters. 
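Given the ``Retry`` signature above (note that ``respect_retry_after_header`` is now carried through ``new()``), a typical configuration looks like this; the values are illustrative::

    import urllib3
    from urllib3.util.retry import Retry

    retry = Retry(
        total=5,
        backoff_factor=0.2,  # sleeps 0, 0.4, 0.8, 1.6, ... between attempts
        status_forcelist=[500, 502, 503],
        respect_retry_after_header=True,
    )
    http = urllib3.PoolManager(retries=retry)
    r = http.request("GET", "http://example.com/")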
:param response: A response object, or None, if the server did not @@ -351,7 +383,7 @@ def increment(self, method=None, url=None, response=None, error=None, read = self.read redirect = self.redirect status_count = self.status - cause = 'unknown' + cause = "unknown" status = None redirect_location = None @@ -373,7 +405,7 @@ def increment(self, method=None, url=None, response=None, error=None, # Redirect retry? if redirect is not None: redirect -= 1 - cause = 'too many redirects' + cause = "too many redirects" redirect_location = response.get_redirect_location() status = response.status @@ -384,16 +416,21 @@ def increment(self, method=None, url=None, response=None, error=None, if response and response.status: if status_count is not None: status_count -= 1 - cause = ResponseError.SPECIFIC_ERROR.format( - status_code=response.status) + cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status) status = response.status - history = self.history + (RequestHistory(method, url, error, status, redirect_location),) + history = self.history + ( + RequestHistory(method, url, error, status, redirect_location), + ) new_retry = self.new( total=total, - connect=connect, read=read, redirect=redirect, status=status_count, - history=history) + connect=connect, + read=read, + redirect=redirect, + status=status_count, + history=history, + ) if new_retry.is_exhausted(): raise MaxRetryError(_pool, url, error or ResponseError(cause)) @@ -403,9 +440,10 @@ def increment(self, method=None, url=None, response=None, error=None, return new_retry def __repr__(self): - return ('{cls.__name__}(total={self.total}, connect={self.connect}, ' - 'read={self.read}, redirect={self.redirect}, status={self.status})').format( - cls=type(self), self=self) + return ( + "{cls.__name__}(total={self.total}, connect={self.connect}, " + "read={self.read}, redirect={self.redirect}, status={self.status})" + ).format(cls=type(self), self=self) # For backwards compatibility (equivalent to pre-v1.9): diff --git a/pipenv/vendor/urllib3/util/ssl_.py b/pipenv/vendor/urllib3/util/ssl_.py index f271ce9301..8495b7753d 100644 --- a/pipenv/vendor/urllib3/util/ssl_.py +++ b/pipenv/vendor/urllib3/util/ssl_.py @@ -2,14 +2,14 @@ import errno import warnings import hmac -import re +import sys from binascii import hexlify, unhexlify from hashlib import md5, sha1, sha256 +from .url import IPV4_RE, BRACELESS_IPV6_ADDRZ_RE from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning from ..packages import six -from ..packages.rfc3986 import abnf_regexp SSLContext = None @@ -18,11 +18,7 @@ IS_SECURETRANSPORT = False # Maps the length of a digest to a possible hash function producing this digest -HASHFUNC_MAP = { - 32: md5, - 40: sha1, - 64: sha256, -} +HASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256} def _const_compare_digest_backport(a, b): @@ -38,18 +34,7 @@ def _const_compare_digest_backport(a, b): return result == 0 -_const_compare_digest = getattr(hmac, 'compare_digest', - _const_compare_digest_backport) - -# Borrow rfc3986's regular expressions for IPv4 -# and IPv6 addresses for use in is_ipaddress() -_IP_ADDRESS_REGEX = re.compile( - r'^(?:%s|%s|%s)$' % ( - abnf_regexp.IPv4_RE, - abnf_regexp.IPv6_RE, - abnf_regexp.IPv6_ADDRZ_RFC4007_RE - ) -) +_const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport) try: # Test for SSL features import ssl @@ -60,10 +45,12 @@ def _const_compare_digest_backport(a, b): try: # Platform-specific: Python 3.6 from ssl import PROTOCOL_TLS + PROTOCOL_SSLv23 = PROTOCOL_TLS 
except ImportError: try: from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS + PROTOCOL_SSLv23 = PROTOCOL_TLS except ImportError: PROTOCOL_SSLv23 = PROTOCOL_TLS = 2 @@ -93,26 +80,29 @@ def _const_compare_digest_backport(a, b): # insecure ciphers for security reasons. # - NOTE: TLS 1.3 cipher suites are managed through a different interface # not exposed by CPython (yet!) and are enabled by default if they're available. -DEFAULT_CIPHERS = ':'.join([ - 'ECDHE+AESGCM', - 'ECDHE+CHACHA20', - 'DHE+AESGCM', - 'DHE+CHACHA20', - 'ECDH+AESGCM', - 'DH+AESGCM', - 'ECDH+AES', - 'DH+AES', - 'RSA+AESGCM', - 'RSA+AES', - '!aNULL', - '!eNULL', - '!MD5', - '!DSS', -]) +DEFAULT_CIPHERS = ":".join( + [ + "ECDHE+AESGCM", + "ECDHE+CHACHA20", + "DHE+AESGCM", + "DHE+CHACHA20", + "ECDH+AESGCM", + "DH+AESGCM", + "ECDH+AES", + "DH+AES", + "RSA+AESGCM", + "RSA+AES", + "!aNULL", + "!eNULL", + "!MD5", + "!DSS", + ] +) try: from ssl import SSLContext # Modern SSL? except ImportError: + class SSLContext(object): # Platform-specific: Python 2 def __init__(self, protocol_version): self.protocol = protocol_version @@ -140,21 +130,21 @@ def set_ciphers(self, cipher_suite): def wrap_socket(self, socket, server_hostname=None, server_side=False): warnings.warn( - 'A true SSLContext object is not available. This prevents ' - 'urllib3 from configuring SSL appropriately and may cause ' - 'certain SSL connections to fail. You can upgrade to a newer ' - 'version of Python to solve this. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings', - InsecurePlatformWarning + "A true SSLContext object is not available. This prevents " + "urllib3 from configuring SSL appropriately and may cause " + "certain SSL connections to fail. You can upgrade to a newer " + "version of Python to solve this. For more information, see " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#ssl-warnings", + InsecurePlatformWarning, ) kwargs = { - 'keyfile': self.keyfile, - 'certfile': self.certfile, - 'ca_certs': self.ca_certs, - 'cert_reqs': self.verify_mode, - 'ssl_version': self.protocol, - 'server_side': server_side, + "keyfile": self.keyfile, + "certfile": self.certfile, + "ca_certs": self.ca_certs, + "cert_reqs": self.verify_mode, + "ssl_version": self.protocol, + "server_side": server_side, } return wrap_socket(socket, ciphers=self.ciphers, **kwargs) @@ -169,12 +159,11 @@ def assert_fingerprint(cert, fingerprint): Fingerprint as string of hexdigits, can be interspersed by colons. """ - fingerprint = fingerprint.replace(':', '').lower() + fingerprint = fingerprint.replace(":", "").lower() digest_length = len(fingerprint) hashfunc = HASHFUNC_MAP.get(digest_length) if not hashfunc: - raise SSLError( - 'Fingerprint of invalid length: {0}'.format(fingerprint)) + raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint)) # We need encode() here for py32; works on py2 and p33. fingerprint_bytes = unhexlify(fingerprint.encode()) @@ -182,8 +171,11 @@ def assert_fingerprint(cert, fingerprint): cert_digest = hashfunc(cert).digest() if not _const_compare_digest(cert_digest, fingerprint_bytes): - raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' - .format(fingerprint, hexlify(cert_digest))) + raise SSLError( + 'Fingerprints did not match. 
Expected "{0}", got "{1}".'.format( + fingerprint, hexlify(cert_digest) + ) + ) def resolve_cert_reqs(candidate): @@ -203,7 +195,7 @@ def resolve_cert_reqs(candidate): if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: - res = getattr(ssl, 'CERT_' + candidate) + res = getattr(ssl, "CERT_" + candidate) return res return candidate @@ -219,14 +211,15 @@ def resolve_ssl_version(candidate): if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: - res = getattr(ssl, 'PROTOCOL_' + candidate) + res = getattr(ssl, "PROTOCOL_" + candidate) return res return candidate -def create_urllib3_context(ssl_version=None, cert_reqs=None, - options=None, ciphers=None): +def create_urllib3_context( + ssl_version=None, cert_reqs=None, options=None, ciphers=None +): """All arguments have the same meaning as ``ssl_wrap_socket``. By default, this function does a lot of the same work that @@ -279,18 +272,40 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None, context.options |= options + # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is + # necessary for conditional client cert authentication with TLS 1.3. + # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older + # versions of Python. We only enable on Python 3.7.4+ or if certificate + # verification is enabled to work around Python issue #37428 + # See: https://bugs.python.org/issue37428 + if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr( + context, "post_handshake_auth", None + ) is not None: + context.post_handshake_auth = True + context.verify_mode = cert_reqs - if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 + if ( + getattr(context, "check_hostname", None) is not None + ): # Platform-specific: Python 3.2 # We do our own verification, including fingerprints and alternative # hostnames. So disable it here context.check_hostname = False return context -def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, - ca_certs=None, server_hostname=None, - ssl_version=None, ciphers=None, ssl_context=None, - ca_cert_dir=None, key_password=None): +def ssl_wrap_socket( + sock, + keyfile=None, + certfile=None, + cert_reqs=None, + ca_certs=None, + server_hostname=None, + ssl_version=None, + ciphers=None, + ssl_context=None, + ca_cert_dir=None, + key_password=None, +): """ All arguments except for server_hostname, ssl_context, and ca_cert_dir have the same meaning as they do when using :func:`ssl.wrap_socket`. @@ -314,8 +329,7 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, # Note: This branch of code and all the variables in it are no longer # used by urllib3 itself. We should consider deprecating and removing # this code. 
- context = create_urllib3_context(ssl_version, cert_reqs, - ciphers=ciphers) + context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers) if ca_certs or ca_cert_dir: try: @@ -329,7 +343,7 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, raise SSLError(e) raise - elif ssl_context is None and hasattr(context, 'load_default_certs'): + elif ssl_context is None and hasattr(context, "load_default_certs"): # try to load OS default certs; works well on Windows (require Python3.4+) context.load_default_certs() @@ -349,20 +363,21 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, # extension should not be used according to RFC3546 Section 3.1 # We shouldn't warn the user if SNI isn't available but we would # not be using SNI anyways due to IP address for server_hostname. - if ((server_hostname is not None and not is_ipaddress(server_hostname)) - or IS_SECURETRANSPORT): + if ( + server_hostname is not None and not is_ipaddress(server_hostname) + ) or IS_SECURETRANSPORT: if HAS_SNI and server_hostname is not None: return context.wrap_socket(sock, server_hostname=server_hostname) warnings.warn( - 'An HTTPS request has been made, but the SNI (Server Name ' - 'Indication) extension to TLS is not available on this platform. ' - 'This may cause the server to present an incorrect TLS ' - 'certificate, which can cause validation failures. You can upgrade to ' - 'a newer version of Python to solve this. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings', - SNIMissingWarning + "An HTTPS request has been made, but the SNI (Server Name " + "Indication) extension to TLS is not available on this platform. " + "This may cause the server to present an incorrect TLS " + "certificate, which can cause validation failures. You can upgrade to " + "a newer version of Python to solve this. For more information, see " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#ssl-warnings", + SNIMissingWarning, ) return context.wrap_socket(sock) @@ -375,18 +390,18 @@ def is_ipaddress(hostname): :param str hostname: Hostname to examine. :return: True if the hostname is an IP address, False otherwise. """ - if six.PY3 and isinstance(hostname, bytes): + if not six.PY2 and isinstance(hostname, bytes): # IDN A-label bytes are ASCII compatible. - hostname = hostname.decode('ascii') - return _IP_ADDRESS_REGEX.match(hostname) is not None + hostname = hostname.decode("ascii") + return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname)) def _is_key_file_encrypted(key_file): """Detects if a key file is encrypted or not.""" - with open(key_file, 'r') as f: + with open(key_file, "r") as f: for line in f: # Look for Proc-Type: 4,ENCRYPTED - if 'ENCRYPTED' in line: + if "ENCRYPTED" in line: return True return False diff --git a/pipenv/vendor/urllib3/util/timeout.py b/pipenv/vendor/urllib3/util/timeout.py index a4d004a848..9883700556 100644 --- a/pipenv/vendor/urllib3/util/timeout.py +++ b/pipenv/vendor/urllib3/util/timeout.py @@ -1,4 +1,5 @@ from __future__ import absolute_import + # The default socket timeout, used by httplib to indicate that no timeout was # specified by the user from socket import _GLOBAL_DEFAULT_TIMEOUT @@ -45,19 +46,20 @@ class Timeout(object): :type total: integer, float, or None :param connect: - The maximum amount of time to wait for a connection attempt to a server - to succeed. 
Omitting the parameter will default the connect timeout to - the system default, probably `the global default timeout in socket.py + The maximum amount of time (in seconds) to wait for a connection + attempt to a server to succeed. Omitting the parameter will default the + connect timeout to the system default, probably `the global default + timeout in socket.py <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. None will set an infinite timeout for connection attempts. :type connect: integer, float, or None :param read: - The maximum amount of time to wait between consecutive - read operations for a response from the server. Omitting - the parameter will default the read timeout to the system - default, probably `the global default timeout in socket.py + The maximum amount of time (in seconds) to wait between consecutive + read operations for a response from the server. Omitting the parameter + will default the read timeout to the system default, probably `the + global default timeout in socket.py <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. None will set an infinite timeout. @@ -91,14 +93,18 @@ class Timeout(object): DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT def __init__(self, total=None, connect=_Default, read=_Default): - self._connect = self._validate_timeout(connect, 'connect') - self._read = self._validate_timeout(read, 'read') - self.total = self._validate_timeout(total, 'total') + self._connect = self._validate_timeout(connect, "connect") + self._read = self._validate_timeout(read, "read") + self.total = self._validate_timeout(total, "total") self._start_connect = None def __str__(self): - return '%s(connect=%r, read=%r, total=%r)' % ( - type(self).__name__, self._connect, self._read, self.total) + return "%s(connect=%r, read=%r, total=%r)" % ( + type(self).__name__, + self._connect, + self._read, + self.total, + ) @classmethod def _validate_timeout(cls, value, name): @@ -118,23 +124,31 @@ def _validate_timeout(cls, value, name): return value if isinstance(value, bool): - raise ValueError("Timeout cannot be a boolean value. It must " - "be an int, float or None.") + raise ValueError( + "Timeout cannot be a boolean value. It must " + "be an int, float or None." + ) try: float(value) except (TypeError, ValueError): - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value)) + raise ValueError( + "Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value) + ) try: if value <= 0: - raise ValueError("Attempted to set %s timeout to %s, but the " - "timeout cannot be set to a value less " - "than or equal to 0." % (name, value)) + raise ValueError( + "Attempted to set %s timeout to %s, but the " + "timeout cannot be set to a value less " + "than or equal to 0." % (name, value) + ) except TypeError: # Python 3 - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value)) + raise ValueError( + "Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value) + ) return value @@ -166,8 +180,7 @@ def clone(self): # We can't use copy.deepcopy because that will also create a new object # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to # detect the user default. 
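For the ``Timeout`` validation above, the separate connect/read budgets compose with an overall cap as follows; values are illustrative::

    import urllib3
    from urllib3.util.timeout import Timeout

    # connect and read are per-phase budgets in seconds; total caps both.
    timeout = Timeout(connect=2.0, read=7.0, total=8.0)
    http = urllib3.PoolManager(timeout=timeout)
    r = http.request("GET", "http://example.com/")
    # Timeout(connect=True) would raise ValueError: _validate_timeout
    # rejects booleans explicitly.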
- return Timeout(connect=self._connect, read=self._read, - total=self.total) + return Timeout(connect=self._connect, read=self._read, total=self.total) def start_connect(self): """ Start the timeout clock, used during a connect() attempt @@ -183,14 +196,15 @@ def start_connect(self): def get_connect_duration(self): """ Gets the time elapsed since the call to :meth:`start_connect`. - :return: Elapsed time. + :return: Elapsed time in seconds. :rtype: float :raises urllib3.exceptions.TimeoutStateError: if you attempt to get duration for a timer that hasn't been started. """ if self._start_connect is None: - raise TimeoutStateError("Can't get connect duration for timer " - "that has not started.") + raise TimeoutStateError( + "Can't get connect duration for timer that has not started." + ) return current_time() - self._start_connect @property @@ -228,15 +242,16 @@ def read_timeout(self): :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` has not yet been called on this object. """ - if (self.total is not None and - self.total is not self.DEFAULT_TIMEOUT and - self._read is not None and - self._read is not self.DEFAULT_TIMEOUT): + if ( + self.total is not None + and self.total is not self.DEFAULT_TIMEOUT + and self._read is not None + and self._read is not self.DEFAULT_TIMEOUT + ): # In case the connect timeout has not yet been established. if self._start_connect is None: return self._read - return max(0, min(self.total - self.get_connect_duration(), - self._read)) + return max(0, min(self.total - self.get_connect_duration(), self._read)) elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: return max(0, self.total - self.get_connect_duration()) else: diff --git a/pipenv/vendor/urllib3/util/url.py b/pipenv/vendor/urllib3/util/url.py index 0bc6ced756..f7568e9d78 100644 --- a/pipenv/vendor/urllib3/util/url.py +++ b/pipenv/vendor/urllib3/util/url.py @@ -3,41 +3,108 @@ from collections import namedtuple from ..exceptions import LocationParseError -from ..packages import six, rfc3986 -from ..packages.rfc3986.exceptions import RFC3986Exception, ValidationError -from ..packages.rfc3986.validators import Validator -from ..packages.rfc3986 import abnf_regexp, normalizers, compat, misc +from ..packages import six -url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] +url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"] # We only want to normalize urls with an HTTP(S) scheme. # urllib3 infers URLs without a scheme (None) to be http. -NORMALIZABLE_SCHEMES = ('http', 'https', None) - -# Regex for detecting URLs with schemes. RFC 3986 Section 3.1 -SCHEME_REGEX = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+\-]*:|/)") - -PATH_CHARS = abnf_regexp.UNRESERVED_CHARS_SET | abnf_regexp.SUB_DELIMITERS_SET | {':', '@', '/'} -QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {'?'} - - -class Url(namedtuple('Url', url_attrs)): +NORMALIZABLE_SCHEMES = ("http", "https", None) + +# Almost all of these patterns were derived from the +# 'rfc3986' module: https://github.com/python-hyper/rfc3986 +PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}") +SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)") +URI_RE = re.compile( + r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?" + r"(?://([^/?#]*))?" + r"([^?#]*)" + r"(?:\?([^#]*))?" 
+ r"(?:#(.*))?$", + re.UNICODE | re.DOTALL, +) + +IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}" +HEX_PAT = "[0-9A-Fa-f]{1,4}" +LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT) +_subs = {"hex": HEX_PAT, "ls32": LS32_PAT} +_variations = [ + # 6( h16 ":" ) ls32 + "(?:%(hex)s:){6}%(ls32)s", + # "::" 5( h16 ":" ) ls32 + "::(?:%(hex)s:){5}%(ls32)s", + # [ h16 ] "::" 4( h16 ":" ) ls32 + "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s", + # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 + "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s", + # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 + "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s", + # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 + "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s", + # [ *4( h16 ":" ) h16 ] "::" ls32 + "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s", + # [ *5( h16 ":" ) h16 ] "::" h16 + "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s", + # [ *6( h16 ":" ) h16 ] "::" + "(?:(?:%(hex)s:){0,6}%(hex)s)?::", +] + +UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" +IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")" +ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+" +IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]" +REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*" +TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$") + +IPV4_RE = re.compile("^" + IPV4_PAT + "$") +IPV6_RE = re.compile("^" + IPV6_PAT + "$") +IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$") +BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$") +ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$") + +SUBAUTHORITY_PAT = (u"^(?:(.*)@)?(%s|%s|%s)(?::([0-9]{0,5}))?$") % ( + REG_NAME_PAT, + IPV4_PAT, + IPV6_ADDRZ_PAT, +) +SUBAUTHORITY_RE = re.compile(SUBAUTHORITY_PAT, re.UNICODE | re.DOTALL) + +UNRESERVED_CHARS = set( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~" +) +SUB_DELIM_CHARS = set("!$&'()*+,;=") +USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"} +PATH_CHARS = USERINFO_CHARS | {"@", "/"} +QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"} + + +class Url(namedtuple("Url", url_attrs)): """ Data structure for representing an HTTP URL. Used as a return value for :func:`parse_url`. Both the scheme and host are normalized as they are both case-insensitive according to RFC 3986. """ + __slots__ = () - def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, - query=None, fragment=None): - if path and not path.startswith('/'): - path = '/' + path + def __new__( + cls, + scheme=None, + auth=None, + host=None, + port=None, + path=None, + query=None, + fragment=None, + ): + if path and not path.startswith("/"): + path = "/" + path if scheme is not None: scheme = scheme.lower() - return super(Url, cls).__new__(cls, scheme, auth, host, port, path, - query, fragment) + return super(Url, cls).__new__( + cls, scheme, auth, host, port, path, query, fragment + ) @property def hostname(self): @@ -47,10 +114,10 @@ def hostname(self): @property def request_uri(self): """Absolute path including the query string.""" - uri = self.path or '/' + uri = self.path or "/" if self.query is not None: - uri += '?' + self.query + uri += "?" 
+ self.query return uri @@ -58,7 +125,7 @@ def request_uri(self): def netloc(self): """Network location including host and port""" if self.port: - return '%s:%d' % (self.host, self.port) + return "%s:%d" % (self.host, self.port) return self.host @property @@ -81,23 +148,23 @@ def url(self): 'http://username:password@host.com:80/path?query#fragment' """ scheme, auth, host, port, path, query, fragment = self - url = u'' + url = u"" # We use "is not None" we want things to happen with empty strings (or 0 port) if scheme is not None: - url += scheme + u'://' + url += scheme + u"://" if auth is not None: - url += auth + u'@' + url += auth + u"@" if host is not None: url += host if port is not None: - url += u':' + str(port) + url += u":" + str(port) if path is not None: url += path if query is not None: - url += u'?' + query + url += u"?" + query if fragment is not None: - url += u'#' + fragment + url += u"#" + fragment return url @@ -135,48 +202,146 @@ def split_first(s, delims): min_delim = d if min_idx is None or min_idx < 0: - return s, '', None + return s, "", None - return s[:min_idx], s[min_idx + 1:], min_delim + return s[:min_idx], s[min_idx + 1 :], min_delim -def _encode_invalid_chars(component, allowed_chars, encoding='utf-8'): +def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"): """Percent-encodes a URI component without reapplying - onto an already percent-encoded component. Based on - rfc3986.normalizers.encode_component() + onto an already percent-encoded component. """ if component is None: return component + component = six.ensure_text(component) + # Try to see if the component we're encoding is already percent-encoded # so we can skip all '%' characters but still encode all others. - percent_encodings = len(normalizers.PERCENT_MATCHER.findall( - compat.to_str(component, encoding))) + percent_encodings = PERCENT_RE.findall(component) - uri_bytes = component.encode('utf-8', 'surrogatepass') - is_percent_encoded = percent_encodings == uri_bytes.count(b'%') + # Normalize existing percent-encoded bytes. + for enc in percent_encodings: + if not enc.isupper(): + component = component.replace(enc, enc.upper()) + + uri_bytes = component.encode("utf-8", "surrogatepass") + is_percent_encoded = len(percent_encodings) == uri_bytes.count(b"%") encoded_component = bytearray() for i in range(0, len(uri_bytes)): # Will return a single character bytestring on both Python 2 & 3 - byte = uri_bytes[i:i+1] + byte = uri_bytes[i : i + 1] byte_ord = ord(byte) - if ((is_percent_encoded and byte == b'%') - or (byte_ord < 128 and byte.decode() in allowed_chars)): + if (is_percent_encoded and byte == b"%") or ( + byte_ord < 128 and byte.decode() in allowed_chars + ): encoded_component.extend(byte) continue - encoded_component.extend('%{0:02x}'.format(byte_ord).encode().upper()) + encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper())) return encoded_component.decode(encoding) +def _remove_path_dot_segments(path): + # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code + segments = path.split("/") # Turn the path into a list of segments + output = [] # Initialize the variable to use to store output + + for segment in segments: + # '.' 
is the current directory, so ignore it, it is superfluous + if segment == ".": + continue + # Anything other than '..', should be appended to the output + elif segment != "..": + output.append(segment) + # In this case segment == '..', if we can, we should pop the last + # element + elif output: + output.pop() + + # If the path starts with '/' and the output is empty or the first string + # is non-empty + if path.startswith("/") and (not output or output[0]): + output.insert(0, "") + + # If the path starts with '/.' or '/..' ensure we add one more empty + # string to add a trailing '/' + if path.endswith(("/.", "/..")): + output.append("") + + return "/".join(output) + + +def _normalize_host(host, scheme): + if host: + if isinstance(host, six.binary_type): + host = six.ensure_str(host) + + if scheme in NORMALIZABLE_SCHEMES: + is_ipv6 = IPV6_ADDRZ_RE.match(host) + if is_ipv6: + match = ZONE_ID_RE.search(host) + if match: + start, end = match.span(1) + zone_id = host[start:end] + + if zone_id.startswith("%25") and zone_id != "%25": + zone_id = zone_id[3:] + else: + zone_id = zone_id[1:] + zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS) + return host[:start].lower() + zone_id + host[end:] + else: + return host.lower() + elif not IPV4_RE.match(host): + return six.ensure_str( + b".".join([_idna_encode(label) for label in host.split(".")]) + ) + return host + + +def _idna_encode(name): + if name and any([ord(x) > 128 for x in name]): + try: + import idna + except ImportError: + six.raise_from( + LocationParseError("Unable to parse URL without the 'idna' module"), + None, + ) + try: + return idna.encode(name.lower(), strict=True, std3_rules=True) + except idna.IDNAError: + six.raise_from( + LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None + ) + return name.lower().encode("ascii") + + +def _encode_target(target): + """Percent-encodes a request target so that there are no invalid characters""" + if not target.startswith("/"): + return target + + path, query = TARGET_RE.match(target).groups() + target = _encode_invalid_chars(path, PATH_CHARS) + query = _encode_invalid_chars(query, QUERY_CHARS) + if query is not None: + target += "?" + query + return target + + def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is performed to parse incomplete urls. Fields not provided will be None. This parser is RFC 3986 compliant. + The parser logic and helper functions are based heavily on + work done in the ``rfc3986`` module. + :param str url: URL to parse into a :class:`.Url` namedtuple. Partly backwards-compatible with :mod:`urlparse`. @@ -194,90 +359,72 @@ def parse_url(url): # Empty return Url() - is_string = not isinstance(url, six.binary_type) - - # RFC 3986 doesn't like URLs that have a host but don't start - # with a scheme and we support URLs like that so we need to - # detect that problem and add an empty scheme indication. - # We don't get hurt on path-only URLs here as it's stripped - # off and given an empty scheme anyways. 
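A quick check of the dot-segment removal introduced above (private helpers, used here only to illustrate the RFC 3986 section 5.2.4 behaviour)::

    from urllib3.util.url import _remove_path_dot_segments

    assert _remove_path_dot_segments("/a/b/../c/./d") == "/a/c/d"
    assert _remove_path_dot_segments("/a/b/..") == "/a/"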
- if not SCHEME_REGEX.search(url): + source_url = url + if not SCHEME_RE.search(url): url = "//" + url - def idna_encode(name): - if name and any([ord(x) > 128 for x in name]): - try: - import idna - except ImportError: - raise LocationParseError("Unable to parse URL without the 'idna' module") - try: - return idna.encode(name.lower(), strict=True, std3_rules=True) - except idna.IDNAError: - raise LocationParseError(u"Name '%s' is not a valid IDNA label" % name) - return name - - try: - split_iri = misc.IRI_MATCHER.match(compat.to_str(url)).groupdict() - iri_ref = rfc3986.IRIReference( - split_iri['scheme'], split_iri['authority'], - _encode_invalid_chars(split_iri['path'], PATH_CHARS), - _encode_invalid_chars(split_iri['query'], QUERY_CHARS), - _encode_invalid_chars(split_iri['fragment'], FRAGMENT_CHARS) - ) - has_authority = iri_ref.authority is not None - uri_ref = iri_ref.encode(idna_encoder=idna_encode) - except (ValueError, RFC3986Exception): - return six.raise_from(LocationParseError(url), None) - - # rfc3986 strips the authority if it's invalid - if has_authority and uri_ref.authority is None: - raise LocationParseError(url) - - # Only normalize schemes we understand to not break http+unix - # or other schemes that don't follow RFC 3986. - if uri_ref.scheme is None or uri_ref.scheme.lower() in NORMALIZABLE_SCHEMES: - uri_ref = uri_ref.normalize() - - # Validate all URIReference components and ensure that all - # components that were set before are still set after - # normalization has completed. - validator = Validator() try: - validator.check_validity_of( - *validator.COMPONENT_NAMES - ).validate(uri_ref) - except ValidationError: - return six.raise_from(LocationParseError(url), None) + scheme, authority, path, query, fragment = URI_RE.match(url).groups() + normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES + + if scheme: + scheme = scheme.lower() + + if authority: + auth, host, port = SUBAUTHORITY_RE.match(authority).groups() + if auth and normalize_uri: + auth = _encode_invalid_chars(auth, USERINFO_CHARS) + if port == "": + port = None + else: + auth, host, port = None, None, None + + if port is not None: + port = int(port) + if not (0 <= port <= 65535): + raise LocationParseError(url) + + host = _normalize_host(host, scheme) + + if normalize_uri and path: + path = _remove_path_dot_segments(path) + path = _encode_invalid_chars(path, PATH_CHARS) + if normalize_uri and query: + query = _encode_invalid_chars(query, QUERY_CHARS) + if normalize_uri and fragment: + fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS) + + except (ValueError, AttributeError): + return six.raise_from(LocationParseError(source_url), None) # For the sake of backwards compatibility we put empty # string values for path if there are any defined values # beyond the path in the URL. # TODO: Remove this when we break backwards compatibility. - path = uri_ref.path if not path: - if (uri_ref.query is not None - or uri_ref.fragment is not None): + if query is not None or fragment is not None: path = "" else: path = None # Ensure that each part of the URL is a `str` for # backwards compatibility. 
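End to end, the rewritten ``parse_url`` yields the same ``Url`` tuple the old rfc3986-based path produced; for instance::

    from urllib3.util.url import parse_url

    url = parse_url("https://user@example.com:8080/a/../b?x=1#frag")
    assert url.scheme == "https"
    assert url.auth == "user"
    assert url.host == "example.com"
    assert url.port == 8080
    assert url.path == "/b"  # dot segments are normalized for http(s)
    assert url.request_uri == "/b?x=1"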
- def to_input_type(x): - if x is None: - return None - elif not is_string and not isinstance(x, six.binary_type): - return x.encode('utf-8') - return x + if isinstance(url, six.text_type): + ensure_func = six.ensure_text + else: + ensure_func = six.ensure_str + + def ensure_type(x): + return x if x is None else ensure_func(x) return Url( - scheme=to_input_type(uri_ref.scheme), - auth=to_input_type(uri_ref.userinfo), - host=to_input_type(uri_ref.host), - port=int(uri_ref.port) if uri_ref.port is not None else None, - path=to_input_type(path), - query=to_input_type(uri_ref.query), - fragment=to_input_type(uri_ref.fragment) + scheme=ensure_type(scheme), + auth=ensure_type(auth), + host=ensure_type(host), + port=port, + path=ensure_type(path), + query=ensure_type(query), + fragment=ensure_type(fragment), ) @@ -286,4 +433,4 @@ def get_host(url): Deprecated. Use :func:`parse_url` instead. """ p = parse_url(url) - return p.scheme or 'http', p.hostname, p.port + return p.scheme or "http", p.hostname, p.port diff --git a/pipenv/vendor/urllib3/util/wait.py b/pipenv/vendor/urllib3/util/wait.py index 4db71bafd8..d71d2fd722 100644 --- a/pipenv/vendor/urllib3/util/wait.py +++ b/pipenv/vendor/urllib3/util/wait.py @@ -2,6 +2,7 @@ from functools import partial import select import sys + try: from time import monotonic except ImportError: @@ -40,6 +41,8 @@ class NoWayToWaitForSocketError(Exception): # Modern Python, that retries syscalls by default def _retry_on_intr(fn, timeout): return fn(timeout) + + else: # Old and broken Pythons. def _retry_on_intr(fn, timeout): diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index ac5c524dca..566c0641ba 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -2,50 +2,50 @@ appdirs==1.4.3 backports.shutil_get_terminal_size==1.0.0 backports.weakref==1.0.post1 click==7.0 -click-completion==0.5.1 +click-completion==0.5.2 click-didyoumean==0.0.3 colorama==0.4.1 delegator.py==0.1.1 pexpect==4.7.0 ptyprocess==0.6.0 -python-dotenv==0.10.2 +python-dotenv==0.10.3 first==2.0.1 iso8601==0.1.12 -jinja2==2.10. 
+jinja2==2.10.3 markupsafe==1.1.1 -parse==1.12.0 -pathlib2==2.3.3 +parse==1.13.0 +pathlib2==2.3.5 scandir==1.10 pipdeptree==0.13.2 -pipreqs==0.4.9 +pipreqs==0.4.10 docopt==0.6.2 yarg==0.1.9 pythonfinder==1.2.1 requests==2.22.0 chardet==3.0.4 idna==2.8 - urllib3==1.25.2 - certifi==2019.3.9 -requirementslib==1.5.1 - attrs==19.1.0 - distlib==0.2.9 - packaging==19.0 - pyparsing==2.3.1 + urllib3==1.25.7 + certifi==2019.11.28 +requirementslib==1.5.3 + attrs==19.3.0 + distlib==0.3.0 + packaging==19.2 + pyparsing==2.4.5 git+https://github.com/sarugaku/plette.git@master#egg=plette - tomlkit==0.5.3 + tomlkit==0.5.8 shellingham==1.3.1 -six==1.12.0 -semver==2.8.1 +six==1.13.0 +semver==2.9.0 toml==0.10.0 cached-property==1.5.1 -vistir==0.4.2 -pip-shims==0.3.2 +vistir==0.4.3 +pip-shims==0.4.0 enum34==1.1.6 +# yaspin==0.15.0 yaspin==0.14.3 -cerberus==1.3.1 +cerberus==1.3.2 resolvelib==0.2.2 backports.functools_lru_cache==1.5 -pep517==0.5.0 - pytoml==0.1.20 +pep517==0.8.1 git+https://github.com/sarugaku/passa.git@master#egg=passa -orderedmultidict==1.0 +orderedmultidict==1.0.1 diff --git a/pipenv/vendor/vendor_pip.txt b/pipenv/vendor/vendor_pip.txt index 7b5482550b..aadd35261a 100644 --- a/pipenv/vendor/vendor_pip.txt +++ b/pipenv/vendor/vendor_pip.txt @@ -1,23 +1,23 @@ appdirs==1.4.3 CacheControl==0.12.5 colorama==0.4.1 -distlib==0.2.8 +contextlib2==0.6.0 +distlib==0.2.9.post0 distro==1.4.0 html5lib==1.0.1 ipaddress==1.0.22 # Only needed on 2.6 and 2.7 -lockfile==0.12.2 -msgpack==0.5.6 -packaging==19.0 -pep517==0.5.0 +msgpack==0.6.2 +packaging==19.2 +pep517==0.7.0 progress==1.5 -pyparsing==2.4.0 -pytoml==0.1.20 -requests==2.21.0 - certifi==2019.3.9 +pyparsing==2.4.2 +pytoml==0.1.21 +requests==2.22.0 + certifi==2019.9.11 chardet==3.0.4 idna==2.8 - urllib3==1.25.2 + urllib3==1.25.6 retrying==1.3.3 -setuptools==41.0.1 +setuptools==41.4.0 six==1.12.0 webencodings==0.5.1 diff --git a/pipenv/vendor/vistir/__init__.py b/pipenv/vendor/vistir/__init__.py index 821ea29b5a..09af185425 100644 --- a/pipenv/vendor/vistir/__init__.py +++ b/pipenv/vendor/vistir/__init__.py @@ -36,7 +36,7 @@ from .path import create_tracked_tempdir, create_tracked_tempfile, mkdir_p, rmtree from .spin import create_spinner -__version__ = "0.4.2" +__version__ = "0.4.3" __all__ = [ diff --git a/pipenv/vendor/vistir/misc.py b/pipenv/vendor/vistir/misc.py index 36218a50ae..54e0d2a062 100644 --- a/pipenv/vendor/vistir/misc.py +++ b/pipenv/vendor/vistir/misc.py @@ -186,7 +186,6 @@ def _read_streams(stream_dict): return results - def get_stream_results(cmd_instance, verbose, maxlen, spinner=None, stdout_allowed=False): stream_results = {"stdout": [], "stderr": []} streams = {"stderr": cmd_instance.stderr, "stdout": cmd_instance.stdout} @@ -207,7 +206,7 @@ def get_stream_results(cmd_instance, verbose, maxlen, spinner=None, stdout_allow if stream_name == "stderr" else display_line ) - if display_line and last_changed < 100: + if display_line and last_changed > 10: last_changed = 0 display_line = "" elif display_line: @@ -269,18 +268,15 @@ def _create_subprocess( c = _spawn_subprocess( cmd, env=env, block=block, cwd=cwd, combine_stderr=combine_stderr ) - except Exception as exc: + except Exception as exc: # pragma: no cover import traceback - formatted_tb = "".join( - traceback.format_exception(*sys.exc_info()) - ) # pragma: no cover - sys.stderr.write( # pragma: no cover - "Error while executing command %s:" - % to_native_string(" ".join(cmd._parts)) # pragma: no cover - ) # pragma: no cover - sys.stderr.write(formatted_tb) # pragma: no cover - 
raise exc # pragma: no cover + formatted_tb = "".join(traceback.format_exception(*sys.exc_info())) + sys.stderr.write( + "Error while executing command %s:" % to_native_string(" ".join(cmd._parts)) + ) + sys.stderr.write(formatted_tb) + raise exc if not block: c.stdin.close() spinner_orig_text = "" @@ -303,7 +299,7 @@ def _create_subprocess( else: try: c.out, c.err = c.communicate() - except (SystemExit, KeyboardInterrupt, TimeoutError): + except (SystemExit, KeyboardInterrupt, TimeoutError): # pragma: no cover c.terminate() c.out, c.err = c.communicate() raise @@ -858,7 +854,9 @@ def _isatty(stream): if colorama is not None: def _is_wrapped_for_color(stream): - return isinstance(stream, (colorama.AnsiToWin32, colorama.ansitowin32.StreamWrapper)) + return isinstance( + stream, (colorama.AnsiToWin32, colorama.ansitowin32.StreamWrapper) + ) def _wrap_for_color(stream, color=None): try: diff --git a/pipenv/vendor/vistir/path.py b/pipenv/vendor/vistir/path.py index d5b02f641b..2d6e80c6a8 100644 --- a/pipenv/vendor/vistir/path.py +++ b/pipenv/vendor/vistir/path.py @@ -356,7 +356,9 @@ def set_write_bit(fn): "/T", "/C", "/Q", - ], nospin=True, return_object=True + ], + nospin=True, + return_object=True, ) if not c.err and c.returncode == 0: return diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py index 8329ba4e72..925d48a9be 100644 --- a/tasks/vendoring/__init__.py +++ b/tasks/vendoring/__init__.py @@ -416,7 +416,6 @@ def vendor(ctx, vendor_dir, package=None, rewrite=True): @invoke.task def redo_imports(ctx, library): - vendor_dir = _get_vendor_dir(ctx) log('Using vendor dir: %s' % vendor_dir) vendored_libs = detect_vendored_libs(vendor_dir) item = vendor_dir / library @@ -675,6 +674,16 @@ def update_pip_deps(ctx): download_licenses(ctx, vendor_dir) +@invoke.task +def download_all_licenses(ctx, include_pip=False): + vendor_dir = _get_vendor_dir(ctx) + patched_dir = _get_patched_dir(ctx) + download_licenses(ctx, vendor_dir) + download_licenses(ctx, patched_dir, "patched.txt") + if include_pip: + update_pip_deps(ctx) + + @invoke.task(name=TASK_NAME) def main(ctx, package=None): vendor_dir = _get_vendor_dir(ctx) @@ -689,14 +698,7 @@ def main(ctx, package=None): clean_vendor(ctx, patched_dir) vendor(ctx, vendor_dir) vendor(ctx, patched_dir, rewrite=True) - download_licenses(ctx, vendor_dir) - download_licenses(ctx, patched_dir, 'patched.txt') - for pip_dir in [patched_dir / 'notpip']: - _vendor_dir = pip_dir / '_vendor' - vendor_src_file = vendor_dir / 'vendor_pip.txt' - vendor_file = _vendor_dir / 'vendor.txt' - vendor_file.write_bytes(vendor_src_file.read_bytes()) - download_licenses(ctx, _vendor_dir) + download_all_licenses(ctx, include_pip=True) # from .vendor_passa import vendor_passa # log("Vendoring passa...") # vendor_passa(ctx) diff --git a/tasks/vendoring/patches/patched/_post-pip-update-pep425tags.patch b/tasks/vendoring/patches/patched/_post-pip-update-pep425tags.patch index b4ffbc9f27..792a94faf3 100644 --- a/tasks/vendoring/patches/patched/_post-pip-update-pep425tags.patch +++ b/tasks/vendoring/patches/patched/_post-pip-update-pep425tags.patch @@ -1,8 +1,19 @@ diff --git a/pipenv/patched/notpip/_internal/pep425tags.py b/pipenv/patched/notpip/_internal/pep425tags.py -index 3c760ca3..3b11b965 100644 +index 042ba34b..58decc23 100644 --- a/pipenv/patched/notpip/_internal/pep425tags.py +++ b/pipenv/patched/notpip/_internal/pep425tags.py -@@ -178,7 +178,7 @@ def is_manylinux1_compatible(): +@@ -170,8 +170,9 @@ def is_linux_armhf(): + return False + # hard-float ABI can be 
detected from the ELF header of the running + # process ++ sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) + try: +- with open(sys.executable, 'rb') as f: ++ with open(sys_executable, 'rb') as f: + elf_header_raw = f.read(40) # read 40 first bytes of ELF header + except (IOError, OSError, TypeError): + return False +@@ -205,7 +206,7 @@ def is_manylinux1_compatible(): pass # Check glibc version. CentOS 5 uses glibc 2.5. @@ -11,7 +22,7 @@ index 3c760ca3..3b11b965 100644 def is_manylinux2010_compatible(): -@@ -196,7 +196,7 @@ def is_manylinux2010_compatible(): +@@ -223,7 +224,7 @@ def is_manylinux2010_compatible(): pass # Check glibc version. CentOS 6 uses glibc 2.12. @@ -19,4 +30,13 @@ index 3c760ca3..3b11b965 100644 + return pipenv.patched.notpip._internal.utils.glibc.have_compatible_glibc(2, 12) + def is_manylinux2014_compatible(): +@@ -249,7 +250,7 @@ def is_manylinux2014_compatible(): + pass + + # Check glibc version. CentOS 7 uses glibc 2.17. +- return pip._internal.utils.glibc.have_compatible_glibc(2, 17) ++ return pipenv.patched.notpip._internal.utils.glibc.have_compatible_glibc(2, 17) + + def get_darwin_arches(major, minor, machine): diff --git a/tasks/vendoring/patches/patched/_post-pip-update-pypi-uri.patch b/tasks/vendoring/patches/patched/_post-pip-update-pypi-uri.patch deleted file mode 100644 index 93f7ccbc59..0000000000 --- a/tasks/vendoring/patches/patched/_post-pip-update-pypi-uri.patch +++ /dev/null @@ -1,44 +0,0 @@ -diff --git a/pipenv/patched/notpip/_vendor/distlib/index.py b/pipenv/patched/notpip/_vendor/distlib/index.py -index 2406be21..7a87cdcf 100644 ---- a/pipenv/patched/notpip/_vendor/distlib/index.py -+++ b/pipenv/patched/notpip/_vendor/distlib/index.py -@@ -22,7 +22,7 @@ from .util import cached_property, zip_dir, ServerProxy - - logger = logging.getLogger(__name__) - --DEFAULT_INDEX = 'https://pypi.python.org/pypi' -+DEFAULT_INDEX = 'https://pypi.org/pypi' - DEFAULT_REALM = 'pypi' - - class PackageIndex(object): -diff --git a/pipenv/patched/notpip/_vendor/distlib/locators.py b/pipenv/patched/notpip/_vendor/distlib/locators.py -index 5c655c3e..a7ed9469 100644 ---- a/pipenv/patched/notpip/_vendor/distlib/locators.py -+++ b/pipenv/patched/notpip/_vendor/distlib/locators.py -@@ -36,7 +36,7 @@ logger = logging.getLogger(__name__) - HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)') - CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) - HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') --DEFAULT_INDEX = 'https://pypi.python.org/pypi' -+DEFAULT_INDEX = 'https://pypi.org/pypi' - - def get_all_distribution_names(url=None): - """ -@@ -197,7 +197,7 @@ class Locator(object): - is_downloadable = basename.endswith(self.downloadable_extensions) - if is_wheel: - compatible = is_compatible(Wheel(basename), self.wheel_tags) -- return (t.scheme == 'https', 'pypi.python.org' in t.netloc, -+ return (t.scheme == 'https', 'pypi.org' in t.netloc, - is_downloadable, is_wheel, compatible, basename) - - def prefer_url(self, url1, url2): -@@ -1049,7 +1049,7 @@ class AggregatingLocator(Locator): - # versions which don't conform to PEP 426 / PEP 440. 
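The pep425tags hunks above extend the same PIP_PYTHON_PATH override that this patch series applies throughout: pipenv drives pip's internals in-process, so sys.executable points at pipenv's own interpreter, and the environment variable substitutes the target environment's interpreter. A minimal sketch of the pattern, with the armhf check as the motivating case (names mirror the patched lines):

    import os
    import sys

    # Prefer the interpreter pipenv is operating on, falling back to the
    # interpreter actually running this code.
    sys_executable = os.environ.get("PIP_PYTHON_PATH", sys.executable)
    with open(sys_executable, "rb") as f:
        elf_header_raw = f.read(40)  # first 40 bytes of the ELF header
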
- default_locator = AggregatingLocator( - JSONLocator(), -- SimpleScrapingLocator('https://pypi.python.org/simple/', -+ SimpleScrapingLocator('https://pypi.org/simple/', - timeout=3.0), - scheme='legacy') - diff --git a/tasks/vendoring/patches/patched/_post-pip-update-requests-imports.patch b/tasks/vendoring/patches/patched/_post-pip-update-requests-imports.patch index 79e126599e..f916579cec 100644 --- a/tasks/vendoring/patches/patched/_post-pip-update-requests-imports.patch +++ b/tasks/vendoring/patches/patched/_post-pip-update-requests-imports.patch @@ -1,5 +1,5 @@ diff --git a/pipenv/patched/notpip/_vendor/requests/packages.py b/pipenv/patched/notpip/_vendor/requests/packages.py -index 9582fa73..258c89ed 100644 +index 9582fa73..5fb6f07d 100644 --- a/pipenv/patched/notpip/_vendor/requests/packages.py +++ b/pipenv/patched/notpip/_vendor/requests/packages.py @@ -4,13 +4,13 @@ import sys @@ -7,7 +7,7 @@ index 9582fa73..258c89ed 100644 for package in ('urllib3', 'idna', 'chardet'): - vendored_package = "pip._vendor." + package -+ vendored_package = "notpip._vendor." + package ++ vendored_package = "pipenv.patched.notpip._vendor." + package locals()[package] = __import__(vendored_package) # This traversal is apparently necessary such that the identities are # preserved (requests.packages.urllib3.* is urllib3.*) @@ -15,7 +15,7 @@ index 9582fa73..258c89ed 100644 if mod == vendored_package or mod.startswith(vendored_package + '.'): - unprefixed_mod = mod[len("pip._vendor."):] - sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod] -+ unprefixed_mod = mod[len("notpip._vendor."):] -+ sys.modules['notpip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod] ++ unprefixed_mod = mod[len("pipenv.patched.notpip._vendor."):] ++ sys.modules['pipenv.patched.notpip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod] # Kinda cool, though, right? diff --git a/tasks/vendoring/patches/patched/pip19.patch b/tasks/vendoring/patches/patched/pip19.patch index 74b98f50c7..c40379f5bb 100644 --- a/tasks/vendoring/patches/patched/pip19.patch +++ b/tasks/vendoring/patches/patched/pip19.patch @@ -1,78 +1,119 @@ -diff --git a/pipenv/patched/pip/_internal/download.py b/pipenv/patched/pip/_internal/download.py -index 2bbe1762..872af328 100644 ---- a/pipenv/patched/pip/_internal/download.py -+++ b/pipenv/patched/pip/_internal/download.py -@@ -77,7 +77,7 @@ def user_agent(): - Return a string representing the user agent. 
- """ - data = { -- "installer": {"name": "pip", "version": pip.__version__}, -+ "installer": {"name": "pip", "version": pipenv.patched.notpip.__version__}, - "python": platform.python_version(), - "implementation": { - "name": platform.python_implementation(), +diff --git a/pipenv/patched/pip/_internal/build_env.py b/pipenv/patched/pip/_internal/build_env.py +index 5e6dc460..0412f635 100644 +--- a/pipenv/patched/pip/_internal/build_env.py ++++ b/pipenv/patched/pip/_internal/build_env.py +@@ -169,8 +169,9 @@ class BuildEnvironment(object): + prefix.setup = True + if not requirements: + return ++ sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) + args = [ +- sys.executable, os.path.dirname(pip_location), 'install', ++ sys_executable, os.path.dirname(pip_location), 'install', + '--ignore-installed', '--no-user', '--prefix', prefix.path, + '--no-warn-script-location', + ] # type: List[str] +diff --git a/pipenv/patched/pip/_internal/commands/install.py b/pipenv/patched/pip/_internal/commands/install.py +index 5842d18d..4e56d0bb 100644 +--- a/pipenv/patched/pip/_internal/commands/install.py ++++ b/pipenv/patched/pip/_internal/commands/install.py +@@ -388,7 +388,7 @@ class InstallCommand(RequirementCommand): + else: + # If we're not replacing an already installed pip, + # we're not modifying it. +- modifying_pip = pip_req.satisfied_by is None ++ modifying_pip = getattr(pip_req, "satisfied_by", None) is None + protect_pip_from_modification_on_windows( + modifying_pip=modifying_pip + ) diff --git a/pipenv/patched/pip/_internal/index.py b/pipenv/patched/pip/_internal/index.py -index 9eda3a35..67dd952c 100644 +index 897444aa..4c61043c 100644 --- a/pipenv/patched/pip/_internal/index.py +++ b/pipenv/patched/pip/_internal/index.py -@@ -331,6 +331,9 @@ class PackageFinder(object): - # The Session we'll use to make requests - self.session = session - -+ # Kenneth's Hack -+ self.extra = None -+ - # The valid tags to check potential found wheel candidates against - self.valid_tags = get_supported( - versions=versions, -@@ -369,6 +372,23 @@ class PackageFinder(object): - ) - return "\n".join(lines) - -+ @staticmethod -+ def get_extras_links(links): -+ requires = [] -+ extras = {} -+ -+ current_list = requires -+ -+ for link in links: -+ if not link: -+ current_list = requires -+ if link.startswith('['): -+ current_list = [] -+ extras[link[1:-1]] = current_list -+ else: -+ current_list.append(link) -+ return extras -+ - @staticmethod - def _sort_locations(locations, expand_dir=False): - # type: (Sequence[str], bool) -> Tuple[List[str], List[str]] -@@ -427,8 +447,8 @@ class PackageFinder(object): - - return files, urls +@@ -119,6 +119,7 @@ class LinkEvaluator(object): + target_python, # type: TargetPython + allow_yanked, # type: bool + ignore_requires_python=None, # type: Optional[bool] ++ ignore_compatibility=None, # type: Optional[bool] + ): + # type: (...) -> None + """ +@@ -137,15 +138,20 @@ class LinkEvaluator(object): + :param ignore_requires_python: Whether to ignore incompatible + PEP 503 "data-requires-python" values in HTML links. Defaults + to False. ++ :param Optional[bool] ignore_compatibility: Whether to ignore ++ compatibility of python versions and allow all versions of packages. 
+ """ + if ignore_requires_python is None: + ignore_requires_python = False ++ if ignore_compatibility is None: ++ ignore_compatibility = True + + self._allow_yanked = allow_yanked + self._canonical_name = canonical_name + self._ignore_requires_python = ignore_requires_python + self._formats = formats + self._target_python = target_python ++ self._ignore_compatibility = ignore_compatibility + + self.project_name = project_name + +@@ -176,10 +182,10 @@ class LinkEvaluator(object): + return (False, 'not a file') + if ext not in SUPPORTED_EXTENSIONS: + return (False, 'unsupported archive format: %s' % ext) +- if "binary" not in self._formats and ext == WHEEL_EXTENSION: ++ if "binary" not in self._formats and ext == WHEEL_EXTENSION and not self._ignore_compatibility: + reason = 'No binaries permitted for %s' % self.project_name + return (False, reason) +- if "macosx10" in link.path and ext == '.zip': ++ if "macosx10" in link.path and ext == '.zip' and not self._ignore_compatibility: + return (False, 'macosx10 one') + if ext == WHEEL_EXTENSION: + try: +@@ -191,7 +197,7 @@ class LinkEvaluator(object): + return (False, reason) + + supported_tags = self._target_python.get_tags() +- if not wheel.supported(supported_tags): ++ if not wheel.supported(supported_tags) and not self._ignore_compatibility: + # Include the wheel's tags in the reason string to + # simplify troubleshooting compatibility issues. + file_tags = wheel.get_formatted_file_tags() +@@ -228,7 +234,7 @@ class LinkEvaluator(object): + link, version_info=self._target_python.py_version_info, + ignore_requires_python=self._ignore_requires_python, + ) +- if not supports_python: ++ if not supports_python and not self._ignore_compatibility: + # Return None for the reason text to suppress calling + # _log_skipped_link(). + return (False, None) +@@ -479,8 +485,8 @@ class CandidateEvaluator(object): + project_name=self._project_name, + ) -- def _candidate_sort_key(self, candidate): +- def _sort_key(self, candidate): - # type: (InstallationCandidate) -> CandidateSortingKey -+ def _candidate_sort_key(self, candidate, ignore_compatibility=True): ++ def _sort_key(self, candidate, ignore_compatibility=True): + # type: (InstallationCandidate, bool) -> CandidateSortingKey """ - Function used to generate link sort key for link tuples. - The greater the return value, the more preferred it is. -@@ -448,14 +468,18 @@ class PackageFinder(object): - if candidate.location.is_wheel: + Function to pass as the `key` argument to a call to sorted() to sort + InstallationCandidates by preference. +@@ -518,14 +524,18 @@ class CandidateEvaluator(object): + if link.is_wheel: # can raise InvalidWheelFilename - wheel = Wheel(candidate.location.filename) -- if not wheel.supported(self.valid_tags): -+ if not wheel.supported(self.valid_tags) and not ignore_compatibility: + wheel = Wheel(link.filename) +- if not wheel.supported(valid_tags): ++ if not wheel.supported(valid_tags) and not ignore_compatibility: raise UnsupportedWheel( "%s is not a supported wheel for this platform. It " "can't be sorted." 
% wheel.filename ) - if self.prefer_binary: + if self._prefer_binary: binary_preference = 1 -- pri = -(wheel.support_index_min(self.valid_tags)) +- pri = -(wheel.support_index_min(valid_tags)) + tags = self.valid_tags if not ignore_compatibility else None + try: + pri = -(wheel.support_index_min(tags=tags)) @@ -81,286 +122,144 @@ index 9eda3a35..67dd952c 100644 if wheel.build_tag is not None: match = re.match(r'^(\d+)(.*)$', wheel.build_tag) build_tag_groups = match.groups() -@@ -608,7 +632,10 @@ class PackageFinder(object): - - page_versions = [] - for page in self._get_pages(url_locations, project_name): -- logger.debug('Analyzing links from page %s', page.url) -+ try: -+ logger.debug('Analyzing links from page %s', page.url) -+ except AttributeError: -+ continue - with indent_log(): - page_versions.extend( - self._package_versions(page.iter_links(), search) -@@ -628,8 +655,8 @@ class PackageFinder(object): - # This is an intentional priority ordering - return file_versions + find_links_versions + page_versions - -- def find_requirement(self, req, upgrade): -- # type: (InstallRequirement, bool) -> Optional[Link] -+ def find_requirement(self, req, upgrade, ignore_compatibility=False): -+ # type: (InstallRequirement, bool, bool) -> Optional[Link] - """Try to find a Link matching req - - Expects req, an InstallRequirement and upgrade, a boolean -@@ -784,8 +811,8 @@ class PackageFinder(object): - logger.debug('Skipping link %s; %s', link, reason) - self.logged_links.add(link) - -- def _link_package_versions(self, link, search): -- # type: (Link, Search) -> Optional[InstallationCandidate] -+ def _link_package_versions(self, link, search, ignore_compatibility=True): -+ # type: (Link, Search, bool) -> Optional[InstallationCandidate] - """Return an InstallationCandidate or None""" - version = None - if link.egg_fragment: -@@ -801,12 +828,12 @@ class PackageFinder(object): - link, 'unsupported archive format: %s' % ext, - ) - return None -- if "binary" not in search.formats and ext == WHEEL_EXTENSION: -+ if "binary" not in search.formats and ext == WHEEL_EXTENSION and not ignore_compatibility: - self._log_skipped_link( - link, 'No binaries permitted for %s' % search.supplied, - ) - return None -- if "macosx10" in link.path and ext == '.zip': -+ if "macosx10" in link.path and ext == '.zip' and not ignore_compatibility: - self._log_skipped_link(link, 'macosx10 one') - return None - if ext == WHEEL_EXTENSION: -@@ -820,7 +847,7 @@ class PackageFinder(object): - link, 'wrong project name (not %s)' % search.supplied) - return None - -- if not wheel.supported(self.valid_tags): -+ if not wheel.supported(self.valid_tags) and not ignore_compatibility: - self._log_skipped_link( - link, 'it is not compatible with this Python') - return None -@@ -856,14 +883,14 @@ class PackageFinder(object): - link.filename, link.requires_python) - support_this_python = True - -- if not support_this_python: -+ if not support_this_python and not ignore_compatibility: - logger.debug("The package %s is incompatible with the python " - "version in use. 
Acceptable python versions are: %s", - link, link.requires_python) - return None - logger.debug('Found link %s, version: %s', link, version) - -- return InstallationCandidate(search.supplied, version, link) -+ return InstallationCandidate(search.supplied, version, link, link.requires_python) - - - def _find_name_version_sep(egg_info, canonical_name): -diff --git a/pipenv/patched/pip/_internal/models/candidate.py b/pipenv/patched/pip/_internal/models/candidate.py -index 4475458a..6748957d 100644 ---- a/pipenv/patched/pip/_internal/models/candidate.py -+++ b/pipenv/patched/pip/_internal/models/candidate.py -@@ -13,11 +13,12 @@ class InstallationCandidate(KeyBasedCompareMixin): - """Represents a potential "candidate" for installation. - """ +@@ -603,6 +613,7 @@ class PackageFinder(object): + format_control=None, # type: Optional[FormatControl] + candidate_prefs=None, # type: CandidatePreferences + ignore_requires_python=None, # type: Optional[bool] ++ ignore_compatibility=None, # type: Optional[bool] + ): + # type: (...) -> None + """ +@@ -617,6 +628,8 @@ class PackageFinder(object): + """ + if candidate_prefs is None: + candidate_prefs = CandidatePreferences() ++ if ignore_compatibility is None: ++ ignore_compatibility = False -- def __init__(self, project, version, location): -- # type: (Any, str, Link) -> None -+ def __init__(self, project, version, location, requires_python=None): -+ # type: (Any, str, Link, Any) -> None - self.project = project - self.version = parse_version(version) # type: _BaseVersion - self.location = location -+ self.requires_python = requires_python + format_control = format_control or FormatControl(set(), set()) - super(InstallationCandidate, self).__init__( - key=(self.project, self.version, self.location), -diff --git a/pipenv/patched/pip/_internal/operations/prepare.py b/pipenv/patched/pip/_internal/operations/prepare.py -index 4f31dd5a..ed0c86b2 100644 ---- a/pipenv/patched/pip/_internal/operations/prepare.py -+++ b/pipenv/patched/pip/_internal/operations/prepare.py -@@ -17,7 +17,7 @@ from pip._internal.exceptions import ( - from pip._internal.utils.compat import expanduser - from pip._internal.utils.hashes import MissingHashes - from pip._internal.utils.logging import indent_log --from pip._internal.utils.misc import display_path, normalize_path -+from pip._internal.utils.misc import display_path, normalize_path, rmtree - from pip._internal.utils.typing import MYPY_CHECK_RUNNING - from pip._internal.vcs import vcs +@@ -625,12 +638,16 @@ class PackageFinder(object): + self._ignore_requires_python = ignore_requires_python + self._link_collector = link_collector + self._target_python = target_python ++ self._ignore_compatibility = ignore_compatibility -@@ -258,14 +258,7 @@ class RequirementPreparer(object): - # package unpacked in `req.source_dir` - # package unpacked in `req.source_dir` - if os.path.exists(os.path.join(req.source_dir, 'setup.py')): -- raise PreviousBuildDirError( -- "pip can't proceed with requirements '%s' due to a" -- " pre-existing build directory (%s). This is " -- "likely due to a previous installation that failed" -- ". pip is being responsible and not assuming it " -- "can delete this. Please delete it and try again." -- % (req, req.source_dir) -- ) -+ rmtree(req.source_dir) - req.populate_link(finder, upgrade_allowed, require_hashes) - - # We can't hit this spot and have populate_link return None. 
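Both the old and the rebased version of this hunk make the same behavioural change to prepare.py: where upstream pip raises PreviousBuildDirError on a stale build directory, the patched code deletes it and carries on. A sketch of the effect (clear_stale_build_dir is a hypothetical helper name; rmtree stands in for pip's retrying wrapper):

    import os

    def clear_stale_build_dir(req, rmtree):
        # Upstream: raise PreviousBuildDirError and make the user clean up.
        # Patched: treat the leftover directory as disposable and remove it.
        if os.path.exists(os.path.join(req.source_dir, "setup.py")):
            rmtree(req.source_dir)
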
-diff --git a/pipenv/patched/pip/_internal/pep425tags.py b/pipenv/patched/pip/_internal/pep425tags.py -index 1e782d1a..3c760ca3 100644 ---- a/pipenv/patched/pip/_internal/pep425tags.py -+++ b/pipenv/patched/pip/_internal/pep425tags.py -@@ -10,7 +10,10 @@ import sysconfig - import warnings - from collections import OrderedDict - --import pip._internal.utils.glibc -+try: -+ import pip._internal.utils.glibc -+except ImportError: -+ import pip.utils.glibc - from pip._internal.utils.compat import get_extension_suffixes - from pip._internal.utils.typing import MYPY_CHECK_RUNNING + self.format_control = format_control -diff --git a/pipenv/patched/pip/_internal/req/req_install.py b/pipenv/patched/pip/_internal/req/req_install.py -index a4834b00..2c22e141 100644 ---- a/pipenv/patched/pip/_internal/req/req_install.py -+++ b/pipenv/patched/pip/_internal/req/req_install.py -@@ -588,7 +588,8 @@ class InstallRequirement(object): - self.setup_py, self.link, - ) - script = SETUPTOOLS_SHIM % self.setup_py -- base_cmd = [sys.executable, '-c', script] -+ sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) -+ base_cmd = [sys_executable, '-c', script] - if self.isolated: - base_cmd += ["--no-user-cfg"] - egg_info_cmd = base_cmd + ['egg_info'] -@@ -746,9 +747,10 @@ class InstallRequirement(object): - with indent_log(): - # FIXME: should we do --install-headers here too? - with self.build_env: -+ sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) - call_subprocess( - [ -- sys.executable, -+ sys_executable, - '-c', - SETUPTOOLS_SHIM % self.setup_py - ] + -@@ -995,7 +997,8 @@ class InstallRequirement(object): - pycompile # type: bool - ): - # type: (...) -> List[str] -- install_args = [sys.executable, "-u"] -+ sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) -+ install_args = [sys_executable, "-u"] - install_args.append('-c') - install_args.append(SETUPTOOLS_SHIM % self.setup_py) - install_args += list(global_options) + \ -diff --git a/pipenv/patched/pip/_internal/req/req_set.py b/pipenv/patched/pip/_internal/req/req_set.py -index d1410e93..69a53bf2 100644 ---- a/pipenv/patched/pip/_internal/req/req_set.py -+++ b/pipenv/patched/pip/_internal/req/req_set.py -@@ -18,7 +18,7 @@ logger = logging.getLogger(__name__) + # These are boring links that have already been logged somehow. + self._logged_links = set() # type: Set[Link] - class RequirementSet(object): ++ # Kenneth's Hack ++ self.extra = None ++ + # Don't include an allow_yanked default value to make sure each call + # site considers whether yanked releases are allowed. This also causes + # that decision to be made explicit in the calling code, which helps +@@ -668,6 +685,23 @@ class PackageFinder(object): + ignore_requires_python=selection_prefs.ignore_requires_python, + ) -- def __init__(self, require_hashes=False, check_supported_wheels=True): -+ def __init__(self, require_hashes=False, check_supported_wheels=True, ignore_compatibility=True): - # type: (bool, bool) -> None - """Create a RequirementSet. 
- """ -@@ -26,6 +26,9 @@ class RequirementSet(object): - self.requirements = OrderedDict() # type: Dict[str, InstallRequirement] # noqa: E501 - self.require_hashes = require_hashes - self.check_supported_wheels = check_supported_wheels -+ if ignore_compatibility: -+ self.check_supported_wheels = False -+ self.ignore_compatibility = (check_supported_wheels is False or ignore_compatibility is True) ++ @staticmethod ++ def get_extras_links(links): ++ requires = [] ++ extras = {} ++ ++ current_list = requires ++ ++ for link in links: ++ if not link: ++ current_list = requires ++ if link.startswith('['): ++ current_list = [] ++ extras[link[1:-1]] = current_list ++ else: ++ current_list.append(link) ++ return extras ++ + @property + def search_scope(self): + # type: () -> SearchScope +@@ -715,6 +749,7 @@ class PackageFinder(object): + target_python=self._target_python, + allow_yanked=self._allow_yanked, + ignore_requires_python=self._ignore_requires_python, ++ ignore_compatibility=self._ignore_compatibility + ) - # Mapping of alias: real_name - self.requirement_aliases = {} # type: Dict[str, str] -@@ -186,7 +189,7 @@ class RequirementSet(object): - return self.requirements[name] - if name in self.requirement_aliases: - return self.requirements[self.requirement_aliases[name]] -- raise KeyError("No project with the name %r" % project_name) -+ pass + def _sort_links(self, links): +@@ -763,6 +798,7 @@ class PackageFinder(object): + # Convert the Text result to str since InstallationCandidate + # accepts str. + version=str(result), ++ requires_python=getattr(link, "requires_python", None) + ) - def cleanup_files(self): - # type: () -> None -diff --git a/pipenv/patched/pip/_internal/resolve.py b/pipenv/patched/pip/_internal/resolve.py -index 33f572f1..dfe149ad 100644 ---- a/pipenv/patched/pip/_internal/resolve.py -+++ b/pipenv/patched/pip/_internal/resolve.py -@@ -19,6 +19,7 @@ from pip._internal.exceptions import ( - UnsupportedPythonVersion, - ) - from pip._internal.req.constructors import install_req_from_req_string -+from pip._internal.req.req_install import InstallRequirement - from pip._internal.utils.logging import indent_log - from pip._internal.utils.misc import dist_in_usersite, ensure_dir - from pip._internal.utils.packaging import check_dist_requires_python -@@ -58,7 +59,8 @@ class Resolver(object): + def evaluate_links(self, link_evaluator, links): +diff --git a/pipenv/patched/pip/_internal/legacy_resolve.py b/pipenv/patched/pip/_internal/legacy_resolve.py +index c24158f4..37c3197f 100644 +--- a/pipenv/patched/pip/_internal/legacy_resolve.py ++++ b/pipenv/patched/pip/_internal/legacy_resolve.py +@@ -126,6 +126,7 @@ class Resolver(object): force_reinstall, # type: bool - isolated, # type: bool upgrade_strategy, # type: str -- use_pep517=None # type: Optional[bool] -+ use_pep517=None, # type: Optional[bool] + py_version_info=None, # type: Optional[Tuple[int, ...]] + ignore_compatibility=False, # type: bool ): # type: (...) 
-> None super(Resolver, self).__init__() -@@ -81,8 +83,12 @@ class Resolver(object): - self.ignore_dependencies = ignore_dependencies - self.ignore_installed = ignore_installed +@@ -152,6 +153,10 @@ class Resolver(object): self.ignore_requires_python = ignore_requires_python -+ self.ignore_compatibility = ignore_compatibility self.use_user_site = use_user_site - self.use_pep517 = use_pep517 + self._make_install_req = make_install_req ++ self.ignore_compatibility = ignore_compatibility + self.requires_python = None + if self.ignore_compatibility: + self.ignore_requires_python = True self._discovered_dependencies = \ defaultdict(list) # type: DefaultDict[str, List] -@@ -273,7 +279,8 @@ class Resolver(object): +@@ -344,7 +349,8 @@ class Resolver(object): def _resolve_one( self, requirement_set, # type: RequirementSet - req_to_install # type: InstallRequirement + req_to_install, # type: InstallRequirement -+ ignore_requires_python=False # type: bool ++ ignore_requires_python=False, # type: bool ): # type: (...) -> List[InstallRequirement] """Prepare a single requirements file. -@@ -298,11 +305,18 @@ class Resolver(object): - try: - check_dist_requires_python(dist) - except UnsupportedPythonVersion as err: -- if self.ignore_requires_python: -+ if self.ignore_requires_python or ignore_requires_python or self.ignore_compatibility: - logger.warning(err.args[0]) - else: - raise - -+ # A huge hack, by Kenneth Reitz. +@@ -368,11 +374,21 @@ class Resolver(object): + dist = abstract_dist.get_pkg_resources_distribution() + # This will raise UnsupportedPythonVersion if the given Python + # version isn't compatible with the distribution's Requires-Python. ++ ignore_requires_python = ( ++ ignore_requires_python or self.ignore_requires_python or ++ self.ignore_compatibility ++ ) + _check_dist_requires_python( + dist, version_info=self._py_version_info, +- ignore_requires_python=self.ignore_requires_python, ++ ignore_requires_python=ignore_requires_python, + ) +- ++ # Patched in - lets get the python version on here then ++ # FIXME: Does this patch even work? it puts the python version ++ # on the resolver... why? + try: -+ self.requires_python = check_dist_requires_python(dist, absorb=False) ++ self.requires_python = get_requires_python(dist) + except TypeError: + self.requires_python = None -+ -+ more_reqs = [] # type: List[InstallRequirement] def add_req(subreq, extras_requested): -@@ -329,10 +343,14 @@ class Resolver(object): - # We add req_to_install before its dependencies, so that we +@@ -397,9 +413,13 @@ class Resolver(object): # can refer to it when adding dependencies. 
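The hunks that follow wire requested extras through the resolver by intersecting dist.extras with the requirement's extras and, in the older hack, by reading pkg_resources' internal dependency map. In supported terms, the information being recovered is roughly this ("requests" is just an example of an installed distribution):

    import pkg_resources

    # dist.requires() is the public API for what the dep-map hack digs out:
    # the requirements that become active when an extra is requested (the
    # base requirements are included in the result as well).
    dist = pkg_resources.get_distribution("requests")
    for extra in sorted(dist.extras):
        for req in dist.requires(extras=(extra,)):
            print(extra, req)
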
if not requirement_set.has_requirement(req_to_install.name): + # 'unnamed' requirements will get added here + available_requested = sorted( + set(dist.extras) & set(req_to_install.extras) + ) - # 'unnamed' requirements will get added here req_to_install.is_direct = True requirement_set.add_requirement( req_to_install, parent_req_name=None, @@ -368,7 +267,7 @@ index 33f572f1..dfe149ad 100644 ) if not self.ignore_dependencies: -@@ -356,6 +374,20 @@ class Resolver(object): +@@ -423,6 +443,17 @@ class Resolver(object): for subreq in dist.requires(available_requested): add_req(subreq, extras_requested=available_requested) @@ -376,12 +275,9 @@ index 33f572f1..dfe149ad 100644 + for available in available_requested: + if hasattr(dist, '_DistInfoDistribution__dep_map'): + for req in dist._DistInfoDistribution__dep_map[available]: -+ req = InstallRequirement( ++ req = self._make_install_req( + req, -+ req_to_install, -+ isolated=self.isolated, -+ wheel_cache=self.wheel_cache, -+ use_pep517=None ++ req_to_install + ) + + more_reqs.append(req) @@ -389,77 +285,212 @@ index 33f572f1..dfe149ad 100644 if not req_to_install.editable and not req_to_install.satisfied_by: # XXX: --no-install leads this to report 'Successfully # downloaded' for only non-editable reqs, even though we took +diff --git a/pipenv/patched/pip/_internal/models/candidate.py b/pipenv/patched/pip/_internal/models/candidate.py +index 4d49604d..cdfe65aa 100644 +--- a/pipenv/patched/pip/_internal/models/candidate.py ++++ b/pipenv/patched/pip/_internal/models/candidate.py +@@ -16,11 +16,12 @@ class InstallationCandidate(KeyBasedCompareMixin): + """Represents a potential "candidate" for installation. + """ + +- def __init__(self, project, version, link): +- # type: (Any, str, Link) -> None ++ def __init__(self, project, version, link, requires_python=None): ++ # type: (Any, str, Link, Any) -> None + self.project = project + self.version = parse_version(version) # type: _BaseVersion + self.link = link ++ self.requires_python = requires_python + + super(InstallationCandidate, self).__init__( + key=(self.project, self.version, self.link), +diff --git a/pipenv/patched/pip/_internal/operations/prepare.py b/pipenv/patched/pip/_internal/operations/prepare.py +index d0930458..91527ae8 100644 +--- a/pipenv/patched/pip/_internal/operations/prepare.py ++++ b/pipenv/patched/pip/_internal/operations/prepare.py +@@ -140,14 +140,7 @@ class RequirementPreparer(object): + # FIXME: this won't upgrade when there's an existing + # package unpacked in `req.source_dir` + if os.path.exists(os.path.join(req.source_dir, 'setup.py')): +- raise PreviousBuildDirError( +- "pip can't proceed with requirements '%s' due to a" +- " pre-existing build directory (%s). This is " +- "likely due to a previous installation that failed" +- ". pip is being responsible and not assuming it " +- "can delete this. Please delete it and try again." 
+- % (req, req.source_dir) +- ) ++ rmtree(req.source_dir) + + # Now that we have the real link, we can tell what kind of + # requirements we have and raise some more informative errors +diff --git a/pipenv/patched/pip/_internal/req/req_set.py b/pipenv/patched/pip/_internal/req/req_set.py +index b34a2bb1..afcd2e4f 100644 +--- a/pipenv/patched/pip/_internal/req/req_set.py ++++ b/pipenv/patched/pip/_internal/req/req_set.py +@@ -24,8 +24,8 @@ logger = logging.getLogger(__name__) + + class RequirementSet(object): + +- def __init__(self, require_hashes=False, check_supported_wheels=True): +- # type: (bool, bool) -> None ++ def __init__(self, require_hashes=False, check_supported_wheels=True, ignore_compatibility=True): ++ # type: (bool) -> None + """Create a RequirementSet. + """ + +@@ -36,6 +36,9 @@ class RequirementSet(object): + self.unnamed_requirements = [] # type: List[InstallRequirement] + self.successfully_downloaded = [] # type: List[InstallRequirement] + self.reqs_to_cleanup = [] # type: List[InstallRequirement] ++ if ignore_compatibility: ++ self.check_supported_wheels = False ++ self.ignore_compatibility = (check_supported_wheels is False or ignore_compatibility is True) + + def __str__(self): + # type: () -> str +@@ -199,7 +202,7 @@ class RequirementSet(object): + if project_name in self.requirements: + return self.requirements[project_name] + +- raise KeyError("No project with the name %r" % name) ++ pass + + def cleanup_files(self): + # type: () -> None +diff --git a/pipenv/patched/pip/_internal/utils/logging.py b/pipenv/patched/pip/_internal/utils/logging.py +index 7767111a..52738e16 100644 +--- a/pipenv/patched/pip/_internal/utils/logging.py ++++ b/pipenv/patched/pip/_internal/utils/logging.py +@@ -314,8 +314,8 @@ def setup_logging(verbosity, no_color, user_log_file): + "stderr": "ext://sys.stderr", + } + handler_classes = { +- "stream": "pip._internal.utils.logging.ColorizedStreamHandler", +- "file": "pip._internal.utils.logging.BetterRotatingFileHandler", ++ "stream": "pipenv.patched.notpip._internal.utils.logging.ColorizedStreamHandler", ++ "file": "pipenv.patched.notpip._internal.utils.logging.BetterRotatingFileHandler", + } + handlers = ["console", "console_errors", "console_subprocess"] + ( + ["user_log"] if include_user_log else [] +@@ -326,7 +326,7 @@ def setup_logging(verbosity, no_color, user_log_file): + "disable_existing_loggers": False, + "filters": { + "exclude_warnings": { +- "()": "pip._internal.utils.logging.MaxLevelFilter", ++ "()": "pipenv.patched.notpip._internal.utils.logging.MaxLevelFilter", + "level": logging.WARNING, + }, + "restrict_to_subprocess": { +@@ -334,7 +334,7 @@ def setup_logging(verbosity, no_color, user_log_file): + "name": subprocess_logger.name, + }, + "exclude_subprocess": { +- "()": "pip._internal.utils.logging.ExcludeLoggerFilter", ++ "()": "pipenv.patched.notpip._internal.utils.logging.ExcludeLoggerFilter", + "name": subprocess_logger.name, + }, + }, +diff --git a/pipenv/patched/pip/_internal/utils/misc.py b/pipenv/patched/pip/_internal/utils/misc.py +index b8482635..2fae4e08 100644 +--- a/pipenv/patched/pip/_internal/utils/misc.py ++++ b/pipenv/patched/pip/_internal/utils/misc.py +@@ -136,8 +136,8 @@ def get_prog(): + @retry(stop_max_delay=3000, wait_fixed=500) + def rmtree(dir, ignore_errors=False): + # type: (str, bool) -> None +- shutil.rmtree(dir, ignore_errors=ignore_errors, +- onerror=rmtree_errorhandler) ++ from pipenv.vendor.vistir.path import rmtree as vistir_rmtree, handle_remove_readonly ++ vistir_rmtree(dir, 
onerror=handle_remove_readonly, ignore_errors=ignore_errors) + + + def rmtree_errorhandler(func, path, exc_info): diff --git a/pipenv/patched/pip/_internal/utils/packaging.py b/pipenv/patched/pip/_internal/utils/packaging.py -index 7aaf7b5e..d56f0512 100644 +index 68aa86ed..8577d387 100644 --- a/pipenv/patched/pip/_internal/utils/packaging.py +++ b/pipenv/patched/pip/_internal/utils/packaging.py -@@ -37,7 +37,7 @@ def check_requires_python(requires_python): +@@ -1,6 +1,7 @@ + from __future__ import absolute_import + + import logging ++import sys + from email.parser import FeedParser + + from pip._vendor import pkg_resources +@@ -37,7 +38,7 @@ def check_requires_python(requires_python, version_info): + return True requires_python_specifier = specifiers.SpecifierSet(requires_python) - # We only use major.minor.micro -- python_version = version.parse('.'.join(map(str, sys.version_info[:3]))) +- python_version = version.parse('.'.join(map(str, version_info))) + python_version = version.parse('{0}.{1}.{2}'.format(*sys.version_info[:3])) return python_version in requires_python_specifier -@@ -57,9 +57,11 @@ def get_metadata(dist): - return feed_parser.close() - +diff --git a/pipenv/patched/pip/_internal/utils/setuptools_build.py b/pipenv/patched/pip/_internal/utils/setuptools_build.py +index 12d866e0..28649a4d 100644 +--- a/pipenv/patched/pip/_internal/utils/setuptools_build.py ++++ b/pipenv/patched/pip/_internal/utils/setuptools_build.py +@@ -1,3 +1,4 @@ ++import os + import sys --def check_dist_requires_python(dist): -+def check_dist_requires_python(dist, absorb=False): - pkg_info_dict = get_metadata(dist) - requires_python = pkg_info_dict.get('Requires-Python') -+ if absorb: -+ return requires_python - try: - if not check_requires_python(requires_python): - raise exceptions.UnsupportedPythonVersion( + from pip._internal.utils.typing import MYPY_CHECK_RUNNING +@@ -36,7 +37,8 @@ def make_setuptools_shim_args( + :param unbuffered_output: If True, adds the unbuffered switch to the + argument list. 
+ """ +- args = [sys.executable] ++ sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) ++ args = [sys_executable] + if unbuffered_output: + args.append('-u') + args.extend(['-c', _SETUPTOOLS_SHIM.format(setup_py_path)]) diff --git a/pipenv/patched/pip/_internal/utils/temp_dir.py b/pipenv/patched/pip/_internal/utils/temp_dir.py -index 2c81ad55..ff2ccc5a 100644 +index 77d40be6..8a32cf2d 100644 --- a/pipenv/patched/pip/_internal/utils/temp_dir.py +++ b/pipenv/patched/pip/_internal/utils/temp_dir.py -@@ -5,8 +5,10 @@ import itertools +@@ -8,9 +8,11 @@ import itertools import logging import os.path import tempfile +import warnings from pip._internal.utils.misc import rmtree + from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pipenv.vendor.vistir.compat import finalize, ResourceWarning - logger = logging.getLogger(__name__) - -@@ -47,6 +49,20 @@ class TempDirectory(object): - self.path = path + if MYPY_CHECK_RUNNING: + from typing import Optional +@@ -60,6 +62,20 @@ class TempDirectory(object): + self._deleted = False self.delete = delete self.kind = kind + self._finalizer = None -+ if path: ++ if self._path: + self._register_finalizer() + + def _register_finalizer(self): -+ if self.delete and self.path: ++ if self.delete and self._path: + self._finalizer = finalize( + self, + self._cleanup, -+ self.path, ++ self._path, + warn_message = None + ) + else: + self._finalizer = None - def __repr__(self): - return "<{} {!r}>".format(self.__class__.__name__, self.path) -@@ -74,14 +90,30 @@ class TempDirectory(object): - self.path = os.path.realpath( - tempfile.mkdtemp(prefix="pip-{}-".format(self.kind)) - ) -+ self._register_finalizer() - logger.debug("Created temporary directory: {}".format(self.path)) + @property + def path(self): +@@ -92,12 +108,28 @@ class TempDirectory(object): + logger.debug("Created temporary directory: {}".format(path)) + return path + @classmethod + def _cleanup(cls, name, warn_message=None): ++ if not os.path.exists(name): ++ return + try: + rmtree(name) + except OSError: @@ -471,72 +502,23 @@ index 2c81ad55..ff2ccc5a 100644 def cleanup(self): """Remove the temporary directory created and reset state """ -- if self.path is not None and os.path.exists(self.path): -- rmtree(self.path) -- self.path = None +- self._deleted = True +- if os.path.exists(self._path): +- rmtree(self._path) + if getattr(self._finalizer, "detach", None) and self._finalizer.detach(): -+ if os.path.exists(self.path): ++ if os.path.exists(self._path): ++ self._deleted = True + try: -+ rmtree(self.path) ++ rmtree(self._path) + except OSError: + pass -+ else: -+ self.path = None class AdjacentTempDirectory(TempDirectory): -@@ -152,4 +184,5 @@ class AdjacentTempDirectory(TempDirectory): - self.path = os.path.realpath( - tempfile.mkdtemp(prefix="pip-{}-".format(self.kind)) +@@ -169,4 +201,4 @@ class AdjacentTempDirectory(TempDirectory): ) -+ self._register_finalizer() - logger.debug("Created temporary directory: {}".format(self.path)) -diff --git a/pipenv/patched/pip/_internal/wheel.py b/pipenv/patched/pip/_internal/wheel.py -index 67bcc7f7..968cdff9 100644 ---- a/pipenv/patched/pip/_internal/wheel.py -+++ b/pipenv/patched/pip/_internal/wheel.py -@@ -114,7 +114,7 @@ def fix_script(path): - firstline = script.readline() - if not firstline.startswith(b'#!python'): - return False -- exename = sys.executable.encode(sys.getfilesystemencoding()) -+ exename = os.environ.get('PIP_PYTHON_PATH', sys.executable).encode(sys.getfilesystemencoding()) - firstline = b'#!' 
+ exename + os.linesep.encode("ascii") - rest = script.read() - with open(path, 'wb') as script: -@@ -201,7 +201,8 @@ def message_about_scripts_not_on_PATH(scripts): - ] - # If an executable sits with sys.executable, we don't warn for it. - # This covers the case of venv invocations without activating the venv. -- not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable))) -+ executable_loc = os.environ.get("PIP_PYTHON_PATH", sys.executable) -+ not_warn_dirs.append(os.path.normcase(os.path.dirname(executable_loc))) - warn_for = { - parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items() - if os.path.normcase(parent_dir) not in not_warn_dirs -@@ -901,8 +902,9 @@ class WheelBuilder(object): - # isolating. Currently, it breaks Python in virtualenvs, because it - # relies on site.py to find parts of the standard library outside the - # virtualenv. -+ executable_loc = os.environ.get('PIP_PYTHON_PATH', sys.executable) - return [ -- sys.executable, '-u', '-c', -+ executable_loc, '-u', '-c', - SETUPTOOLS_SHIM % req.setup_py - ] + list(self.global_options) -diff --git a/pipenv/patched/pip/_internal/utils/misc.py b/pipenv/patched/pip/_internal/utils/misc.py -index 84605ee3..649311c0 100644 ---- a/pipenv/patched/pip/_internal/utils/misc.py -+++ b/pipenv/patched/pip/_internal/utils/misc.py -@@ -117,8 +117,8 @@ def get_prog(): - @retry(stop_max_delay=3000, wait_fixed=500) - def rmtree(dir, ignore_errors=False): - # type: (str, bool) -> None -- shutil.rmtree(dir, ignore_errors=ignore_errors, -- onerror=rmtree_errorhandler) -+ from pipenv.vendor.vistir.path import rmtree as vistir_rmtree, handle_remove_readonly -+ vistir_rmtree(dir, onerror=handle_remove_readonly, ignore_errors=ignore_errors) - - - def rmtree_errorhandler(func, path, exc_info): + logger.debug("Created temporary directory: {}".format(path)) +- return path ++ return path +\ No newline at end of file diff --git a/tasks/vendoring/patches/patched/piptools.patch b/tasks/vendoring/patches/patched/piptools.patch index 52d66f513e..fe8d8fe9c1 100644 --- a/tasks/vendoring/patches/patched/piptools.patch +++ b/tasks/vendoring/patches/patched/piptools.patch @@ -1,202 +1,219 @@ diff --git a/pipenv/patched/piptools/_compat/__init__.py b/pipenv/patched/piptools/_compat/__init__.py -index e4ac717..19adcbc 100644 +index eccbf36..fd8ecdd 100644 --- a/pipenv/patched/piptools/_compat/__init__.py +++ b/pipenv/patched/piptools/_compat/__init__.py -@@ -31,4 +31,6 @@ from .pip_compat import ( - install_req_from_editable, - stdlib_pkgs, - DEV_PKGS, +@@ -11,6 +11,7 @@ from .pip_compat import ( + FormatControl, + InstallationCandidate, + InstallCommand, ++ InstallationError, + InstallRequirement, + Link, + PackageFinder, +@@ -18,6 +19,8 @@ from .pip_compat import ( + RequirementSet, + RequirementTracker, + Resolver, + SafeFileCache, -+ InstallationError - ) ++ VcsSupport, + Wheel, + WheelCache, + cmdoptions, +@@ -29,6 +32,7 @@ from .pip_compat import ( + is_vcs_url, + parse_requirements, + path_to_url, ++ pip_version, + stdlib_pkgs, + url_to_path, + user_cache_dir, diff --git a/pipenv/patched/piptools/_compat/pip_compat.py b/pipenv/patched/piptools/_compat/pip_compat.py -index 82ccb8b..715144a 100644 +index 67da307..765bd49 100644 --- a/pipenv/patched/piptools/_compat/pip_compat.py +++ b/pipenv/patched/piptools/_compat/pip_compat.py -@@ -1,48 +1,51 @@ +@@ -1,26 +1,24 @@ # -*- coding=utf-8 -*- --import importlib -+__all__ = [ -+ "InstallRequirement", -+ "parse_requirements", -+ "RequirementSet", -+ "FAVORITE_HASH", -+ "is_file_url", -+ 
"path_to_url", -+ "url_to_path", -+ "PackageFinder", -+ "FormatControl", -+ "Wheel", -+ "Command", -+ "cmdoptions", -+ "get_installed_distributions", -+ "PyPI", -+ "stdlib_pkgs", -+ "DEV_PKGS", -+ "install_req_from_line", -+ "install_req_from_editable", -+ "user_cache_dir", -+ "SafeFileCache", -+ "InstallationError" -+] - + from __future__ import absolute_import +- + import importlib +-from contextlib import contextmanager +- -import pip --import pkg_resources +-from pip._vendor.packaging.version import parse as parse_version +- +-PIP_VERSION = tuple(map(int, parse_version(pip.__version__).base_version.split("."))) +import os ++from appdirs import user_cache_dir +os.environ["PIP_SHIMS_BASE_MODULE"] = str("pipenv.patched.notpip") ++import pip_shims.shims ++from pip_shims.models import ShimmedPathCollection, ImportTypes + +-try: +- from pip._internal.req.req_tracker import RequirementTracker +-except ImportError: ++InstallationCandidate = ShimmedPathCollection("InstallationCandidate", ImportTypes.CLASS) ++InstallationCandidate.create_path("models.candidate", "18.0", "9999") ++InstallationCandidate.create_path("index", "7.0.3", "10.9.9") + +- @contextmanager +- def RequirementTracker(): +- yield ++PIP_VERSION = tuple(map(int, pip_shims.shims.parsed_pip_version.parsed_version.base_version.split("."))) + ++RequirementTracker = pip_shims.shims.RequirementTracker --def do_import(module_path, subimport=None, old_path=None): -- old_path = old_path or module_path + def do_import(module_path, subimport=None, old_path=None): + old_path = old_path or module_path - prefixes = ["pip._internal", "pip"] -- paths = [module_path, old_path] -- search_order = ["{0}.{1}".format(p, pth) for p in prefixes for pth in paths if pth is not None] -- package = subimport if subimport else None -- for to_import in search_order: -- if not subimport: -- to_import, _, package = to_import.rpartition(".") -- try: -- imported = importlib.import_module(to_import) -- except ImportError: -- continue -- else: -- return getattr(imported, package) -- -- --InstallRequirement = do_import('req.req_install', 'InstallRequirement') --parse_requirements = do_import('req.req_file', 'parse_requirements') --RequirementSet = do_import('req.req_set', 'RequirementSet') --user_cache_dir = do_import('utils.appdirs', 'user_cache_dir') --FAVORITE_HASH = do_import('utils.hashes', 'FAVORITE_HASH') --is_file_url = do_import('download', 'is_file_url') --path_to_url = do_import('download', 'path_to_url') --url_to_path = do_import('download', 'url_to_path') --PackageFinder = do_import('index', 'PackageFinder') --FormatControl = do_import('index', 'FormatControl') --Wheel = do_import('wheel', 'Wheel') --Command = do_import('cli.base_command', 'Command', old_path='basecommand') --cmdoptions = do_import('cli.cmdoptions', old_path='cmdoptions') --get_installed_distributions = do_import('utils.misc', 'get_installed_distributions', old_path='utils') --PyPI = do_import('models.index', 'PyPI') --stdlib_pkgs = do_import('utils.compat', 'stdlib_pkgs', old_path='compat') --DEV_PKGS = do_import('commands.freeze', 'DEV_PKGS') -- --# pip 18.1 has refactored InstallRequirement constructors use by pip-tools. 
--if pkg_resources.parse_version(pip.__version__) < pkg_resources.parse_version('18.1'): -- install_req_from_line = InstallRequirement.from_line -- install_req_from_editable = InstallRequirement.from_editable --else: -- install_req_from_line = do_import('req.constructors', 'install_req_from_line') -- install_req_from_editable = do_import('req.constructors', 'install_req_from_editable') -+from pip_shims.shims import ( -+ InstallRequirement, -+ parse_requirements, -+ RequirementSet, -+ FAVORITE_HASH, -+ is_file_url, -+ path_to_url, -+ url_to_path, -+ PackageFinder, -+ FormatControl, -+ Wheel, -+ Command, -+ cmdoptions, -+ get_installed_distributions, -+ PyPI, -+ stdlib_pkgs, -+ DEV_PKGS, -+ install_req_from_line, -+ install_req_from_editable, -+ USER_CACHE_DIR as user_cache_dir, -+ SafeFileCache, -+ InstallationError -+) ++ pip_path = os.environ.get("PIP_SHIMS_BASE_MODULE", "pip") ++ prefixes = ["{}._internal".format(pip_path), pip_path] + paths = [module_path, old_path] + search_order = [ + "{0}.{1}".format(p, pth) for p in prefixes for pth in paths if pth is not None +@@ -37,31 +35,29 @@ def do_import(module_path, subimport=None, old_path=None): + return getattr(imported, package) + + +-InstallRequirement = do_import("req.req_install", "InstallRequirement") +-InstallationCandidate = do_import( +- "models.candidate", "InstallationCandidate", old_path="index" +-) +-parse_requirements = do_import("req.req_file", "parse_requirements") +-RequirementSet = do_import("req.req_set", "RequirementSet") +-user_cache_dir = do_import("utils.appdirs", "user_cache_dir") +-FAVORITE_HASH = do_import("utils.hashes", "FAVORITE_HASH") +-path_to_url = do_import("utils.urls", "path_to_url", old_path="download") +-url_to_path = do_import("utils.urls", "url_to_path", old_path="download") +-PackageFinder = do_import("index.package_finder", "PackageFinder", old_path="index") +-FormatControl = do_import("models.format_control", "FormatControl", old_path="index") +-InstallCommand = do_import("commands.install", "InstallCommand") +-Wheel = do_import("wheel", "Wheel") +-cmdoptions = do_import("cli.cmdoptions", old_path="cmdoptions") +-get_installed_distributions = do_import( +- "utils.misc", "get_installed_distributions", old_path="utils" +-) +-PyPI = do_import("models.index", "PyPI") +-stdlib_pkgs = do_import("utils.compat", "stdlib_pkgs", old_path="compat") +-DEV_PKGS = do_import("commands.freeze", "DEV_PKGS") +-Link = do_import("models.link", "Link", old_path="index") ++InstallRequirement = pip_shims.shims.InstallRequirement ++InstallationError = pip_shims.shims.InstallationError ++parse_requirements = pip_shims.shims.parse_requirements ++RequirementSet = pip_shims.shims.RequirementSet ++SafeFileCache = pip_shims.shims.SafeFileCache ++FAVORITE_HASH = pip_shims.shims.FAVORITE_HASH ++path_to_url = pip_shims.shims.path_to_url ++url_to_path = pip_shims.shims.url_to_path ++PackageFinder = pip_shims.shims.PackageFinder ++FormatControl = pip_shims.shims.FormatControl ++InstallCommand = pip_shims.shims.InstallCommand ++Wheel = pip_shims.shims.Wheel ++cmdoptions = pip_shims.shims.cmdoptions ++get_installed_distributions = pip_shims.shims.get_installed_distributions ++PyPI = pip_shims.shims.PyPI ++stdlib_pkgs = pip_shims.shims.stdlib_pkgs ++DEV_PKGS = pip_shims.shims.DEV_PKGS ++Link = pip_shims.shims.Link + Session = do_import("_vendor.requests.sessions", "Session") +-Resolver = do_import("legacy_resolve", "Resolver", old_path="resolve") +-WheelCache = do_import("cache", "WheelCache", old_path="wheel") ++Resolver = 
pip_shims.shims.Resolver ++VcsSupport = pip_shims.shims.VcsSupport ++WheelCache = pip_shims.shims.WheelCache ++pip_version = pip_shims.shims.pip_version + + # pip 18.1 has refactored InstallRequirement constructors use by pip-tools. + if PIP_VERSION < (18, 1): diff --git a/pipenv/patched/piptools/locations.py b/pipenv/patched/piptools/locations.py -index 4e6174c..9e0c6f1 100644 +index fb66cf3..bb199f6 100644 --- a/pipenv/patched/piptools/locations.py +++ b/pipenv/patched/piptools/locations.py -@@ -5,7 +5,11 @@ from .click import secho - from ._compat import user_cache_dir +@@ -5,7 +5,10 @@ from ._compat import user_cache_dir + from .click import secho # The user_cache_dir helper comes straight from pip itself --CACHE_DIR = user_cache_dir('pip-tools') +-CACHE_DIR = user_cache_dir("pip-tools") +try: -+ from pipenv.environments import PIPENV_CACHE_DIR -+ CACHE_DIR = PIPENV_CACHE_DIR ++ from pipenv.environments import PIPENV_CACHE_DIR as CACHE_DIR +except ImportError: -+ CACHE_DIR = user_cache_dir('pipenv') ++ CACHE_DIR = user_cache_dir("pipenv") # NOTE # We used to store the cache dir under ~/.pip-tools, which is not the diff --git a/pipenv/patched/piptools/repositories/local.py b/pipenv/patched/piptools/repositories/local.py -index 08dabe1..36bafdb 100644 +index f389784..c1bcf9d 100644 --- a/pipenv/patched/piptools/repositories/local.py +++ b/pipenv/patched/piptools/repositories/local.py -@@ -56,7 +56,8 @@ class LocalRequirementsRepository(BaseRepository): +@@ -61,7 +61,8 @@ class LocalRequirementsRepository(BaseRepository): if existing_pin and ireq_satisfied_by_existing_pin(ireq, existing_pin): project, version, _ = as_tuple(existing_pin) return make_install_requirement( - project, version, ireq.extras, constraint=ireq.constraint -+ project, version, ireq.extras, constraint=ireq.constraint, -+ markers=ireq.markers ++ project, version, ireq.extras, ireq.markers, ++ constraint=ireq.constraint ) else: return self.repository.find_best_match(ireq, prereleases) diff --git a/pipenv/patched/piptools/repositories/pypi.py b/pipenv/patched/piptools/repositories/pypi.py -index e54ae08..75b8208 100644 +index acbd680..c9a23ad 100644 --- a/pipenv/patched/piptools/repositories/pypi.py +++ b/pipenv/patched/piptools/repositories/pypi.py -@@ -2,14 +2,22 @@ - from __future__ import (absolute_import, division, print_function, - unicode_literals) +@@ -2,21 +2,29 @@ + from __future__ import absolute_import, division, print_function, unicode_literals + import collections +import copy import hashlib import os from contextlib import contextmanager + from functools import partial from shutil import rmtree --import pip - import pkg_resources - +from packaging.requirements import Requirement -+from packaging.specifiers import SpecifierSet, Specifier -+ -+os.environ["PIP_SHIMS_BASE_MODULE"] = str("pipenv.patched.notpip") -+import pip_shims -+from pip_shims.shims import VcsSupport, WheelCache, InstallationError, pip_version -+ ++from packaging.specifiers import Specifier, SpecifierSet + from .._compat import ( - is_file_url, - url_to_path, -@@ -18,13 +26,15 @@ from .._compat import ( - Wheel, FAVORITE_HASH, - TemporaryDirectory, -- PyPI -+ PyPI, + PIP_VERSION, ++ InstallationError, + InstallRequirement, -+ SafeFileCache + Link, + PyPI, + RequirementSet, + RequirementTracker, + Resolver as PipResolver, ++ SafeFileCache, + TemporaryDirectory, ++ VcsSupport, + Wheel, + WheelCache, + contextlib, +@@ -24,6 +32,7 @@ from .._compat import ( + is_file_url, + is_vcs_url, + path_to_url, ++ pip_version, + url_to_path, ) - from 
..cache import CACHE_DIR +@@ -31,6 +40,8 @@ from ..click import progressbar from ..exceptions import NoCandidateFound --from ..utils import (fs_str, is_pinned_requirement, lookup_table, -- make_install_requirement) -+from ..utils import (fs_str, is_pinned_requirement, lookup_table, dedup, -+ make_install_requirement, clean_requires_python) + from ..logging import log + from ..utils import ( ++ dedup, ++ clean_requires_python, + create_install_command, + fs_str, + is_pinned_requirement, +@@ -40,10 +51,50 @@ from ..utils import ( + ) from .base import BaseRepository - try: -@@ -34,10 +44,44 @@ except ImportError: - def RequirementTracker(): - yield ++os.environ["PIP_SHIMS_BASE_MODULE"] = str("pipenv.patched.notpip") + FILE_CHUNK_SIZE = 4096 + FileStream = collections.namedtuple("FileStream", "stream size") + --try: -- from pip._internal.cache import WheelCache --except ImportError: -- from pip.wheel import WheelCache -+ +class HashCache(SafeFileCache): + """Caches hashes of PyPI artifacts so we do not need to re-download them + @@ -234,32 +251,34 @@ index e54ae08..75b8208 100644 + for chunk in iter(lambda: fp.read(8096), b""): + h.update(chunk) + return ":".join([FAVORITE_HASH, h.hexdigest()]) - - ++ ++ class PyPIRepository(BaseRepository): -@@ -49,10 +93,11 @@ class PyPIRepository(BaseRepository): - config), but any other PyPI mirror can be used if index_urls is + DEFAULT_INDEX_URL = PyPI.simple_url + +@@ -54,8 +105,9 @@ class PyPIRepository(BaseRepository): changed/configured on the Finder. """ -- def __init__(self, pip_options, session, build_isolation=False): -+ def __init__(self, pip_options, session, build_isolation=False, use_json=False): - self.session = session - self.pip_options = pip_options + +- def __init__(self, pip_args, build_isolation=False): ++ def __init__(self, pip_args, session=None, build_isolation=False, use_json=False): self.build_isolation = build_isolation + self.use_json = use_json - index_urls = [pip_options.index_url] + pip_options.extra_index_urls - if pip_options.no_index: -@@ -67,7 +112,7 @@ class PyPIRepository(BaseRepository): - } - - # pip 19.0 has removed process_dependency_links from the PackageFinder constructor -- if pkg_resources.parse_version(pip.__version__) < pkg_resources.parse_version('19.0'): -+ if pkg_resources.parse_version(pip_version) < pkg_resources.parse_version('19.0'): - finder_kwargs["process_dependency_links"] = pip_options.process_dependency_links + # Use pip's parser for pip.conf management and defaults. 
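# A standalone sketch of the pattern referenced in the comment above and
# implemented in the next hunk: reuse pip's own "install" command parser so
# that pip.conf, PIP_* environment variables, and index options all take
# effect when building the session and finder. `create_command` exists in
# pip>=19.3 (`create_install_command` above is the piptools wrapper around
# it); `_build_session`/`_build_package_finder` are private pip APIs whose
# exact signatures vary between releases, so treat this as an assumption.
from pip._internal.commands import create_command

command = create_command("install")
options, _ = command.parse_args(["--index-url", "https://pypi.org/simple"])
session = command._build_session(options)
finder = command._build_package_finder(options=options, session=session)
print(options.index_url)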
+ # General options (find_links, index_url, extra_index_url, trusted_host, +@@ -63,7 +115,9 @@ class PyPIRepository(BaseRepository): + command = create_install_command() + self.options, _ = command.parse_args(pip_args) - self.finder = PackageFinder(**finder_kwargs) -@@ -82,6 +127,10 @@ class PyPIRepository(BaseRepository): +- self.session = command._build_session(self.options) ++ if session is None: ++ session = command._build_session(self.options) ++ self.session = session + self.finder = command._build_package_finder( + options=self.options, session=self.session + ) +@@ -78,6 +132,10 @@ class PyPIRepository(BaseRepository): # of all secondary dependencies for the given requirement, so we # only have to go to disk once for each requirement self._dependencies_cache = {} @@ -270,15 +289,18 @@ index e54ae08..75b8208 100644 # Setup file paths self.freshen_build_caches() -@@ -122,10 +171,13 @@ class PyPIRepository(BaseRepository): - if ireq.editable: +@@ -118,13 +176,15 @@ class PyPIRepository(BaseRepository): + if ireq.editable or is_url_requirement(ireq): return ireq # return itself as the best match - all_candidates = self.find_all_candidates(ireq.name) + all_candidates = clean_requires_python(self.find_all_candidates(ireq.name)) - candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version, unique=True) -- matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates), -- prereleases=prereleases) + candidates_by_version = lookup_table( + all_candidates, key=lambda c: c.version, unique=True + ) +- matching_versions = ireq.specifier.filter( +- (candidate.version for candidate in all_candidates), prereleases=prereleases +- ) + try: + matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates), + prereleases=prereleases) @@ -287,12 +309,12 @@ index e54ae08..75b8208 100644 # Reuses pip's internal candidate sort key to sort matching_candidates = [candidates_by_version[ver] for ver in matching_versions] -@@ -135,14 +187,75 @@ class PyPIRepository(BaseRepository): - - # Turn the candidate into a pinned InstallRequirement - return make_install_requirement( -- best_candidate.project, best_candidate.version, ireq.extras, constraint=ireq.constraint -+ best_candidate.project, best_candidate.version, ireq.extras, ireq.markers, constraint=ireq.constraint +@@ -153,11 +213,74 @@ class PyPIRepository(BaseRepository): + best_candidate.project, + best_candidate.version, + ireq.extras, ++ ireq.markers, + constraint=ireq.constraint, ) + def get_dependencies(self, ireq): @@ -359,126 +381,140 @@ index e54ae08..75b8208 100644 + ireq.populate_link(self.finder, False, False) + if ireq.link and not ireq.link.is_wheel: + ireq.ensure_has_source_dir(self.source_dir) - try: - from pip._internal.operations.prepare import RequirementPreparer -- from pip._internal.resolve import Resolver as PipResolver - except ImportError: - # Pip 9 and below + + if PIP_VERSION < (10,): reqset = RequirementSet( -@@ -151,9 +264,11 @@ class PyPIRepository(BaseRepository): +@@ -166,11 +289,13 @@ class PyPIRepository(BaseRepository): download_dir=download_dir, wheel_download_dir=self._wheel_download_dir, session=self.session, + ignore_installed=True, + ignore_compatibility=False, - wheel_cache=wheel_cache + wheel_cache=wheel_cache, ) - results = reqset._prepare_file(self.finder, ireq) + results = reqset._prepare_file(self.finder, ireq, ignore_requires_python=True) else: - # pip >= 10 +- from pip._internal.operations.prepare import RequirementPreparer ++ from 
pip_shims.shims import RequirementPreparer + preparer_kwargs = { -@@ -170,11 +285,13 @@ class PyPIRepository(BaseRepository): - 'upgrade_strategy': "to-satisfy-only", - 'force_reinstall': False, - 'ignore_dependencies': False, -- 'ignore_requires_python': False, -+ 'ignore_requires_python': True, - 'ignore_installed': True, -- 'isolated': False, -+ 'ignore_compatibility': False, -+ 'isolated': True, - 'wheel_cache': wheel_cache, -- 'use_user_site': False -+ 'use_user_site': False, -+ 'use_pep517': True + "build_dir": self.build_dir, +@@ -186,9 +311,11 @@ class PyPIRepository(BaseRepository): + "upgrade_strategy": "to-satisfy-only", + "force_reinstall": False, + "ignore_dependencies": False, +- "ignore_requires_python": False, ++ "ignore_requires_python": True, + "ignore_installed": True, + "use_user_site": False, ++ "ignore_compatibility": False, ++ "use_pep517": True, } + make_install_req_kwargs = {"isolated": False, "wheel_cache": wheel_cache} + +@@ -208,6 +335,7 @@ class PyPIRepository(BaseRepository): + resolver = None preparer = None -@@ -186,15 +303,21 @@ class PyPIRepository(BaseRepository): - resolver_kwargs['preparer'] = preparer ++ reqset = None + with RequirementTracker() as req_tracker: + # Pip 18 uses a requirement tracker to prevent fork bombs + if req_tracker: +@@ -216,7 +344,6 @@ class PyPIRepository(BaseRepository): + resolver_kwargs["preparer"] = preparer reqset = RequirementSet() ireq.is_direct = True - reqset.add_requirement(ireq) -- resolver = PipResolver(**resolver_kwargs) -+ # reqset.add_requirement(ireq) -+ resolver = pip_shims.shims.Resolver(**resolver_kwargs) - resolver.require_hashes = False - results = resolver._resolve_one(reqset, ireq) -- reqset.cleanup_files() -- return set(results) -+ cleanup_fn = getattr(reqset, "cleanup_files", None) -+ if cleanup_fn is not None: -+ try: -+ cleanup_fn() -+ except OSError: -+ pass + resolver = PipResolver(**resolver_kwargs) + require_hashes = False +@@ -225,12 +352,16 @@ class PyPIRepository(BaseRepository): + results = resolver._resolve_one(reqset, ireq) + else: + results = resolver._resolve_one(reqset, ireq, require_hashes) ++ try: ++ reqset.cleanup_files() ++ except (AttributeError, OSError): ++ pass + +- reqset.cleanup_files() + results = set(results) if results else set() -+ return results, ireq + + return set(results) - def get_dependencies(self, ireq): + def get_legacy_dependencies(self, ireq): """ - Given a pinned or an editable InstallRequirement, returns a set of + Given a pinned, URL, or editable InstallRequirement, returns a set of dependencies (also InstallRequirements, but not necessarily pinned). 
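# A minimal sketch of the chunked hashing HashCache performs in the hunks
# above: digest an artifact without reading it into memory all at once and
# render it the way pip's hash checking expects ("sha256:<hexdigest>").
# FAVORITE_HASH is "sha256" in the pip/piptools versions vendored here;
# the 8096-byte chunk size mirrors the patched code.
import hashlib

FAVORITE_HASH = "sha256"

def file_hash(path):
    h = hashlib.new(FAVORITE_HASH)
    with open(path, "rb") as fp:
        for chunk in iter(lambda: fp.read(8096), b""):
            h.update(chunk)
    return ":".join([FAVORITE_HASH, h.hexdigest()])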
-@@ -223,7 +346,8 @@ class PyPIRepository(BaseRepository): - wheel_cache = WheelCache(CACHE_DIR, self.pip_options.format_control) - prev_tracker = os.environ.get('PIP_REQ_TRACKER') +@@ -265,9 +396,8 @@ class PyPIRepository(BaseRepository): + wheel_cache = WheelCache(CACHE_DIR, self.options.format_control) + prev_tracker = os.environ.get("PIP_REQ_TRACKER") try: -- self._dependencies_cache[ireq] = self.resolve_reqs(download_dir, ireq, wheel_cache) +- self._dependencies_cache[ireq] = self.resolve_reqs( +- download_dir, ireq, wheel_cache +- ) + results, ireq = self.resolve_reqs(download_dir, ireq, wheel_cache) + self._dependencies_cache[ireq] = results finally: - if 'PIP_REQ_TRACKER' in os.environ: + if "PIP_REQ_TRACKER" in os.environ: if prev_tracker: -@@ -245,6 +369,10 @@ class PyPIRepository(BaseRepository): - if ireq.editable: - return set() - -+ vcs = VcsSupport() -+ if ireq.link and ireq.link.scheme in vcs.all_schemes and 'ssh' in ireq.link.scheme: -+ return set() -+ - if not is_pinned_requirement(ireq): - raise TypeError( - "Expected pinned requirement, got {}".format(ireq)) -@@ -252,24 +380,16 @@ class PyPIRepository(BaseRepository): +@@ -313,12 +443,10 @@ class PyPIRepository(BaseRepository): # We need to get all of the candidates that match our current version # pin, these will represent all of the files that could possibly # satisfy this constraint. - all_candidates = self.find_all_candidates(ireq.name) - candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version) - matching_versions = list( -- ireq.specifier.filter((candidate.version for candidate in all_candidates))) -- matching_candidates = candidates_by_version[matching_versions[0]] +- ireq.specifier.filter((candidate.version for candidate in all_candidates)) + matching_candidates = ( + c for c in clean_requires_python(self.find_all_candidates(ireq.name)) + if c.version in ireq.specifier -+ ) + ) +- matching_candidates = candidates_by_version[matching_versions[0]] + + log.debug(" {}".format(ireq.name)) + +@@ -328,30 +456,11 @@ class PyPIRepository(BaseRepository): + return candidate.link return { -- self._get_file_hash(candidate.location) +- self._get_file_hash(get_candidate_link(candidate)) - for candidate in matching_candidates -+ h for h in map(lambda c: self._hash_cache.get_hash(c.location), -+ matching_candidates) if h is not None ++ h for h in ++ map(lambda c: self._hash_cache.get_hash(get_candidate_link(c)), matching_candidates) ++ if h is not None } -- def _get_file_hash(self, location): +- def _get_file_hash(self, link): +- log.debug(" Hashing {}".format(link.url_without_fragment)) - h = hashlib.new(FAVORITE_HASH) -- with open_local_or_remote_file(location, self.session) as fp: -- for chunk in iter(lambda: fp.read(8096), b""): -- h.update(chunk) +- with open_local_or_remote_file(link, self.session) as f: +- # Chunks to iterate +- chunks = iter(lambda: f.stream.read(FILE_CHUNK_SIZE), b"") +- +- # Choose a context manager depending on verbosity +- if log.verbosity >= 1: +- iter_length = f.size / FILE_CHUNK_SIZE if f.size else None +- context_manager = progressbar(chunks, length=iter_length, label=" ") +- else: +- context_manager = contextlib.nullcontext(chunks) +- +- # Iterate over the chosen context manager +- with context_manager as bar: +- for chunk in bar: +- h.update(chunk) - return ":".join([FAVORITE_HASH, h.hexdigest()]) - @contextmanager def allow_all_wheels(self): """ diff --git a/pipenv/patched/piptools/resolver.py b/pipenv/patched/piptools/resolver.py -index 494d385..b642bc9 100644 +index 
fc53f18..c056665 100644 --- a/pipenv/patched/piptools/resolver.py +++ b/pipenv/patched/piptools/resolver.py -@@ -28,6 +28,7 @@ class RequirementSummary(object): +@@ -34,6 +34,7 @@ class RequirementSummary(object): self.req = ireq.req self.key = key_from_req(ireq.req) self.extras = str(sorted(ireq.extras)) @@ -486,29 +522,20 @@ index 494d385..b642bc9 100644 self.specifier = str(ireq.specifier) def __eq__(self, other): -@@ -119,7 +120,7 @@ class Resolver(object): - @staticmethod - def check_constraints(constraints): - for constraint in constraints: -- if constraint.link is not None and not constraint.editable: -+ if constraint.link is not None and not constraint.editable and not constraint.is_wheel: - msg = ('pip-compile does not support URLs as packages, unless they are editable. ' - 'Perhaps add -e option?') - raise UnsupportedConstraint(msg, constraint) -@@ -155,6 +156,12 @@ class Resolver(object): - # NOTE we may be losing some info on dropped reqs here - combined_ireq.req.specifier &= ireq.req.specifier - combined_ireq.constraint &= ireq.constraint -+ if not combined_ireq.markers: -+ combined_ireq.markers = ireq.markers -+ else: -+ _markers = combined_ireq.markers._markers -+ if not isinstance(_markers[0], (tuple, list)): -+ combined_ireq.markers._markers = [_markers, 'and', ireq.markers._markers] - # Return a sorted, de-duped tuple of extras - combined_ireq.extras = tuple(sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras)))) - yield combined_ireq -@@ -272,6 +279,15 @@ class Resolver(object): +@@ -63,6 +64,12 @@ def combine_install_requirements(ireqs): + # NOTE we may be losing some info on dropped reqs here + combined_ireq.req.specifier &= ireq.req.specifier + combined_ireq.constraint &= ireq.constraint ++ if not combined_ireq.markers: ++ combined_ireq.markers = ireq.markers ++ else: ++ _markers = combined_ireq.markers._markers ++ if not isinstance(_markers[0], (tuple, list)): ++ combined_ireq.markers._markers = [_markers, 'and', ireq.markers._markers] + # Return a sorted, de-duped tuple of extras + combined_ireq.extras = tuple( + sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras))) +@@ -339,6 +346,15 @@ class Resolver(object): for dependency in self.repository.get_dependencies(ireq): yield dependency return @@ -522,44 +549,54 @@ index 494d385..b642bc9 100644 + ireq.extras = ireq.extra + elif not is_pinned_requirement(ireq): - raise TypeError('Expected pinned or editable requirement, got {}'.format(ireq)) - -@@ -282,7 +298,7 @@ class Resolver(object): - if ireq not in self.dependency_cache: - log.debug(' {} not in cache, need to check index'.format(format_requirement(ireq)), fg='yellow') + raise TypeError( + "Expected pinned or editable requirement, got {}".format(ireq) +@@ -356,7 +372,7 @@ class Resolver(object): + fg="yellow", + ) dependencies = self.repository.get_dependencies(ireq) - self.dependency_cache[ireq] = sorted(str(ireq.req) for ireq in dependencies) + self.dependency_cache[ireq] = sorted(set(format_requirement(ireq) for ireq in dependencies)) # Example: ['Werkzeug>=0.9', 'Jinja2>=2.4'] dependency_strings = self.dependency_cache[ireq] +@@ -372,7 +388,8 @@ class Resolver(object): + ) + + def reverse_dependencies(self, ireqs): ++ is_non_wheel_url = lambda r: is_url_requirement(r) and not r.link.is_wheel + non_editable = [ +- ireq for ireq in ireqs if not (ireq.editable or is_url_requirement(ireq)) ++ ireq for ireq in ireqs if not (ireq.editable or is_non_wheel_url(ireq)) + ] + return self.dependency_cache.reverse_dependencies(non_editable) diff --git 
a/pipenv/patched/piptools/utils.py b/pipenv/patched/piptools/utils.py -index 9b4b4c2..8875543 100644 +index 8727f1e..1f4c10a 100644 --- a/pipenv/patched/piptools/utils.py +++ b/pipenv/patched/piptools/utils.py -@@ -2,10 +2,17 @@ - from __future__ import (absolute_import, division, print_function, - unicode_literals) +@@ -1,6 +1,7 @@ + # coding: utf-8 + from __future__ import absolute_import, division, print_function, unicode_literals +import os import sys - from itertools import chain, groupby from collections import OrderedDict - -+import six -+ + from itertools import chain, groupby +@@ -8,6 +9,10 @@ from itertools import chain, groupby + import six + from click.utils import LazyFile + from six.moves import shlex_quote +from pipenv.vendor.packaging.specifiers import SpecifierSet, InvalidSpecifier +from pipenv.vendor.packaging.version import Version, InvalidVersion, parse as parse_version +from pipenv.vendor.packaging.markers import Marker, Op, Value, Variable + - from ._compat import install_req_from_line + from ._compat import PIP_VERSION, InstallCommand, install_req_from_line from .click import style -@@ -14,6 +21,71 @@ from .click import style - UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'} +@@ -23,6 +28,70 @@ COMPILE_EXCLUDE_OPTIONS = { + } -+ +def simplify_markers(ireq): + """simplify_markers "This code cleans up markers for a specific :class:`~InstallRequirement`" + @@ -627,8 +664,8 @@ index 9b4b4c2..8875543 100644 def key_from_ireq(ireq): """Get a standardized key for an InstallRequirement.""" if ireq.req is None and ireq.link is not None: -@@ -39,16 +111,51 @@ def comment(text): - return style(text, fg='green') +@@ -48,16 +117,51 @@ def comment(text): + return style(text, fg="green") -def make_install_requirement(name, version, extras, constraint=False): @@ -640,8 +677,8 @@ index 9b4b4c2..8875543 100644 extras_string = "[{}]".format(",".join(sorted(extras))) - return install_req_from_line( -- str('{}{}=={}'.format(name, extras_string, version)), -- constraint=constraint) +- str("{}{}=={}".format(name, extras_string, version)), constraint=constraint +- ) + if not markers: + return install_req_from_line( + str('{}{}=={}'.format(name, extras_string, version)), @@ -682,18 +719,18 @@ index 9b4b4c2..8875543 100644 + return "".join(parts) - def format_requirement(ireq, marker=None, hashes=None): -@@ -59,10 +166,10 @@ def format_requirement(ireq, marker=None, hashes=None): - if ireq.editable: - line = '-e {}'.format(ireq.link) + def is_url_requirement(ireq): +@@ -78,10 +182,10 @@ def format_requirement(ireq, marker=None, hashes=None): + elif is_url_requirement(ireq): + line = ireq.link.url else: - line = str(ireq.req).lower() + line = _requirement_to_str_lowercase_name(ireq.req) - if marker: -- line = '{} ; {}'.format(line, marker) +- line = "{} ; {}".format(line, marker) + if marker and ';' not in line: -+ line = '{}; {}'.format(line, marker) ++ line = "{}; {}".format(line, marker) if hashes: for hash_ in sorted(hashes): diff --git a/tasks/vendoring/patches/vendor/dotenv-typing-imports.patch b/tasks/vendoring/patches/vendor/dotenv-typing-imports.patch index 386cecd131..b0fcac2e8c 100644 --- a/tasks/vendoring/patches/vendor/dotenv-typing-imports.patch +++ b/tasks/vendoring/patches/vendor/dotenv-typing-imports.patch @@ -1,9 +1,9 @@ diff --git a/pipenv/vendor/dotenv/__init__.py b/pipenv/vendor/dotenv/__init__.py -index 1867868..b88d9bc 100644 +index 105a32a..b88d9bc 100644 --- a/pipenv/vendor/dotenv/__init__.py +++ b/pipenv/vendor/dotenv/__init__.py @@ -1,6 +1,9 @@ --from typing 
import Any, Optional +-from typing import Any, Optional # noqa +from .compat import IS_TYPE_CHECKING from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv, dotenv_values @@ -14,97 +14,148 @@ index 1867868..b88d9bc 100644 def load_ipython_extension(ipython): # type: (Any) -> None diff --git a/pipenv/vendor/dotenv/cli.py b/pipenv/vendor/dotenv/cli.py -index 45f4b76..829b14a 100644 +index 235f329..d2a021a 100644 --- a/pipenv/vendor/dotenv/cli.py +++ b/pipenv/vendor/dotenv/cli.py -@@ -1,6 +1,5 @@ +@@ -1,7 +1,6 @@ import os import sys --from typing import Any, List + from subprocess import Popen +-from typing import Any, Dict, List # noqa try: import click -@@ -9,9 +8,13 @@ except ImportError: +@@ -10,9 +9,13 @@ except ImportError: 'Run pip install "python-dotenv[cli]" to fix this.') sys.exit(1) +from .compat import IS_TYPE_CHECKING - from .main import dotenv_values, get_key, set_key, unset_key, run_command + from .main import dotenv_values, get_key, set_key, unset_key from .version import __version__ +if IS_TYPE_CHECKING: -+ from typing import Any, List ++ from typing import Any, List, Dict + @click.group() @click.option('-f', '--file', default=os.path.join(os.getcwd(), '.env'), diff --git a/pipenv/vendor/dotenv/compat.py b/pipenv/vendor/dotenv/compat.py -index 99ffb39..7a8694f 100644 +index 394d3a3..61f555d 100644 --- a/pipenv/vendor/dotenv/compat.py +++ b/pipenv/vendor/dotenv/compat.py -@@ -1,3 +1,4 @@ -+import os +@@ -1,5 +1,4 @@ import sys - - if sys.version_info >= (3, 0): -@@ -6,3 +7,15 @@ else: - from StringIO import StringIO # noqa +-from typing import Text # noqa PY2 = sys.version_info[0] == 2 # type: bool -+ -+ + +@@ -9,6 +8,22 @@ else: + from io import StringIO # noqa + + +def is_type_checking(): + # type: () -> bool + try: + from typing import TYPE_CHECKING -+ except ImportError: ++ except ImportError: # pragma: no cover + return False + return TYPE_CHECKING + + -+IS_TYPE_CHECKING = os.environ.get("MYPY_RUNNING", is_type_checking()) ++IS_TYPE_CHECKING = is_type_checking() ++ ++ ++if IS_TYPE_CHECKING: ++ from typing import Text ++ ++ + def to_env(text): + # type: (Text) -> str + """ diff --git a/pipenv/vendor/dotenv/main.py b/pipenv/vendor/dotenv/main.py -index 0812282..64d4269 100644 +index 04d2241..06a210e 100644 --- a/pipenv/vendor/dotenv/main.py +++ b/pipenv/vendor/dotenv/main.py -@@ -9,15 +9,17 @@ import shutil +@@ -7,16 +7,17 @@ import re + import shutil import sys - from subprocess import Popen import tempfile --from typing import (Any, Dict, Iterator, List, Match, NamedTuple, Optional, # noqa -- Pattern, Union, TYPE_CHECKING, Text, IO, Tuple) # noqa +-from typing import (Dict, Iterator, List, Match, Optional, # noqa +- Pattern, Union, TYPE_CHECKING, Text, IO, Tuple) import warnings from collections import OrderedDict from contextlib import contextmanager --from .compat import StringIO, PY2 -+from .compat import StringIO, PY2, IS_TYPE_CHECKING +-from .compat import StringIO, PY2, to_env ++from .compat import StringIO, PY2, to_env, IS_TYPE_CHECKING + from .parser import parse_stream -if TYPE_CHECKING: # pragma: no cover -+if IS_TYPE_CHECKING: # pragma: no cover ++if IS_TYPE_CHECKING: + from typing import ( -+ Dict, Iterator, List, Match, Optional, Pattern, Union, -+ Text, IO, Tuple ++ Dict, Iterator, Match, Optional, Pattern, Union, Text, IO, Tuple + ) if sys.version_info >= (3, 6): _PathLike = os.PathLike else: -@@ -59,10 +61,14 @@ _binding = re.compile( +@@ -273,6 +274,14 @@ def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False): + + 
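# Condensed sketch of the guard the dotenv hunks above introduce: probe
# typing.TYPE_CHECKING at runtime (the typing module may be missing on old
# Pythons) and import annotation-only names solely when a static checker
# such as mypy is evaluating the file.
def is_type_checking():
    # type: () -> bool
    try:
        from typing import TYPE_CHECKING
    except ImportError:
        return False
    return TYPE_CHECKING

IS_TYPE_CHECKING = is_type_checking()

if IS_TYPE_CHECKING:
    from typing import Optional, Text  # noqa: F401 -- never imported at normal runtime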
def load_dotenv(dotenv_path=None, stream=None, verbose=False, override=False, **kwargs): + # type: (Union[Text, _PathLike, None], Optional[_StringIO], bool, bool, Union[None, Text]) -> bool ++ """Parse a .env file and then load all the variables found as environment variables. ++ ++ - *dotenv_path*: absolute or relative path to .env file. ++ - *stream*: `StringIO` object with .env content. ++ - *verbose*: whether to output the warnings related to missing .env file etc. Defaults to `False`. ++ - *override*: where to override the system environment variables with the variables in `.env` file. ++ Defaults to `False`. ++ """ + f = dotenv_path or stream or find_dotenv() + return DotEnv(f, verbose=verbose, **kwargs).set_as_environment_variables(override=override) + +diff --git a/pipenv/vendor/dotenv/parser.py b/pipenv/vendor/dotenv/parser.py +index b63cb3a..034ebfd 100644 +--- a/pipenv/vendor/dotenv/parser.py ++++ b/pipenv/vendor/dotenv/parser.py +@@ -1,9 +1,14 @@ + import codecs + import re +-from typing import (IO, Iterator, Match, NamedTuple, Optional, Pattern, # noqa +- Sequence, Text) + +-from .compat import to_text ++from .compat import to_text, IS_TYPE_CHECKING ++ ++ ++if IS_TYPE_CHECKING: ++ from typing import ( # noqa:F401 ++ IO, Iterator, Match, NamedTuple, Optional, Pattern, Sequence, Text, ++ Tuple ++ ) + - _escape_sequence = re.compile(r"\\[\\'\"abfnrtv]") # type: Pattern[Text] + def make_regex(string, extra_flags=0): +@@ -25,9 +30,20 @@ _rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?") + _double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]") + _single_quote_escapes = make_regex(r"\\[\\']") -- -Binding = NamedTuple("Binding", [("key", Optional[Text]), - ("value", Optional[Text]), - ("original", Text)]) ++ +try: -+ from typing import NamedTuple, Optional, Text -+ Binding = NamedTuple("Binding", [("key", Optional[Text]), -+ ("value", Optional[Text]), -+ ("original", Text)]) -+except ImportError: ++ # this is necessary because we only import these from typing ++ # when we are type checking, and the linter is upset if we ++ # re-import ++ import typing ++ Binding = typing.NamedTuple("Binding", [("key", typing.Optional[typing.Text]), ++ ("value", typing.Optional[typing.Text]), ++ ("original", typing.Text)]) ++except ImportError: # pragma: no cover + from collections import namedtuple -+ Binding = namedtuple("Binding", ["key", "value", "original"]) ++ Binding = namedtuple("Binding", ["key", # type: ignore ++ "value", ++ "original"]) # type: Tuple[Optional[Text], Optional[Text], Text] - def decode_escapes(string): + class Error(Exception): diff --git a/tasks/vendoring/patches/vendor/pip_shims_module_names.patch b/tasks/vendoring/patches/vendor/pip_shims_module_names.patch index 8658dcaaf0..dfddacca6a 100644 --- a/tasks/vendoring/patches/vendor/pip_shims_module_names.patch +++ b/tasks/vendoring/patches/vendor/pip_shims_module_names.patch @@ -1,17 +1,20 @@ diff --git a/pipenv/vendor/pip_shims/__init__.py b/pipenv/vendor/pip_shims/__init__.py -index 1342f793..70fb0d58 100644 +index 2af4166e..598b9ad8 100644 --- a/pipenv/vendor/pip_shims/__init__.py +++ b/pipenv/vendor/pip_shims/__init__.py -@@ -8,10 +8,10 @@ __version__ = '0.3.1' - from . 
import shims - - --old_module = sys.modules["pip_shims"] -+old_module = sys.modules[__name__] +@@ -11,10 +11,13 @@ __version__ = "0.4.1.dev0" + if "pip_shims" in sys.modules: + # mainly to keep a reference to the old module on hand so it doesn't get + # weakref'd away +- old_module = sys.modules["pip_shims"] ++ if __name__ != "pip_shims": ++ del sys.modules["pip_shims"] ++if __name__ in sys.modules: ++ old_module = sys.modules[__name__] -module = sys.modules["pip_shims"] = shims._new() -+module = sys.modules[__name__] = shims._new() ++module = sys.modules[__name__] = sys.modules["pip_shims"] = shims._new() module.shims = shims - module.__dict__.update({ - '__file__': __file__, + module.__dict__.update( + { diff --git a/tasks/vendoring/patches/vendor/tomlkit-fix.patch b/tasks/vendoring/patches/vendor/tomlkit-fix.patch index 4aa6c16f0c..26b761ccbe 100644 --- a/tasks/vendoring/patches/vendor/tomlkit-fix.patch +++ b/tasks/vendoring/patches/vendor/tomlkit-fix.patch @@ -30,14 +30,6 @@ index cb8af1d5..9b5db5cb 100644 from ._compat import decode from .exceptions import KeyAlreadyPresent from .exceptions import NonExistentKey -@@ -17,6 +9,7 @@ from .items import Item - from .items import Key - from .items import Null - from .items import Table -+from .items import Trivia - from .items import Whitespace - from .items import item as _item - @@ -221,7 +214,12 @@ class Container(dict): for i in idx: self._body[i] = (None, Null()) @@ -80,8 +72,8 @@ index 375b5f02..cccfd4a1 100644 - from ._compat import PY2 - from ._compat import decode -@@ -22,9 +14,12 @@ from ._compat import unicode + from ._compat import PY38 +@@ -23,9 +14,12 @@ from ._compat import unicode from ._utils import escape_string if PY2: @@ -94,7 +86,7 @@ index 375b5f02..cccfd4a1 100644 def item(value, _parent=None): -@@ -40,7 +35,10 @@ def item(value, _parent=None): +@@ -41,7 +35,10 @@ def item(value, _parent=None): elif isinstance(value, float): return Float(value, Trivia(), str(value)) elif isinstance(value, dict): From b5892c1d03abfeca27c1df40c3c9e46bc1a1fbef Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Wed, 4 Mar 2020 16:37:00 -0500 Subject: [PATCH 02/49] Add missing dependencies, fix patched pip path - Add missing dependencies: - `zipp==0.6.0` - `more-itertools==5.0.0` - `importlib-metadata==1.3.0` - `funcsigs==1.0.2` - `contextlib2==0.6.0.post1` - Fix patched pip import paths for CLI and resolver - Update patches Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .../notpip/_internal/commands/__init__.py | 30 +- .../notpip/_internal/legacy_resolve.py | 11 - pipenv/patched/piptools/repositories/pypi.py | 8 +- pipenv/patched/piptools/utils.py | 4 +- pipenv/vendor/contextlib2.LICENSE.txt | 122 + pipenv/vendor/contextlib2.py | 518 ++++ pipenv/vendor/funcsigs/__init__.py | 829 ++++++ pipenv/vendor/funcsigs/version.py | 1 + pipenv/vendor/importlib_metadata/LICENSE | 13 + pipenv/vendor/importlib_metadata/__init__.py | 554 ++++ pipenv/vendor/importlib_metadata/_compat.py | 134 + .../importlib_metadata/docs/__init__.py | 0 .../importlib_metadata/docs/changelog.rst | 237 ++ pipenv/vendor/importlib_metadata/docs/conf.py | 182 ++ .../vendor/importlib_metadata/docs/index.rst | 54 + .../vendor/importlib_metadata/docs/using.rst | 259 ++ .../importlib_metadata/tests/__init__.py | 0 .../importlib_metadata/tests/data/__init__.py | 0 .../tests/data/example-21.12-py3-none-any.whl | Bin 0 -> 1455 bytes .../importlib_metadata/tests/fixtures.py | 200 ++ .../importlib_metadata/tests/test_api.py | 176 ++ 
.../tests/test_integration.py | 22 + .../importlib_metadata/tests/test_main.py | 224 ++ .../importlib_metadata/tests/test_zip.py | 70 + pipenv/vendor/more_itertools/LICENSE | 19 + pipenv/vendor/more_itertools/__init__.py | 2 + pipenv/vendor/more_itertools/more.py | 2333 +++++++++++++++++ pipenv/vendor/more_itertools/recipes.py | 577 ++++ .../vendor/more_itertools/tests/__init__.py | 0 .../vendor/more_itertools/tests/test_more.py | 2313 ++++++++++++++++ .../more_itertools/tests/test_recipes.py | 616 +++++ .../requirementslib/models/dependencies.py | 105 +- .../vendor/requirementslib/models/markers.py | 114 +- .../requirementslib/models/requirements.py | 84 +- .../requirementslib/models/setup_info.py | 82 +- pipenv/vendor/requirementslib/models/utils.py | 17 +- pipenv/vendor/vendor.txt | 5 + pipenv/vendor/zipp.LICENSE | 7 + pipenv/vendor/zipp.py | 220 ++ tasks/vendoring/patches/patched/pip19.patch | 105 +- .../vendoring/patches/patched/piptools.patch | 39 +- 41 files changed, 10046 insertions(+), 240 deletions(-) create mode 100644 pipenv/vendor/contextlib2.LICENSE.txt create mode 100644 pipenv/vendor/contextlib2.py create mode 100644 pipenv/vendor/funcsigs/__init__.py create mode 100644 pipenv/vendor/funcsigs/version.py create mode 100644 pipenv/vendor/importlib_metadata/LICENSE create mode 100644 pipenv/vendor/importlib_metadata/__init__.py create mode 100644 pipenv/vendor/importlib_metadata/_compat.py create mode 100644 pipenv/vendor/importlib_metadata/docs/__init__.py create mode 100644 pipenv/vendor/importlib_metadata/docs/changelog.rst create mode 100644 pipenv/vendor/importlib_metadata/docs/conf.py create mode 100644 pipenv/vendor/importlib_metadata/docs/index.rst create mode 100644 pipenv/vendor/importlib_metadata/docs/using.rst create mode 100644 pipenv/vendor/importlib_metadata/tests/__init__.py create mode 100644 pipenv/vendor/importlib_metadata/tests/data/__init__.py create mode 100644 pipenv/vendor/importlib_metadata/tests/data/example-21.12-py3-none-any.whl create mode 100644 pipenv/vendor/importlib_metadata/tests/fixtures.py create mode 100644 pipenv/vendor/importlib_metadata/tests/test_api.py create mode 100644 pipenv/vendor/importlib_metadata/tests/test_integration.py create mode 100644 pipenv/vendor/importlib_metadata/tests/test_main.py create mode 100644 pipenv/vendor/importlib_metadata/tests/test_zip.py create mode 100644 pipenv/vendor/more_itertools/LICENSE create mode 100644 pipenv/vendor/more_itertools/__init__.py create mode 100644 pipenv/vendor/more_itertools/more.py create mode 100644 pipenv/vendor/more_itertools/recipes.py create mode 100644 pipenv/vendor/more_itertools/tests/__init__.py create mode 100644 pipenv/vendor/more_itertools/tests/test_more.py create mode 100644 pipenv/vendor/more_itertools/tests/test_recipes.py create mode 100644 pipenv/vendor/zipp.LICENSE create mode 100644 pipenv/vendor/zipp.py diff --git a/pipenv/patched/notpip/_internal/commands/__init__.py b/pipenv/patched/notpip/_internal/commands/__init__.py index abcafa5502..ca155a9459 100644 --- a/pipenv/patched/notpip/_internal/commands/__init__.py +++ b/pipenv/patched/notpip/_internal/commands/__init__.py @@ -21,7 +21,7 @@ # The ordering matters for help display. 
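# Hypothetical mini-version of the rewritten registry in the hunk below:
# commands_dict maps a command name to (module path, class name, summary),
# and the class is only imported on first use -- which is why each module
# path must be spelled out as "pipenv.patched.notpip._internal.commands.*".
# The constructor keywords mirror pip 19.3's Command(name, summary) shape.
from collections import OrderedDict, namedtuple
from importlib import import_module

CommandInfo = namedtuple("CommandInfo", "module_path class_name summary")

commands_dict = OrderedDict([
    ("install", CommandInfo(
        "pipenv.patched.notpip._internal.commands.install",
        "InstallCommand",
        "Install packages.",
    )),
])

def create_command(name, **kwargs):
    # Resolve and import the command class lazily, on first request.
    module_path, class_name, summary = commands_dict[name]
    command_class = getattr(import_module(module_path), class_name)
    return command_class(name=name, summary=summary, **kwargs)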
# Also, even though the module path starts with the same -# "pip._internal.commands" prefix in each case, we include the full path +# "pipenv.patched.notpip._internal.commands" prefix in each case, we include the full path # because it makes testing easier (specifically when modifying commands_dict # in test setup / teardown by adding info for a FakeCommand class defined # in a test-related module). @@ -29,59 +29,59 @@ # so that the ordering won't be lost when using Python 2.7. commands_dict = OrderedDict([ ('install', CommandInfo( - 'pip._internal.commands.install', 'InstallCommand', + 'pipenv.patched.notpip._internal.commands.install', 'InstallCommand', 'Install packages.', )), ('download', CommandInfo( - 'pip._internal.commands.download', 'DownloadCommand', + 'pipenv.patched.notpip._internal.commands.download', 'DownloadCommand', 'Download packages.', )), ('uninstall', CommandInfo( - 'pip._internal.commands.uninstall', 'UninstallCommand', + 'pipenv.patched.notpip._internal.commands.uninstall', 'UninstallCommand', 'Uninstall packages.', )), ('freeze', CommandInfo( - 'pip._internal.commands.freeze', 'FreezeCommand', + 'pipenv.patched.notpip._internal.commands.freeze', 'FreezeCommand', 'Output installed packages in requirements format.', )), ('list', CommandInfo( - 'pip._internal.commands.list', 'ListCommand', + 'pipenv.patched.notpip._internal.commands.list', 'ListCommand', 'List installed packages.', )), ('show', CommandInfo( - 'pip._internal.commands.show', 'ShowCommand', + 'pipenv.patched.notpip._internal.commands.show', 'ShowCommand', 'Show information about installed packages.', )), ('check', CommandInfo( - 'pip._internal.commands.check', 'CheckCommand', + 'pipenv.patched.notpip._internal.commands.check', 'CheckCommand', 'Verify installed packages have compatible dependencies.', )), ('config', CommandInfo( - 'pip._internal.commands.configuration', 'ConfigurationCommand', + 'pipenv.patched.notpip._internal.commands.configuration', 'ConfigurationCommand', 'Manage local and global configuration.', )), ('search', CommandInfo( - 'pip._internal.commands.search', 'SearchCommand', + 'pipenv.patched.notpip._internal.commands.search', 'SearchCommand', 'Search PyPI for packages.', )), ('wheel', CommandInfo( - 'pip._internal.commands.wheel', 'WheelCommand', + 'pipenv.patched.notpip._internal.commands.wheel', 'WheelCommand', 'Build wheels from your requirements.', )), ('hash', CommandInfo( - 'pip._internal.commands.hash', 'HashCommand', + 'pipenv.patched.notpip._internal.commands.hash', 'HashCommand', 'Compute hashes of package archives.', )), ('completion', CommandInfo( - 'pip._internal.commands.completion', 'CompletionCommand', + 'pipenv.patched.notpip._internal.commands.completion', 'CompletionCommand', 'A helper command used for command completion.', )), ('debug', CommandInfo( - 'pip._internal.commands.debug', 'DebugCommand', + 'pipenv.patched.notpip._internal.commands.debug', 'DebugCommand', 'Show information useful for debugging.', )), ('help', CommandInfo( - 'pip._internal.commands.help', 'HelpCommand', + 'pipenv.patched.notpip._internal.commands.help', 'HelpCommand', 'Show help for commands.', )), ]) # type: OrderedDict[str, CommandInfo] diff --git a/pipenv/patched/notpip/_internal/legacy_resolve.py b/pipenv/patched/notpip/_internal/legacy_resolve.py index 9fc1ae1efb..674efd09c8 100644 --- a/pipenv/patched/notpip/_internal/legacy_resolve.py +++ b/pipenv/patched/notpip/_internal/legacy_resolve.py @@ -443,17 +443,6 @@ def add_req(subreq, extras_requested): for subreq in 
dist.requires(available_requested): add_req(subreq, extras_requested=available_requested) - # Hack for deep-resolving extras. - for available in available_requested: - if hasattr(dist, '_DistInfoDistribution__dep_map'): - for req in dist._DistInfoDistribution__dep_map[available]: - req = self._make_install_req( - req, - req_to_install - ) - - more_reqs.append(req) - if not req_to_install.editable and not req_to_install.satisfied_by: # XXX: --no-install leads this to report 'Successfully # downloaded' for only non-editable reqs, even though we took diff --git a/pipenv/patched/piptools/repositories/pypi.py b/pipenv/patched/piptools/repositories/pypi.py index 05a266c471..a0ea5647f7 100644 --- a/pipenv/patched/piptools/repositories/pypi.py +++ b/pipenv/patched/piptools/repositories/pypi.py @@ -5,6 +5,7 @@ import copy import hashlib import os +import sys from contextlib import contextmanager from functools import partial from shutil import rmtree @@ -89,7 +90,7 @@ def get_hash(self, location): def _get_file_hash(self, location): h = hashlib.new(FAVORITE_HASH) - with open_local_or_remote_file(location, self.session) as fp: + with open_local_or_remote_file(location, self.session) as (fp, size): for chunk in iter(lambda: fp.read(8096), b""): h.update(chunk) return ":".join([FAVORITE_HASH, h.hexdigest()]) @@ -322,12 +323,13 @@ def resolve_reqs(self, download_dir, ireq, wheel_cache): if PIP_VERSION < (19, 3): resolver_kwargs.update(**make_install_req_kwargs) else: - from pipenv.patched.notpip._internal.req.constructors import install_req_from_req_string + from pipenv.vendor.pip_shims.shims import install_req_from_req_string make_install_req = partial( install_req_from_req_string, **make_install_req_kwargs ) resolver_kwargs["make_install_req"] = make_install_req + del resolver_kwargs["use_pep517"] if PIP_VERSION >= (20,): preparer_kwargs["session"] = self.session @@ -359,7 +361,7 @@ def resolve_reqs(self, download_dir, ireq, wheel_cache): results = set(results) if results else set() - return set(results) + return results, ireq def get_legacy_dependencies(self, ireq): """ diff --git a/pipenv/patched/piptools/utils.py b/pipenv/patched/piptools/utils.py index 688158348b..6bd01c0bb5 100644 --- a/pipenv/patched/piptools/utils.py +++ b/pipenv/patched/piptools/utils.py @@ -79,8 +79,8 @@ def clean_requires_python(candidates): if getattr(c, "requires_python", None): # Old specifications had people setting this to single digits # which is effectively the same as '>=digit,<digit+1' - if c.requires_python.isdigit(): - c.requires_python = '>={0},<{1}'.format(c.requires_python, int(c.requires_python) + 1) + if len(c.requires_python) == 1 and c.requires_python in ("2", "3"): + c.requires_python = '>={0},<{1!s}'.format(c.requires_python, int(c.requires_python) + 1) try: specifierset = SpecifierSet(c.requires_python) except InvalidSpecifier: diff --git a/pipenv/vendor/contextlib2.LICENSE.txt b/pipenv/vendor/contextlib2.LICENSE.txt new file mode 100644 index 0000000000..5de20277df --- /dev/null +++ b/pipenv/vendor/contextlib2.LICENSE.txt @@ -0,0 +1,122 @@ + + +A. HISTORY OF THE SOFTWARE +========================== + +contextlib2 is a derivative of the contextlib module distributed by the PSF +as part of the Python standard library. According, it is itself redistributed +under the PSF license (reproduced in full below). As the contextlib module +was added only in Python 2.5, the licenses for earlier Python versions are +not applicable and have not been included. 
+ +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations (now Zope +Corporation, see http://www.zope.com). In 2001, the Python Software +Foundation (PSF, see http://www.python.org/psf/) was formed, a +non-profit organization created specifically to own Python-related +Intellectual Property. Zope Corporation is a sponsoring member of +the PSF. + +All Python releases are Open Source (see http://www.opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases that included the contextlib module. + + Release Derived Year Owner GPL- + from compatible? (1) + + 2.5 2.4 2006 PSF yes + 2.5.1 2.5 2007 PSF yes + 2.5.2 2.5.1 2008 PSF yes + 2.5.3 2.5.2 2008 PSF yes + 2.6 2.5 2008 PSF yes + 2.6.1 2.6 2008 PSF yes + 2.6.2 2.6.1 2009 PSF yes + 2.6.3 2.6.2 2009 PSF yes + 2.6.4 2.6.3 2009 PSF yes + 2.6.5 2.6.4 2010 PSF yes + 3.0 2.6 2008 PSF yes + 3.0.1 3.0 2009 PSF yes + 3.1 3.0.1 2009 PSF yes + 3.1.1 3.1 2009 PSF yes + 3.1.2 3.1.1 2010 PSF yes + 3.1.3 3.1.2 2010 PSF yes + 3.1.4 3.1.3 2011 PSF yes + 3.2 3.1 2011 PSF yes + 3.2.1 3.2 2011 PSF yes + 3.2.2 3.2.1 2011 PSF yes + 3.3 3.2 2012 PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011 Python Software Foundation; All Rights Reserved" are retained in Python +alone or in any derivative version prepared by Licensee. + +3. 
In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. diff --git a/pipenv/vendor/contextlib2.py b/pipenv/vendor/contextlib2.py new file mode 100644 index 0000000000..3aae8f4117 --- /dev/null +++ b/pipenv/vendor/contextlib2.py @@ -0,0 +1,518 @@ +"""contextlib2 - backports and enhancements to the contextlib module""" + +import abc +import sys +import warnings +from collections import deque +from functools import wraps + +__all__ = ["contextmanager", "closing", "nullcontext", + "AbstractContextManager", + "ContextDecorator", "ExitStack", + "redirect_stdout", "redirect_stderr", "suppress"] + +# Backwards compatibility +__all__ += ["ContextStack"] + + +# Backport abc.ABC +if sys.version_info[:2] >= (3, 4): + _abc_ABC = abc.ABC +else: + _abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) + + +# Backport classic class MRO +def _classic_mro(C, result): + if C in result: + return + result.append(C) + for B in C.__bases__: + _classic_mro(B, result) + return result + + +# Backport _collections_abc._check_methods +def _check_methods(C, *methods): + try: + mro = C.__mro__ + except AttributeError: + mro = tuple(_classic_mro(C, [])) + + for method in methods: + for B in mro: + if method in B.__dict__: + if B.__dict__[method] is None: + return NotImplemented + break + else: + return NotImplemented + return True + + +class AbstractContextManager(_abc_ABC): + """An abstract base class for context managers.""" + + def __enter__(self): + """Return `self` upon entering the runtime context.""" + return self + + @abc.abstractmethod + def __exit__(self, exc_type, exc_value, traceback): + """Raise any exception triggered within the runtime context.""" + return None + + @classmethod + def __subclasshook__(cls, C): + """Check whether subclass is considered a subclass of this ABC.""" + if cls is AbstractContextManager: + return _check_methods(C, "__enter__", "__exit__") + return NotImplemented + + +class ContextDecorator(object): + """A base class or mixin that enables context managers to work as decorators.""" + + def refresh_cm(self): + """Returns the context manager used 
to actually wrap the call to the + decorated function. + + The default implementation just returns *self*. + + Overriding this method allows otherwise one-shot context managers + like _GeneratorContextManager to support use as decorators via + implicit recreation. + + DEPRECATED: refresh_cm was never added to the standard library's + ContextDecorator API + """ + warnings.warn("refresh_cm was never added to the standard library", + DeprecationWarning) + return self._recreate_cm() + + def _recreate_cm(self): + """Return a recreated instance of self. + + Allows an otherwise one-shot context manager like + _GeneratorContextManager to support use as + a decorator via implicit recreation. + + This is a private interface just for _GeneratorContextManager. + See issue #11647 for details. + """ + return self + + def __call__(self, func): + @wraps(func) + def inner(*args, **kwds): + with self._recreate_cm(): + return func(*args, **kwds) + return inner + + +class _GeneratorContextManager(ContextDecorator): + """Helper for @contextmanager decorator.""" + + def __init__(self, func, args, kwds): + self.gen = func(*args, **kwds) + self.func, self.args, self.kwds = func, args, kwds + # Issue 19330: ensure context manager instances have good docstrings + doc = getattr(func, "__doc__", None) + if doc is None: + doc = type(self).__doc__ + self.__doc__ = doc + # Unfortunately, this still doesn't provide good help output when + # inspecting the created context manager instances, since pydoc + # currently bypasses the instance docstring and shows the docstring + # for the class instead. + # See http://bugs.python.org/issue19404 for more details. + + def _recreate_cm(self): + # _GCM instances are one-shot context managers, so the + # CM must be recreated each time a decorated function is + # called + return self.__class__(self.func, self.args, self.kwds) + + def __enter__(self): + try: + return next(self.gen) + except StopIteration: + raise RuntimeError("generator didn't yield") + + def __exit__(self, type, value, traceback): + if type is None: + try: + next(self.gen) + except StopIteration: + return + else: + raise RuntimeError("generator didn't stop") + else: + if value is None: + # Need to force instantiation so we can reliably + # tell if we get the same exception back + value = type() + try: + self.gen.throw(type, value, traceback) + raise RuntimeError("generator didn't stop after throw()") + except StopIteration as exc: + # Suppress StopIteration *unless* it's the same exception that + # was passed to throw(). This prevents a StopIteration + # raised inside the "with" statement from being suppressed. + return exc is not value + except RuntimeError as exc: + # Don't re-raise the passed in exception + if exc is value: + return False + # Likewise, avoid suppressing if a StopIteration exception + # was passed to throw() and later wrapped into a RuntimeError + # (see PEP 479). + if _HAVE_EXCEPTION_CHAINING and exc.__cause__ is value: + return False + raise + except: + # only re-raise if it's *not* the exception that was + # passed to throw(), because __exit__() must not raise + # an exception unless __exit__() itself failed. But throw() + # has to raise the exception to signal propagation, so this + # fixes the impedance mismatch between the throw() protocol + # and the __exit__() protocol. + # + if sys.exc_info()[1] is not value: + raise + + +def contextmanager(func): + """@contextmanager decorator. 
+ + Typical usage: + + @contextmanager + def some_generator(<arguments>): + <setup> + try: + yield <value> + finally: + <cleanup> + + This makes this: + + with some_generator(<arguments>) as <variable>: + <body> + + equivalent to this: + + <setup> + try: + <variable> = <value> + <body> + finally: + <cleanup> + + """ + @wraps(func) + def helper(*args, **kwds): + return _GeneratorContextManager(func, args, kwds) + return helper + + +class closing(object): + """Context to automatically close something at the end of a block. + + Code like this: + + with closing(<module>.open(<arguments>)) as f: + <block> + + is equivalent to this: + + f = <module>.open(<arguments>) + try: + <block> + finally: + f.close() + + """ + def __init__(self, thing): + self.thing = thing + + def __enter__(self): + return self.thing + + def __exit__(self, *exc_info): + self.thing.close() + + +class _RedirectStream(object): + + _stream = None + + def __init__(self, new_target): + self._new_target = new_target + # We use a list of old targets to make this CM re-entrant + self._old_targets = [] + + def __enter__(self): + self._old_targets.append(getattr(sys, self._stream)) + setattr(sys, self._stream, self._new_target) + return self._new_target + + def __exit__(self, exctype, excinst, exctb): + setattr(sys, self._stream, self._old_targets.pop()) + + +class redirect_stdout(_RedirectStream): + """Context manager for temporarily redirecting stdout to another file. + + # How to send help() to stderr + with redirect_stdout(sys.stderr): + help(dir) + + # How to write help() to a file + with open('help.txt', 'w') as f: + with redirect_stdout(f): + help(pow) + """ + + _stream = "stdout" + + +class redirect_stderr(_RedirectStream): + """Context manager for temporarily redirecting stderr to another file.""" + + _stream = "stderr" + + +class suppress(object): + """Context manager to suppress specified exceptions + + After the exception is suppressed, execution proceeds with the next + statement following the with statement. + + with suppress(FileNotFoundError): + os.remove(somefile) + # Execution still resumes here if the file was already removed + """ + + def __init__(self, *exceptions): + self._exceptions = exceptions + + def __enter__(self): + pass + + def __exit__(self, exctype, excinst, exctb): + # Unlike isinstance and issubclass, CPython exception handling + # currently only looks at the concrete type hierarchy (ignoring + # the instance and subclass checking hooks). While Guido considers + # that a bug rather than a feature, it's a fairly hard one to fix + # due to various internal implementation details. suppress provides + # the simpler issubclass based semantics, rather than trying to + # exactly reproduce the limitations of the CPython interpreter. 
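# Because the __exit__ above tests issubclass(exctype, self._exceptions),
# suppressing a base class silences every subclass as well. A small
# illustration (the vendored import path is an assumption; the stdlib's
# contextlib.suppress behaves the same on Python 3):
from pipenv.vendor.contextlib2 import suppress

with suppress(LookupError):
    {}["missing"]   # KeyError is a LookupError subclass: suppressed
with suppress(LookupError):
    [][0]           # IndexError, likewise a subclass: suppressed
print("execution continues")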
+ # + # See http://bugs.python.org/issue12029 for more details + return exctype is not None and issubclass(exctype, self._exceptions) + + +# Context manipulation is Python 3 only +_HAVE_EXCEPTION_CHAINING = sys.version_info[0] >= 3 +if _HAVE_EXCEPTION_CHAINING: + def _make_context_fixer(frame_exc): + def _fix_exception_context(new_exc, old_exc): + # Context may not be correct, so find the end of the chain + while 1: + exc_context = new_exc.__context__ + if exc_context is old_exc: + # Context is already set correctly (see issue 20317) + return + if exc_context is None or exc_context is frame_exc: + break + new_exc = exc_context + # Change the end of the chain to point to the exception + # we expect it to reference + new_exc.__context__ = old_exc + return _fix_exception_context + + def _reraise_with_existing_context(exc_details): + try: + # bare "raise exc_details[1]" replaces our carefully + # set-up context + fixed_ctx = exc_details[1].__context__ + raise exc_details[1] + except BaseException: + exc_details[1].__context__ = fixed_ctx + raise +else: + # No exception context in Python 2 + def _make_context_fixer(frame_exc): + return lambda new_exc, old_exc: None + + # Use 3 argument raise in Python 2, + # but use exec to avoid SyntaxError in Python 3 + def _reraise_with_existing_context(exc_details): + exc_type, exc_value, exc_tb = exc_details + exec("raise exc_type, exc_value, exc_tb") + +# Handle old-style classes if they exist +try: + from types import InstanceType +except ImportError: + # Python 3 doesn't have old-style classes + _get_type = type +else: + # Need to handle old-style context managers on Python 2 + def _get_type(obj): + obj_type = type(obj) + if obj_type is InstanceType: + return obj.__class__ # Old-style class + return obj_type # New-style class + + +# Inspired by discussions on http://bugs.python.org/issue13585 +class ExitStack(object): + """Context manager for dynamic management of a stack of exit callbacks + + For example: + + with ExitStack() as stack: + files = [stack.enter_context(open(fname)) for fname in filenames] + # All opened files will automatically be closed at the end of + # the with statement, even if attempts to open files later + # in the list raise an exception + + """ + def __init__(self): + self._exit_callbacks = deque() + + def pop_all(self): + """Preserve the context stack by transferring it to a new instance""" + new_stack = type(self)() + new_stack._exit_callbacks = self._exit_callbacks + self._exit_callbacks = deque() + return new_stack + + def _push_cm_exit(self, cm, cm_exit): + """Helper to correctly register callbacks to __exit__ methods""" + def _exit_wrapper(*exc_details): + return cm_exit(cm, *exc_details) + _exit_wrapper.__self__ = cm + self.push(_exit_wrapper) + + def push(self, exit): + """Registers a callback with the standard __exit__ method signature + + Can suppress exceptions the same way __exit__ methods can. + + Also accepts any object with an __exit__ method (registering a call + to the method instead of the object itself) + """ + # We use an unbound method rather than a bound method to follow + # the standard lookup behaviour for special methods + _cb_type = _get_type(exit) + try: + exit_method = _cb_type.__exit__ + except AttributeError: + # Not a context manager, so assume its a callable + self._exit_callbacks.append(exit) + else: + self._push_cm_exit(exit, exit_method) + return exit # Allow use as a decorator + + def callback(self, callback, *args, **kwds): + """Registers an arbitrary callback and arguments. 
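# Rough usage sketch for the two registration styles above plus pop_all():
# callback() registers a plain function (it can never suppress exceptions),
# and pop_all() hands every registered callback to a fresh stack so the
# resources survive the with block once setup has fully succeeded.
from pipenv.vendor.contextlib2 import ExitStack

def open_files(filenames):
    with ExitStack() as stack:
        files = [stack.enter_context(open(name)) for name in filenames]
        stack.callback(print, "all files closed")
        # Setup succeeded: transfer ownership; the original stack unwinds empty.
        return files, stack.pop_all()

# Caller side: files, cleanup = open_files(["a.txt", "b.txt"]); ...; cleanup.close()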
+ + Cannot suppress exceptions. + """ + def _exit_wrapper(exc_type, exc, tb): + callback(*args, **kwds) + # We changed the signature, so using @wraps is not appropriate, but + # setting __wrapped__ may still help with introspection + _exit_wrapper.__wrapped__ = callback + self.push(_exit_wrapper) + return callback # Allow use as a decorator + + def enter_context(self, cm): + """Enters the supplied context manager + + If successful, also pushes its __exit__ method as a callback and + returns the result of the __enter__ method. + """ + # We look up the special methods on the type to match the with statement + _cm_type = _get_type(cm) + _exit = _cm_type.__exit__ + result = _cm_type.__enter__(cm) + self._push_cm_exit(cm, _exit) + return result + + def close(self): + """Immediately unwind the context stack""" + self.__exit__(None, None, None) + + def __enter__(self): + return self + + def __exit__(self, *exc_details): + received_exc = exc_details[0] is not None + + # We manipulate the exception state so it behaves as though + # we were actually nesting multiple with statements + frame_exc = sys.exc_info()[1] + _fix_exception_context = _make_context_fixer(frame_exc) + + # Callbacks are invoked in LIFO order to match the behaviour of + # nested context managers + suppressed_exc = False + pending_raise = False + while self._exit_callbacks: + cb = self._exit_callbacks.pop() + try: + if cb(*exc_details): + suppressed_exc = True + pending_raise = False + exc_details = (None, None, None) + except: + new_exc_details = sys.exc_info() + # simulate the stack of exceptions by setting the context + _fix_exception_context(new_exc_details[1], exc_details[1]) + pending_raise = True + exc_details = new_exc_details + if pending_raise: + _reraise_with_existing_context(exc_details) + return received_exc and suppressed_exc + + +# Preserve backwards compatibility +class ContextStack(ExitStack): + """Backwards compatibility alias for ExitStack""" + + def __init__(self): + warnings.warn("ContextStack has been renamed to ExitStack", + DeprecationWarning) + super(ContextStack, self).__init__() + + def register_exit(self, callback): + return self.push(callback) + + def register(self, callback, *args, **kwds): + return self.callback(callback, *args, **kwds) + + def preserve(self): + return self.pop_all() + + +class nullcontext(AbstractContextManager): + """Context manager that does no additional processing. + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True + """ + + def __init__(self, enter_result=None): + self.enter_result = enter_result + + def __enter__(self): + return self.enter_result + + def __exit__(self, *excinfo): + pass diff --git a/pipenv/vendor/funcsigs/__init__.py b/pipenv/vendor/funcsigs/__init__.py new file mode 100644 index 0000000000..5f5378b42a --- /dev/null +++ b/pipenv/vendor/funcsigs/__init__.py @@ -0,0 +1,829 @@ +# Copyright 2001-2013 Python Software Foundation; All Rights Reserved +"""Function signature objects for callables + +Back port of Python 3.3's function signature tools from the inspect module, +modified to be compatible with Python 2.6, 2.7 and 3.3+. 
+""" +from __future__ import absolute_import, division, print_function +import itertools +import functools +import re +import types + +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict + +from funcsigs.version import __version__ + +__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature'] + + +_WrapperDescriptor = type(type.__call__) +_MethodWrapper = type(all.__call__) + +_NonUserDefinedCallables = (_WrapperDescriptor, + _MethodWrapper, + types.BuiltinFunctionType) + + +def formatannotation(annotation, base_module=None): + if isinstance(annotation, type): + if annotation.__module__ in ('builtins', '__builtin__', base_module): + return annotation.__name__ + return annotation.__module__+'.'+annotation.__name__ + return repr(annotation) + + +def _get_user_defined_method(cls, method_name, *nested): + try: + if cls is type: + return + meth = getattr(cls, method_name) + for name in nested: + meth = getattr(meth, name, meth) + except AttributeError: + return + else: + if not isinstance(meth, _NonUserDefinedCallables): + # Once '__signature__' will be added to 'C'-level + # callables, this check won't be necessary + return meth + + +def signature(obj): + '''Get a signature object for the passed callable.''' + + if not callable(obj): + raise TypeError('{0!r} is not a callable object'.format(obj)) + + if isinstance(obj, types.MethodType): + sig = signature(obj.__func__) + if obj.__self__ is None: + # Unbound method - preserve as-is. + return sig + else: + # Bound method. Eat self - if we can. + params = tuple(sig.parameters.values()) + + if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + raise ValueError('invalid method signature') + + kind = params[0].kind + if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY): + # Drop first parameter: + # '(p1, p2[, ...])' -> '(p2[, ...])' + params = params[1:] + else: + if kind is not _VAR_POSITIONAL: + # Unless we add a new parameter type we never + # get here + raise ValueError('invalid argument type') + # It's a var-positional parameter. + # Do nothing. '(*args[, ...])' -> '(*args[, ...])' + + return sig.replace(parameters=params) + + try: + sig = obj.__signature__ + except AttributeError: + pass + else: + if sig is not None: + return sig + + try: + # Was this function wrapped by a decorator? + wrapped = obj.__wrapped__ + except AttributeError: + pass + else: + return signature(wrapped) + + if isinstance(obj, types.FunctionType): + return Signature.from_function(obj) + + if isinstance(obj, functools.partial): + sig = signature(obj.func) + + new_params = OrderedDict(sig.parameters.items()) + + partial_args = obj.args or () + partial_keywords = obj.keywords or {} + try: + ba = sig.bind_partial(*partial_args, **partial_keywords) + except TypeError as ex: + msg = 'partial object {0!r} has incorrect arguments'.format(obj) + raise ValueError(msg) + + for arg_name, arg_value in ba.arguments.items(): + param = new_params[arg_name] + if arg_name in partial_keywords: + # We set a new default value, because the following code + # is correct: + # + # >>> def foo(a): print(a) + # >>> print(partial(partial(foo, a=10), a=20)()) + # 20 + # >>> print(partial(partial(foo, a=10), a=20)(a=30)) + # 30 + # + # So, with 'partial' objects, passing a keyword argument is + # like setting a new default value for the corresponding + # parameter + # + # We also mark this parameter with '_partial_kwarg' + # flag. 
Later, in '_bind', the 'default' value of this + # parameter will be added to 'kwargs', to simulate + # the 'functools.partial' real call. + new_params[arg_name] = param.replace(default=arg_value, + _partial_kwarg=True) + + elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and + not param._partial_kwarg): + new_params.pop(arg_name) + + return sig.replace(parameters=new_params.values()) + + sig = None + if isinstance(obj, type): + # obj is a class or a metaclass + + # First, let's see if it has an overloaded __call__ defined + # in its metaclass + call = _get_user_defined_method(type(obj), '__call__') + if call is not None: + sig = signature(call) + else: + # Now we check if the 'obj' class has a '__new__' method + new = _get_user_defined_method(obj, '__new__') + if new is not None: + sig = signature(new) + else: + # Finally, we should have at least __init__ implemented + init = _get_user_defined_method(obj, '__init__') + if init is not None: + sig = signature(init) + elif not isinstance(obj, _NonUserDefinedCallables): + # An object with __call__ + # We also check that the 'obj' is not an instance of + # _WrapperDescriptor or _MethodWrapper to avoid + # infinite recursion (and even potential segfault) + call = _get_user_defined_method(type(obj), '__call__', 'im_func') + if call is not None: + sig = signature(call) + + if sig is not None: + # For classes and objects we skip the first parameter of their + # __call__, __new__, or __init__ methods + return sig.replace(parameters=tuple(sig.parameters.values())[1:]) + + if isinstance(obj, types.BuiltinFunctionType): + # Raise a nicer error message for builtins + msg = 'no signature found for builtin function {0!r}'.format(obj) + raise ValueError(msg) + + raise ValueError('callable {0!r} is not supported by signature'.format(obj)) + + +class _void(object): + '''A private marker - used in Parameter & Signature''' + + +class _empty(object): + pass + + +class _ParameterKind(int): + def __new__(self, *args, **kwargs): + obj = int.__new__(self, *args) + obj._name = kwargs['name'] + return obj + + def __str__(self): + return self._name + + def __repr__(self): + return '<_ParameterKind: {0!r}>'.format(self._name) + + +_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY') +_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD') +_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL') +_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY') +_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD') + + +class Parameter(object): + '''Represents a parameter in a function signature. + + Has the following public attributes: + + * name : str + The name of the parameter as a string. + * default : object + The default value for the parameter if specified. If the + parameter has no default value, this attribute is not set. + * annotation + The annotation for the parameter if specified. If the + parameter has no annotation, this attribute is not set. + * kind : str + Describes how argument values are bound to the parameter. + Possible values: `Parameter.POSITIONAL_ONLY`, + `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, + `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. 
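+
+    A small illustrative example::
+
+        >>> p = Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, default=0)
+        >>> str(p)
+        'x=0'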
+ ''' + + __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg') + + POSITIONAL_ONLY = _POSITIONAL_ONLY + POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD + VAR_POSITIONAL = _VAR_POSITIONAL + KEYWORD_ONLY = _KEYWORD_ONLY + VAR_KEYWORD = _VAR_KEYWORD + + empty = _empty + + def __init__(self, name, kind, default=_empty, annotation=_empty, + _partial_kwarg=False): + + if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD, + _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD): + raise ValueError("invalid value for 'Parameter.kind' attribute") + self._kind = kind + + if default is not _empty: + if kind in (_VAR_POSITIONAL, _VAR_KEYWORD): + msg = '{0} parameters cannot have default values'.format(kind) + raise ValueError(msg) + self._default = default + self._annotation = annotation + + if name is None: + if kind != _POSITIONAL_ONLY: + raise ValueError("None is not a valid name for a " + "non-positional-only parameter") + self._name = name + else: + name = str(name) + if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I): + msg = '{0!r} is not a valid parameter name'.format(name) + raise ValueError(msg) + self._name = name + + self._partial_kwarg = _partial_kwarg + + @property + def name(self): + return self._name + + @property + def default(self): + return self._default + + @property + def annotation(self): + return self._annotation + + @property + def kind(self): + return self._kind + + def replace(self, name=_void, kind=_void, annotation=_void, + default=_void, _partial_kwarg=_void): + '''Creates a customized copy of the Parameter.''' + + if name is _void: + name = self._name + + if kind is _void: + kind = self._kind + + if annotation is _void: + annotation = self._annotation + + if default is _void: + default = self._default + + if _partial_kwarg is _void: + _partial_kwarg = self._partial_kwarg + + return type(self)(name, kind, default=default, annotation=annotation, + _partial_kwarg=_partial_kwarg) + + def __str__(self): + kind = self.kind + + formatted = self._name + if kind == _POSITIONAL_ONLY: + if formatted is None: + formatted = '' + formatted = '<{0}>'.format(formatted) + + # Add annotation and default value + if self._annotation is not _empty: + formatted = '{0}:{1}'.format(formatted, + formatannotation(self._annotation)) + + if self._default is not _empty: + formatted = '{0}={1}'.format(formatted, repr(self._default)) + + if kind == _VAR_POSITIONAL: + formatted = '*' + formatted + elif kind == _VAR_KEYWORD: + formatted = '**' + formatted + + return formatted + + def __repr__(self): + return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__, + id(self), self.name) + + def __hash__(self): + msg = "unhashable type: '{0}'".format(self.__class__.__name__) + raise TypeError(msg) + + def __eq__(self, other): + return (issubclass(other.__class__, Parameter) and + self._name == other._name and + self._kind == other._kind and + self._default == other._default and + self._annotation == other._annotation) + + def __ne__(self, other): + return not self.__eq__(other) + + +class BoundArguments(object): + '''Result of `Signature.bind` call. Holds the mapping of arguments + to the function's parameters. + + Has the following public attributes: + + * arguments : OrderedDict + An ordered mutable mapping of parameters' names to arguments' values. + Does not contain arguments' default values. + * signature : Signature + The Signature object that created this instance. + * args : tuple + Tuple of positional arguments values. 
+ * kwargs : dict + Dict of keyword arguments values. + ''' + + def __init__(self, signature, arguments): + self.arguments = arguments + self._signature = signature + + @property + def signature(self): + return self._signature + + @property + def args(self): + args = [] + for param_name, param in self._signature.parameters.items(): + if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or + param._partial_kwarg): + # Keyword arguments mapped by 'functools.partial' + # (Parameter._partial_kwarg is True) are mapped + # in 'BoundArguments.kwargs', along with VAR_KEYWORD & + # KEYWORD_ONLY + break + + try: + arg = self.arguments[param_name] + except KeyError: + # We're done here. Other arguments + # will be mapped in 'BoundArguments.kwargs' + break + else: + if param.kind == _VAR_POSITIONAL: + # *args + args.extend(arg) + else: + # plain argument + args.append(arg) + + return tuple(args) + + @property + def kwargs(self): + kwargs = {} + kwargs_started = False + for param_name, param in self._signature.parameters.items(): + if not kwargs_started: + if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or + param._partial_kwarg): + kwargs_started = True + else: + if param_name not in self.arguments: + kwargs_started = True + continue + + if not kwargs_started: + continue + + try: + arg = self.arguments[param_name] + except KeyError: + pass + else: + if param.kind == _VAR_KEYWORD: + # **kwargs + kwargs.update(arg) + else: + # plain keyword argument + kwargs[param_name] = arg + + return kwargs + + def __hash__(self): + msg = "unhashable type: '{0}'".format(self.__class__.__name__) + raise TypeError(msg) + + def __eq__(self, other): + return (issubclass(other.__class__, BoundArguments) and + self.signature == other.signature and + self.arguments == other.arguments) + + def __ne__(self, other): + return not self.__eq__(other) + + +class Signature(object): + '''A Signature object represents the overall signature of a function. + It stores a Parameter object for each parameter accepted by the + function, as well as information specific to the function itself. + + A Signature object has the following public attributes and methods: + + * parameters : OrderedDict + An ordered mapping of parameters' names to the corresponding + Parameter objects (keyword-only arguments are in the same order + as listed in `code.co_varnames`). + * return_annotation : object + The annotation for the return type of the function if specified. + If the function has no annotation for its return type, this + attribute is not set. + * bind(*args, **kwargs) -> BoundArguments + Creates a mapping from positional and keyword arguments to + parameters. + * bind_partial(*args, **kwargs) -> BoundArguments + Creates a partial mapping from positional and keyword arguments + to parameters (simulating 'functools.partial' behavior.) + ''' + + __slots__ = ('_return_annotation', '_parameters') + + _parameter_cls = Parameter + _bound_arguments_cls = BoundArguments + + empty = _empty + + def __init__(self, parameters=None, return_annotation=_empty, + __validate_parameters__=True): + '''Constructs Signature from the given list of Parameter + objects and 'return_annotation'. All arguments are optional. 
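+
+        For example, a one-parameter signature built by hand::
+
+            >>> sig = Signature([Parameter('a', Parameter.POSITIONAL_OR_KEYWORD)])
+            >>> str(sig)
+            '(a)'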
+ ''' + + if parameters is None: + params = OrderedDict() + else: + if __validate_parameters__: + params = OrderedDict() + top_kind = _POSITIONAL_ONLY + + for idx, param in enumerate(parameters): + kind = param.kind + if kind < top_kind: + msg = 'wrong parameter order: {0} before {1}' + msg = msg.format(top_kind, param.kind) + raise ValueError(msg) + else: + top_kind = kind + + name = param.name + if name is None: + name = str(idx) + param = param.replace(name=name) + + if name in params: + msg = 'duplicate parameter name: {0!r}'.format(name) + raise ValueError(msg) + params[name] = param + else: + params = OrderedDict(((param.name, param) + for param in parameters)) + + self._parameters = params + self._return_annotation = return_annotation + + @classmethod + def from_function(cls, func): + '''Constructs Signature for the given python function''' + + if not isinstance(func, types.FunctionType): + raise TypeError('{0!r} is not a Python function'.format(func)) + + Parameter = cls._parameter_cls + + # Parameter information. + func_code = func.__code__ + pos_count = func_code.co_argcount + arg_names = func_code.co_varnames + positional = tuple(arg_names[:pos_count]) + keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0) + keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)] + annotations = getattr(func, '__annotations__', {}) + defaults = func.__defaults__ + kwdefaults = getattr(func, '__kwdefaults__', None) + + if defaults: + pos_default_count = len(defaults) + else: + pos_default_count = 0 + + parameters = [] + + # Non-keyword-only parameters w/o defaults. + non_default_count = pos_count - pos_default_count + for name in positional[:non_default_count]: + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_POSITIONAL_OR_KEYWORD)) + + # ... w/ defaults. + for offset, name in enumerate(positional[non_default_count:]): + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_POSITIONAL_OR_KEYWORD, + default=defaults[offset])) + + # *args + if func_code.co_flags & 0x04: + name = arg_names[pos_count + keyword_only_count] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_POSITIONAL)) + + # Keyword-only parameters. + for name in keyword_only: + default = _empty + if kwdefaults is not None: + default = kwdefaults.get(name, _empty) + + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_KEYWORD_ONLY, + default=default)) + # **kwargs + if func_code.co_flags & 0x08: + index = pos_count + keyword_only_count + if func_code.co_flags & 0x04: + index += 1 + + name = arg_names[index] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_KEYWORD)) + + return cls(parameters, + return_annotation=annotations.get('return', _empty), + __validate_parameters__=False) + + @property + def parameters(self): + try: + return types.MappingProxyType(self._parameters) + except AttributeError: + return OrderedDict(self._parameters.items()) + + @property + def return_annotation(self): + return self._return_annotation + + def replace(self, parameters=_void, return_annotation=_void): + '''Creates a customized copy of the Signature. + Pass 'parameters' and/or 'return_annotation' arguments + to override them in the new copy. 
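+
+        For example, given an existing Signature object ``sig``, drop
+        its return annotation::
+
+            >>> sig = sig.replace(return_annotation=Signature.empty)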
+ ''' + + if parameters is _void: + parameters = self.parameters.values() + + if return_annotation is _void: + return_annotation = self._return_annotation + + return type(self)(parameters, + return_annotation=return_annotation) + + def __hash__(self): + msg = "unhashable type: '{0}'".format(self.__class__.__name__) + raise TypeError(msg) + + def __eq__(self, other): + if (not issubclass(type(other), Signature) or + self.return_annotation != other.return_annotation or + len(self.parameters) != len(other.parameters)): + return False + + other_positions = dict((param, idx) + for idx, param in enumerate(other.parameters.keys())) + + for idx, (param_name, param) in enumerate(self.parameters.items()): + if param.kind == _KEYWORD_ONLY: + try: + other_param = other.parameters[param_name] + except KeyError: + return False + else: + if param != other_param: + return False + else: + try: + other_idx = other_positions[param_name] + except KeyError: + return False + else: + if (idx != other_idx or + param != other.parameters[param_name]): + return False + + return True + + def __ne__(self, other): + return not self.__eq__(other) + + def _bind(self, args, kwargs, partial=False): + '''Private method. Don't use directly.''' + + arguments = OrderedDict() + + parameters = iter(self.parameters.values()) + parameters_ex = () + arg_vals = iter(args) + + if partial: + # Support for binding arguments to 'functools.partial' objects. + # See 'functools.partial' case in 'signature()' implementation + # for details. + for param_name, param in self.parameters.items(): + if (param._partial_kwarg and param_name not in kwargs): + # Simulating 'functools.partial' behavior + kwargs[param_name] = param.default + + while True: + # Let's iterate through the positional arguments and corresponding + # parameters + try: + arg_val = next(arg_vals) + except StopIteration: + # No more positional arguments + try: + param = next(parameters) + except StopIteration: + # No more parameters. That's it. Just need to check that + # we have no `kwargs` after this while loop + break + else: + if param.kind == _VAR_POSITIONAL: + # That's OK, just empty *args. Let's start parsing + # kwargs + break + elif param.name in kwargs: + if param.kind == _POSITIONAL_ONLY: + msg = '{arg!r} parameter is positional only, ' \ + 'but was passed as a keyword' + msg = msg.format(arg=param.name) + raise TypeError(msg) + parameters_ex = (param,) + break + elif (param.kind == _VAR_KEYWORD or + param.default is not _empty): + # That's fine too - we have a default value for this + # parameter. 
So, lets start parsing `kwargs`, starting + # with the current parameter + parameters_ex = (param,) + break + else: + if partial: + parameters_ex = (param,) + break + else: + msg = '{arg!r} parameter lacking default value' + msg = msg.format(arg=param.name) + raise TypeError(msg) + else: + # We have a positional argument to process + try: + param = next(parameters) + except StopIteration: + raise TypeError('too many positional arguments') + else: + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + # Looks like we have no parameter for this positional + # argument + raise TypeError('too many positional arguments') + + if param.kind == _VAR_POSITIONAL: + # We have an '*args'-like argument, let's fill it with + # all positional arguments we have left and move on to + # the next phase + values = [arg_val] + values.extend(arg_vals) + arguments[param.name] = tuple(values) + break + + if param.name in kwargs: + raise TypeError('multiple values for argument ' + '{arg!r}'.format(arg=param.name)) + + arguments[param.name] = arg_val + + # Now, we iterate through the remaining parameters to process + # keyword arguments + kwargs_param = None + for param in itertools.chain(parameters_ex, parameters): + if param.kind == _POSITIONAL_ONLY: + # This should never happen in case of a properly built + # Signature object (but let's have this check here + # to ensure correct behaviour just in case) + raise TypeError('{arg!r} parameter is positional only, ' + 'but was passed as a keyword'. \ + format(arg=param.name)) + + if param.kind == _VAR_KEYWORD: + # Memorize that we have a '**kwargs'-like parameter + kwargs_param = param + continue + + param_name = param.name + try: + arg_val = kwargs.pop(param_name) + except KeyError: + # We have no value for this parameter. It's fine though, + # if it has a default value, or it is an '*args'-like + # parameter, left alone by the processing of positional + # arguments. + if (not partial and param.kind != _VAR_POSITIONAL and + param.default is _empty): + raise TypeError('{arg!r} parameter lacking default value'. \ + format(arg=param_name)) + + else: + arguments[param_name] = arg_val + + if kwargs: + if kwargs_param is not None: + # Process our '**kwargs'-like parameter + arguments[kwargs_param.name] = kwargs + else: + raise TypeError('too many keyword arguments %r' % kwargs) + + return self._bound_arguments_cls(self, arguments) + + def bind(*args, **kwargs): + '''Get a BoundArguments object, that maps the passed `args` + and `kwargs` to the function's signature. Raises `TypeError` + if the passed arguments can not be bound. + ''' + return args[0]._bind(args[1:], kwargs) + + def bind_partial(self, *args, **kwargs): + '''Get a BoundArguments object, that partially maps the + passed `args` and `kwargs` to the function's signature. + Raises `TypeError` if the passed arguments can not be bound. 
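+
+        A small sketch::
+
+            >>> def f(a, b):
+            ...     pass
+            >>> signature(f).bind_partial(1).arguments
+            OrderedDict([('a', 1)])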
+ ''' + return self._bind(args, kwargs, partial=True) + + def __str__(self): + result = [] + render_kw_only_separator = True + for idx, param in enumerate(self.parameters.values()): + formatted = str(param) + + kind = param.kind + if kind == _VAR_POSITIONAL: + # OK, we have an '*args'-like parameter, so we won't need + # a '*' to separate keyword-only arguments + render_kw_only_separator = False + elif kind == _KEYWORD_ONLY and render_kw_only_separator: + # We have a keyword-only parameter to render and we haven't + # rendered an '*args'-like parameter before, so add a '*' + # separator to the parameters list ("foo(arg1, *, arg2)" case) + result.append('*') + # This condition should be only triggered once, so + # reset the flag + render_kw_only_separator = False + + result.append(formatted) + + rendered = '({0})'.format(', '.join(result)) + + if self.return_annotation is not _empty: + anno = formatannotation(self.return_annotation) + rendered += ' -> {0}'.format(anno) + + return rendered diff --git a/pipenv/vendor/funcsigs/version.py b/pipenv/vendor/funcsigs/version.py new file mode 100644 index 0000000000..7863915fa5 --- /dev/null +++ b/pipenv/vendor/funcsigs/version.py @@ -0,0 +1 @@ +__version__ = "1.0.2" diff --git a/pipenv/vendor/importlib_metadata/LICENSE b/pipenv/vendor/importlib_metadata/LICENSE new file mode 100644 index 0000000000..be7e092b0b --- /dev/null +++ b/pipenv/vendor/importlib_metadata/LICENSE @@ -0,0 +1,13 @@ +Copyright 2017-2019 Jason R. Coombs, Barry Warsaw + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/pipenv/vendor/importlib_metadata/__init__.py b/pipenv/vendor/importlib_metadata/__init__.py new file mode 100644 index 0000000000..6da7fd2cd4 --- /dev/null +++ b/pipenv/vendor/importlib_metadata/__init__.py @@ -0,0 +1,554 @@ +from __future__ import unicode_literals, absolute_import + +import io +import os +import re +import abc +import csv +import sys +import zipp +import operator +import functools +import itertools +import collections + +from ._compat import ( + install, + NullFinder, + ConfigParser, + suppress, + map, + FileNotFoundError, + IsADirectoryError, + NotADirectoryError, + PermissionError, + pathlib, + PYPY_OPEN_BUG, + ModuleNotFoundError, + MetaPathFinder, + email_message_from_string, + PyPy_repr, + ) +from importlib import import_module +from itertools import starmap + + +__metaclass__ = type + + +__all__ = [ + 'Distribution', + 'DistributionFinder', + 'PackageNotFoundError', + 'distribution', + 'distributions', + 'entry_points', + 'files', + 'metadata', + 'requires', + 'version', + ] + + +class PackageNotFoundError(ModuleNotFoundError): + """The package was not found.""" + + +class EntryPoint( + PyPy_repr, + collections.namedtuple('EntryPointBase', 'name value group')): + """An entry point as defined by Python packaging conventions. + + See `the packaging docs on entry points + <https://packaging.python.org/specifications/entry-points/>`_ + for more information. 
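+
+    An illustrative (hypothetical) entry point::
+
+        >>> ep = EntryPoint('example', 'example.module:main', 'console_scripts')
+        >>> ep.name, ep.group
+        ('example', 'console_scripts')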
+ """ + + pattern = re.compile( + r'(?P<module>[\w.]+)\s*' + r'(:\s*(?P<attr>[\w.]+))?\s*' + r'(?P<extras>\[.*\])?\s*$' + ) + """ + A regular expression describing the syntax for an entry point, + which might look like: + + - module + - package.module + - package.module:attribute + - package.module:object.attribute + - package.module:attr [extra1, extra2] + + Other combinations are possible as well. + + The expression is lenient about whitespace around the ':', + following the attr, and following any extras. + """ + + def load(self): + """Load the entry point from its definition. If only a module + is indicated by the value, return that module. Otherwise, + return the named object. + """ + match = self.pattern.match(self.value) + module = import_module(match.group('module')) + attrs = filter(None, (match.group('attr') or '').split('.')) + return functools.reduce(getattr, attrs, module) + + @property + def extras(self): + match = self.pattern.match(self.value) + return list(re.finditer(r'\w+', match.group('extras') or '')) + + @classmethod + def _from_config(cls, config): + return [ + cls(name, value, group) + for group in config.sections() + for name, value in config.items(group) + ] + + @classmethod + def _from_text(cls, text): + config = ConfigParser(delimiters='=') + # case sensitive: https://stackoverflow.com/q/1611799/812183 + config.optionxform = str + try: + config.read_string(text) + except AttributeError: # pragma: nocover + # Python 2 has no read_string + config.readfp(io.StringIO(text)) + return EntryPoint._from_config(config) + + def __iter__(self): + """ + Supply iter so one may construct dicts of EntryPoints easily. + """ + return iter((self.name, self)) + + def __reduce__(self): + return ( + self.__class__, + (self.name, self.value, self.group), + ) + + +class PackagePath(pathlib.PurePosixPath): + """A reference to a path in a package""" + + def read_text(self, encoding='utf-8'): + with self.locate().open(encoding=encoding) as stream: + return stream.read() + + def read_binary(self): + with self.locate().open('rb') as stream: + return stream.read() + + def locate(self): + """Return a path-like object for this path""" + return self.dist.locate_file(self) + + +class FileHash: + def __init__(self, spec): + self.mode, _, self.value = spec.partition('=') + + def __repr__(self): + return '<FileHash mode: {} value: {}>'.format(self.mode, self.value) + + +class Distribution: + """A Python distribution package.""" + + @abc.abstractmethod + def read_text(self, filename): + """Attempt to load metadata file given by the name. + + :param filename: The name of the file in the distribution info. + :return: The text if found, otherwise None. + """ + + @abc.abstractmethod + def locate_file(self, path): + """ + Given a path to a file in this distribution, return a path + to it. + """ + + @classmethod + def from_name(cls, name): + """Return the Distribution for the given package name. + + :param name: The name of the distribution package to search for. + :return: The Distribution instance (or subclass thereof) for the named + package, if found. + :raises PackageNotFoundError: When the named package's distribution + metadata cannot be found. + """ + for resolver in cls._discover_resolvers(): + dists = resolver(DistributionFinder.Context(name=name)) + dist = next(dists, None) + if dist is not None: + return dist + else: + raise PackageNotFoundError(name) + + @classmethod + def discover(cls, **kwargs): + """Return an iterable of Distribution objects for all packages. 
+ + Pass a ``context`` or pass keyword arguments for constructing + a context. + + :context: A ``DistributionFinder.Context`` object. + :return: Iterable of Distribution objects for all packages. + """ + context = kwargs.pop('context', None) + if context and kwargs: + raise ValueError("cannot accept context and kwargs") + context = context or DistributionFinder.Context(**kwargs) + return itertools.chain.from_iterable( + resolver(context) + for resolver in cls._discover_resolvers() + ) + + @staticmethod + def at(path): + """Return a Distribution for the indicated metadata path + + :param path: a string or path-like object + :return: a concrete Distribution instance for the path + """ + return PathDistribution(pathlib.Path(path)) + + @staticmethod + def _discover_resolvers(): + """Search the meta_path for resolvers.""" + declared = ( + getattr(finder, 'find_distributions', None) + for finder in sys.meta_path + ) + return filter(None, declared) + + @property + def metadata(self): + """Return the parsed metadata for this Distribution. + + The returned object will have keys that name the various bits of + metadata. See PEP 566 for details. + """ + text = ( + self.read_text('METADATA') + or self.read_text('PKG-INFO') + # This last clause is here to support old egg-info files. Its + # effect is to just end up using the PathDistribution's self._path + # (which points to the egg-info file) attribute unchanged. + or self.read_text('') + ) + return email_message_from_string(text) + + @property + def version(self): + """Return the 'Version' metadata for the distribution package.""" + return self.metadata['Version'] + + @property + def entry_points(self): + return EntryPoint._from_text(self.read_text('entry_points.txt')) + + @property + def files(self): + """Files in this distribution. + + :return: List of PackagePath for this distribution or None + + Result is `None` if the metadata file that enumerates files + (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is + missing. + Result may be empty if the metadata exists but is empty. + """ + file_lines = self._read_files_distinfo() or self._read_files_egginfo() + + def make_file(name, hash=None, size_str=None): + result = PackagePath(name) + result.hash = FileHash(hash) if hash else None + result.size = int(size_str) if size_str else None + result.dist = self + return result + + return file_lines and list(starmap(make_file, csv.reader(file_lines))) + + def _read_files_distinfo(self): + """ + Read the lines of RECORD + """ + text = self.read_text('RECORD') + return text and text.splitlines() + + def _read_files_egginfo(self): + """ + SOURCES.txt might contain literal commas, so wrap each line + in quotes. 
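+
+        For example, a line ``pkg/mod.py`` is read back as ``"pkg/mod.py"``,
+        so the csv reader used by ``files`` sees each line as a single field.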
+ """ + text = self.read_text('SOURCES.txt') + return text and map('"{}"'.format, text.splitlines()) + + @property + def requires(self): + """Generated requirements specified for this Distribution""" + reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs() + return reqs and list(reqs) + + def _read_dist_info_reqs(self): + return self.metadata.get_all('Requires-Dist') + + def _read_egg_info_reqs(self): + source = self.read_text('requires.txt') + return source and self._deps_from_requires_text(source) + + @classmethod + def _deps_from_requires_text(cls, source): + section_pairs = cls._read_sections(source.splitlines()) + sections = { + section: list(map(operator.itemgetter('line'), results)) + for section, results in + itertools.groupby(section_pairs, operator.itemgetter('section')) + } + return cls._convert_egg_info_reqs_to_simple_reqs(sections) + + @staticmethod + def _read_sections(lines): + section = None + for line in filter(None, lines): + section_match = re.match(r'\[(.*)\]$', line) + if section_match: + section = section_match.group(1) + continue + yield locals() + + @staticmethod + def _convert_egg_info_reqs_to_simple_reqs(sections): + """ + Historically, setuptools would solicit and store 'extra' + requirements, including those with environment markers, + in separate sections. More modern tools expect each + dependency to be defined separately, with any relevant + extras and environment markers attached directly to that + requirement. This method converts the former to the + latter. See _test_deps_from_requires_text for an example. + """ + def make_condition(name): + return name and 'extra == "{name}"'.format(name=name) + + def parse_condition(section): + section = section or '' + extra, sep, markers = section.partition(':') + if extra and markers: + markers = '({markers})'.format(markers=markers) + conditions = list(filter(None, [markers, make_condition(extra)])) + return '; ' + ' and '.join(conditions) if conditions else '' + + for section, deps in sections.items(): + for dep in deps: + yield dep + parse_condition(section) + + +class DistributionFinder(MetaPathFinder): + """ + A MetaPathFinder capable of discovering installed distributions. + """ + + class Context: + """ + Keyword arguments presented by the caller to + ``distributions()`` or ``Distribution.discover()`` + to narrow the scope of a search for distributions + in all DistributionFinders. + + Each DistributionFinder may expect any parameters + and should attempt to honor the canonical + parameters defined below when appropriate. + """ + + name = None + """ + Specific name for which a distribution finder should match. + A name of ``None`` matches all distributions. + """ + + def __init__(self, **kwargs): + vars(self).update(kwargs) + + @property + def path(self): + """ + The path that a distribution finder should search. + + Typically refers to Python package paths and defaults + to ``sys.path``. + """ + return vars(self).get('path', sys.path) + + @property + def pattern(self): + return '.*' if self.name is None else re.escape(self.name) + + @abc.abstractmethod + def find_distributions(self, context=Context()): + """ + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching the ``context``, + a DistributionFinder.Context instance. + """ + + +@install +class MetadataPathFinder(NullFinder, DistributionFinder): + """A degenerate finder for distribution packages on the file system. 
+ + This finder supplies only a find_distributions() method for versions + of Python that do not have a PathFinder find_distributions(). + """ + + def find_distributions(self, context=DistributionFinder.Context()): + """ + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching ``context.name`` + (or all names if ``None`` indicated) along the paths in the list + of directories ``context.path``. + """ + found = self._search_paths(context.pattern, context.path) + return map(PathDistribution, found) + + @classmethod + def _search_paths(cls, pattern, paths): + """Find metadata directories in paths heuristically.""" + return itertools.chain.from_iterable( + cls._search_path(path, pattern) + for path in map(cls._switch_path, paths) + ) + + @staticmethod + def _switch_path(path): + if not PYPY_OPEN_BUG or os.path.isfile(path): # pragma: no branch + with suppress(Exception): + return zipp.Path(path) + return pathlib.Path(path) + + @classmethod + def _matches_info(cls, normalized, item): + template = r'{pattern}(-.*)?\.(dist|egg)-info' + manifest = template.format(pattern=normalized) + return re.match(manifest, item.name, flags=re.IGNORECASE) + + @classmethod + def _matches_legacy(cls, normalized, item): + template = r'{pattern}-.*\.egg[\\/]EGG-INFO' + manifest = template.format(pattern=normalized) + return re.search(manifest, str(item), flags=re.IGNORECASE) + + @classmethod + def _search_path(cls, root, pattern): + if not root.is_dir(): + return () + normalized = pattern.replace('-', '_') + return (item for item in root.iterdir() + if cls._matches_info(normalized, item) + or cls._matches_legacy(normalized, item)) + + +class PathDistribution(Distribution): + def __init__(self, path): + """Construct a distribution from a path to the metadata directory. + + :param path: A pathlib.Path or similar object supporting + .joinpath(), __div__, .parent, and .read_text(). + """ + self._path = path + + def read_text(self, filename): + with suppress(FileNotFoundError, IsADirectoryError, KeyError, + NotADirectoryError, PermissionError): + return self._path.joinpath(filename).read_text(encoding='utf-8') + read_text.__doc__ = Distribution.read_text.__doc__ + + def locate_file(self, path): + return self._path.parent / path + + +def distribution(distribution_name): + """Get the ``Distribution`` instance for the named package. + + :param distribution_name: The name of the distribution package as a string. + :return: A ``Distribution`` instance (or subclass thereof). + """ + return Distribution.from_name(distribution_name) + + +def distributions(**kwargs): + """Get all ``Distribution`` instances in the current environment. + + :return: An iterable of ``Distribution`` instances. + """ + return Distribution.discover(**kwargs) + + +def metadata(distribution_name): + """Get the metadata for the named package. + + :param distribution_name: The name of the distribution package to query. + :return: An email.Message containing the parsed metadata. + """ + return Distribution.from_name(distribution_name).metadata + + +def version(distribution_name): + """Get the version string for the named package. + + :param distribution_name: The name of the distribution package to query. + :return: The version string for the package as defined in the package's + "Version" metadata key. + """ + return distribution(distribution_name).version + + +def entry_points(): + """Return EntryPoint objects for all installed packages. 
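+
+    For example (a sketch; the available groups depend on what is
+    installed)::
+
+        >>> entry_points().get('console_scripts', ())  # doctest: +SKIP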
+
+    :return: EntryPoint objects for all installed packages.
+    """
+    eps = itertools.chain.from_iterable(
+        dist.entry_points for dist in distributions())
+    by_group = operator.attrgetter('group')
+    ordered = sorted(eps, key=by_group)
+    grouped = itertools.groupby(ordered, by_group)
+    return {
+        group: tuple(eps)
+        for group, eps in grouped
+        }
+
+
+def files(distribution_name):
+    """Return a list of files for the named package.
+
+    :param distribution_name: The name of the distribution package to query.
+    :return: List of files composing the distribution.
+    """
+    return distribution(distribution_name).files
+
+
+def requires(distribution_name):
+    """
+    Return a list of requirements for the named package.
+
+    :return: An iterator of requirements, suitable for
+        packaging.requirement.Requirement.
+    """
+    return distribution(distribution_name).requires
+
+
+__version__ = version(__name__)
diff --git a/pipenv/vendor/importlib_metadata/_compat.py b/pipenv/vendor/importlib_metadata/_compat.py
new file mode 100644
index 0000000000..3fd65ffddd
--- /dev/null
+++ b/pipenv/vendor/importlib_metadata/_compat.py
@@ -0,0 +1,134 @@
+from __future__ import absolute_import
+
+import io
+import abc
+import sys
+import email
+
+
+if sys.version_info > (3,):  # pragma: nocover
+    import builtins
+    from configparser import ConfigParser
+    from contextlib import suppress
+    FileNotFoundError = builtins.FileNotFoundError
+    IsADirectoryError = builtins.IsADirectoryError
+    NotADirectoryError = builtins.NotADirectoryError
+    PermissionError = builtins.PermissionError
+    map = builtins.map
+else:  # pragma: nocover
+    from backports.configparser import ConfigParser
+    from itertools import imap as map  # type: ignore
+    from contextlib2 import suppress  # noqa
+    FileNotFoundError = IOError, OSError
+    IsADirectoryError = IOError, OSError
+    NotADirectoryError = IOError, OSError
+    PermissionError = IOError, OSError
+
+if sys.version_info > (3, 5):  # pragma: nocover
+    import pathlib
+else:  # pragma: nocover
+    import pathlib2 as pathlib
+
+try:
+    # ModuleNotFoundError was added to builtins in Python 3.6; fall back
+    # to ImportError (its base class) on older interpreters.
+    ModuleNotFoundError = builtins.ModuleNotFoundError
+except (NameError, AttributeError):  # pragma: nocover
+    ModuleNotFoundError = ImportError  # type: ignore
+
+
+if sys.version_info >= (3,):  # pragma: nocover
+    from importlib.abc import MetaPathFinder
+else:  # pragma: nocover
+    class MetaPathFinder(object):
+        __metaclass__ = abc.ABCMeta
+
+
+__metaclass__ = type
+__all__ = [
+    'install', 'NullFinder', 'MetaPathFinder', 'ModuleNotFoundError',
+    'pathlib', 'ConfigParser', 'map', 'suppress', 'FileNotFoundError',
+    'NotADirectoryError', 'email_message_from_string',
+    ]
+
+
+def install(cls):
+    """
+    Class decorator for installation on sys.meta_path.
+
+    Adds the backport DistributionFinder to sys.meta_path and
+    attempts to disable the finder functionality of the stdlib
+    DistributionFinder.
+    """
+    sys.meta_path.append(cls())
+    disable_stdlib_finder()
+    return cls
+
+
+def disable_stdlib_finder():
+    """
+    Give the backport primacy for discovering path-based distributions
+    by monkey-patching the stdlib O_O.
+
+    See #91 for more background for rationale on this sketchy
+    behavior.
+    """
+    def matches(finder):
+        return (
+            finder.__module__ == '_frozen_importlib_external'
+            and hasattr(finder, 'find_distributions')
+        )
+    for finder in filter(matches, sys.meta_path):  # pragma: nocover
+        del finder.find_distributions
+
+
+class NullFinder:
+    """
+    A "Finder" (aka "MetaPathFinder") that never finds any modules,
+    but may find distributions.
+ """ + @staticmethod + def find_spec(*args, **kwargs): + return None + + # In Python 2, the import system requires finders + # to have a find_module() method, but this usage + # is deprecated in Python 3 in favor of find_spec(). + # For the purposes of this finder (i.e. being present + # on sys.meta_path but having no other import + # system functionality), the two methods are identical. + find_module = find_spec + + +def py2_message_from_string(text): # nocoverpy3 + # Work around https://bugs.python.org/issue25545 where + # email.message_from_string cannot handle Unicode on Python 2. + io_buffer = io.StringIO(text) + return email.message_from_file(io_buffer) + + +email_message_from_string = ( + py2_message_from_string + if sys.version_info < (3,) else + email.message_from_string + ) + +# https://bitbucket.org/pypy/pypy/issues/3021/ioopen-directory-leaks-a-file-descriptor +PYPY_OPEN_BUG = getattr(sys, 'pypy_version_info', (9, 9, 9))[:3] <= (7, 1, 1) + + +class PyPy_repr: + """ + Override repr for EntryPoint objects on PyPy to avoid __iter__ access. + Ref #97, #102. + """ + affected = hasattr(sys, 'pypy_version_info') + + def __compat_repr__(self): # pragma: nocover + def make_param(name): + value = getattr(self, name) + return '{name}={value!r}'.format(**locals()) + params = ', '.join(map(make_param, self._fields)) + return 'EntryPoint({params})'.format(**locals()) + + if affected: # pragma: nocover + __repr__ = __compat_repr__ + del affected diff --git a/pipenv/vendor/importlib_metadata/docs/__init__.py b/pipenv/vendor/importlib_metadata/docs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_metadata/docs/changelog.rst b/pipenv/vendor/importlib_metadata/docs/changelog.rst new file mode 100644 index 0000000000..d38b36f21f --- /dev/null +++ b/pipenv/vendor/importlib_metadata/docs/changelog.rst @@ -0,0 +1,237 @@ +========================= + importlib_metadata NEWS +========================= + +v1.3.0 +====== + +* Improve custom finders documentation. Closes #105. + +v1.2.0 +====== + +* Once again, drop support for Python 3.4. Ref #104. + +v1.1.3 +====== + +* Restored support for Python 3.4 due to improper version + compatibility declarations in the v1.1.0 and v1.1.1 + releases. Closes #104. + +v1.1.2 +====== + +* Repaired project metadata to correctly declare the + ``python_requires`` directive. Closes #103. + +v1.1.1 +====== + +* Fixed ``repr(EntryPoint)`` on PyPy 3 also. Closes #102. + +v1.1.0 +====== + +* Dropped support for Python 3.4. +* EntryPoints are now pickleable. Closes #96. +* Fixed ``repr(EntryPoint)`` on PyPy 2. Closes #97. + +v1.0.0 +====== + +* Project adopts semver for versioning. + +* Removed compatibility shim introduced in 0.23. + +* For better compatibility with the stdlib implementation and to + avoid the same distributions being discovered by the stdlib and + backport implementations, the backport now disables the + stdlib DistributionFinder during initialization (import time). + Closes #91 and closes #100. + +0.23 +==== +* Added a compatibility shim to prevent failures on beta releases + of Python before the signature changed to accept the + "context" parameter on find_distributions. This workaround + will have a limited lifespan, not to extend beyond release of + Python 3.8 final. 
+ +0.22 +==== +* Renamed ``package`` parameter to ``distribution_name`` + as `recommended <https://bugs.python.org/issue34632#msg349423>`_ + in the following functions: ``distribution``, ``metadata``, + ``version``, ``files``, and ``requires``. This + backward-incompatible change is expected to have little impact + as these functions are assumed to be primarily used with + positional parameters. + +0.21 +==== +* ``importlib.metadata`` now exposes the ``DistributionFinder`` + metaclass and references it in the docs for extending the + search algorithm. +* Add ``Distribution.at`` for constructing a Distribution object + from a known metadata directory on the file system. Closes #80. +* Distribution finders now receive a context object that + supplies ``.path`` and ``.name`` properties. This change + introduces a fundamental backward incompatibility for + any projects implementing a ``find_distributions`` method + on a ``MetaPathFinder``. This new layer of abstraction + allows this context to be supplied directly or constructed + on demand and opens the opportunity for a + ``find_distributions`` method to solicit additional + context from the caller. Closes #85. + +0.20 +==== +* Clarify in the docs that calls to ``.files`` could return + ``None`` when the metadata is not present. Closes #69. +* Return all requirements and not just the first for dist-info + packages. Closes #67. + +0.19 +==== +* Restrain over-eager egg metadata resolution. +* Add support for entry points with colons in the name. Closes #75. + +0.18 +==== +* Parse entry points case sensitively. Closes #68 +* Add a version constraint on the backport configparser package. Closes #66 + +0.17 +==== +* Fix a permission problem in the tests on Windows. + +0.16 +==== +* Don't crash if there exists an EGG-INFO directory on sys.path. + +0.15 +==== +* Fix documentation. + +0.14 +==== +* Removed ``local_distribution`` function from the API. + **This backward-incompatible change removes this + behavior summarily**. Projects should remove their + reliance on this behavior. A replacement behavior is + under review in the `pep517 project + <https://github.com/pypa/pep517>`_. Closes #42. + +0.13 +==== +* Update docstrings to match PEP 8. Closes #63. +* Merged modules into one module. Closes #62. + +0.12 +==== +* Add support for eggs. !65; Closes #19. + +0.11 +==== +* Support generic zip files (not just wheels). Closes #59 +* Support zip files with multiple distributions in them. Closes #60 +* Fully expose the public API in ``importlib_metadata.__all__``. + +0.10 +==== +* The ``Distribution`` ABC is now officially part of the public API. + Closes #37. +* Fixed support for older single file egg-info formats. Closes #43. +* Fixed a testing bug when ``$CWD`` has spaces in the path. Closes #50. +* Add Python 3.8 to the ``tox`` testing matrix. + +0.9 +=== +* Fixed issue where entry points without an attribute would raise an + Exception. Closes #40. +* Removed unused ``name`` parameter from ``entry_points()``. Closes #44. +* ``DistributionFinder`` classes must now be instantiated before + being placed on ``sys.meta_path``. + +0.8 +=== +* This library can now discover/enumerate all installed packages. **This + backward-incompatible change alters the protocol finders must + implement to support distribution package discovery.** Closes #24. +* The signature of ``find_distributions()`` on custom installer finders + should now accept two parameters, ``name`` and ``path`` and + these parameters must supply defaults. 
+* The ``entry_points()`` method no longer accepts a package name + but instead returns all entry points in a dictionary keyed by the + ``EntryPoint.group``. The ``resolve`` method has been removed. Instead, + call ``EntryPoint.load()``, which has the same semantics as + ``pkg_resources`` and ``entrypoints``. **This is a backward incompatible + change.** +* Metadata is now always returned as Unicode text regardless of + Python version. Closes #29. +* This library can now discover metadata for a 'local' package (found + in the current-working directory). Closes #27. +* Added ``files()`` function for resolving files from a distribution. +* Added a new ``requires()`` function, which returns the requirements + for a package suitable for parsing by + ``packaging.requirements.Requirement``. Closes #18. +* The top-level ``read_text()`` function has been removed. Use + ``PackagePath.read_text()`` on instances returned by the ``files()`` + function. **This is a backward incompatible change.** +* Release dates are now automatically injected into the changelog + based on SCM tags. + +0.7 +=== +* Fixed issue where packages with dashes in their names would + not be discovered. Closes #21. +* Distribution lookup is now case-insensitive. Closes #20. +* Wheel distributions can no longer be discovered by their module + name. Like Path distributions, they must be indicated by their + distribution package name. + +0.6 +=== +* Removed ``importlib_metadata.distribution`` function. Now + the public interface is primarily the utility functions exposed + in ``importlib_metadata.__all__``. Closes #14. +* Added two new utility functions ``read_text`` and + ``metadata``. + +0.5 +=== +* Updated README and removed details about Distribution + class, now considered private. Closes #15. +* Added test suite support for Python 3.4+. +* Fixed SyntaxErrors on Python 3.4 and 3.5. !12 +* Fixed errors on Windows joining Path elements. !15 + +0.4 +=== +* Housekeeping. + +0.3 +=== +* Added usage documentation. Closes #8 +* Add support for getting metadata from wheels on ``sys.path``. Closes #9 + +0.2 +=== +* Added ``importlib_metadata.entry_points()``. Closes #1 +* Added ``importlib_metadata.resolve()``. Closes #12 +* Add support for Python 2.7. Closes #4 + +0.1 +=== +* Initial release. + + +.. + Local Variables: + mode: change-log-mode + indent-tabs-mode: nil + sentence-end-double-space: t + fill-column: 78 + coding: utf-8 + End: diff --git a/pipenv/vendor/importlib_metadata/docs/conf.py b/pipenv/vendor/importlib_metadata/docs/conf.py new file mode 100644 index 0000000000..af9f0e2667 --- /dev/null +++ b/pipenv/vendor/importlib_metadata/docs/conf.py @@ -0,0 +1,182 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# importlib_metadata documentation build configuration file, created by +# sphinx-quickstart on Thu Nov 30 10:21:00 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'rst.linker', + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.viewcode', + ] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'importlib_metadata' +copyright = '2017-2019, Jason R. Coombs, Barry Warsaw' +author = 'Jason R. Coombs, Barry Warsaw' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.1' +# The full version, including alpha/beta/rc tags. +release = '0.1' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'default' + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# This is required for the alabaster theme +# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars +html_sidebars = { + '**': [ + 'relations.html', # needs 'show_related': True theme option to display + 'searchbox.html', + ] + } + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'importlib_metadatadoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', + } + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). 
+latex_documents = [ + (master_doc, 'importlib_metadata.tex', + 'importlib\\_metadata Documentation', + 'Brett Cannon, Barry Warsaw', 'manual'), + ] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'importlib_metadata', 'importlib_metadata Documentation', + [author], 1) + ] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'importlib_metadata', 'importlib_metadata Documentation', + author, 'importlib_metadata', 'One line description of project.', + 'Miscellaneous'), + ] + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + } + + +# For rst.linker, inject release dates into changelog.rst +link_files = { + 'changelog.rst': dict( + replace=[ + dict( + pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n', + with_scm='{text}\n{rev[timestamp]:%Y-%m-%d}\n\n', + ), + ], + ), + } diff --git a/pipenv/vendor/importlib_metadata/docs/index.rst b/pipenv/vendor/importlib_metadata/docs/index.rst new file mode 100644 index 0000000000..91e815c0c4 --- /dev/null +++ b/pipenv/vendor/importlib_metadata/docs/index.rst @@ -0,0 +1,54 @@ +=============================== + Welcome to importlib_metadata +=============================== + +``importlib_metadata`` is a library which provides an API for accessing an +installed package's `metadata`_, such as its entry points or its top-level +name. This functionality intends to replace most uses of ``pkg_resources`` +`entry point API`_ and `metadata API`_. Along with ``importlib.resources`` in +`Python 3.7 and newer`_ (backported as `importlib_resources`_ for older +versions of Python), this can eliminate the need to use the older and less +efficient ``pkg_resources`` package. + +``importlib_metadata`` is a backport of Python 3.8's standard library +`importlib.metadata`_ module for Python 2.7, and 3.4 through 3.7. Users of +Python 3.8 and beyond are encouraged to use the standard library module. +When imported on Python 3.8 and later, ``importlib_metadata`` replaces the +DistributionFinder behavior from the stdlib, but leaves the API in tact. +Developers looking for detailed API descriptions should refer to the Python +3.8 standard library documentation. + +The documentation here includes a general :ref:`usage <using>` guide. + + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + using.rst + changelog (links).rst + + +Project details +=============== + + * Project home: https://gitlab.com/python-devs/importlib_metadata + * Report bugs at: https://gitlab.com/python-devs/importlib_metadata/issues + * Code hosting: https://gitlab.com/python-devs/importlib_metadata.git + * Documentation: http://importlib_metadata.readthedocs.io/ + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + + +.. _`metadata`: https://www.python.org/dev/peps/pep-0566/ +.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points +.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api +.. 
_`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources +.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html +.. _`importlib.metadata`: https://docs.python.org/3/library/importlib.metadata.html diff --git a/pipenv/vendor/importlib_metadata/docs/using.rst b/pipenv/vendor/importlib_metadata/docs/using.rst new file mode 100644 index 0000000000..bd73339489 --- /dev/null +++ b/pipenv/vendor/importlib_metadata/docs/using.rst @@ -0,0 +1,259 @@ +.. _using: + +========================== + Using importlib_metadata +========================== + +``importlib_metadata`` is a library that provides for access to installed +package metadata. Built in part on Python's import system, this library +intends to replace similar functionality in the `entry point +API`_ and `metadata API`_ of ``pkg_resources``. Along with +``importlib.resources`` in `Python 3.7 +and newer`_ (backported as `importlib_resources`_ for older versions of +Python), this can eliminate the need to use the older and less efficient +``pkg_resources`` package. + +By "installed package" we generally mean a third-party package installed into +Python's ``site-packages`` directory via tools such as `pip +<https://pypi.org/project/pip/>`_. Specifically, +it means a package with either a discoverable ``dist-info`` or ``egg-info`` +directory, and metadata defined by `PEP 566`_ or its older specifications. +By default, package metadata can live on the file system or in zip archives on +``sys.path``. Through an extension mechanism, the metadata can live almost +anywhere. + + +Overview +======== + +Let's say you wanted to get the version string for a package you've installed +using ``pip``. We start by creating a virtual environment and installing +something into it:: + + $ python3 -m venv example + $ source example/bin/activate + (example) $ pip install importlib_metadata + (example) $ pip install wheel + +You can get the version string for ``wheel`` by running the following:: + + (example) $ python + >>> from importlib_metadata import version + >>> version('wheel') + '0.32.3' + +You can also get the set of entry points keyed by group, such as +``console_scripts``, ``distutils.commands`` and others. Each group contains a +sequence of :ref:`EntryPoint <entry-points>` objects. + +You can get the :ref:`metadata for a distribution <metadata>`:: + + >>> list(metadata('wheel')) + ['Metadata-Version', 'Name', 'Version', 'Summary', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Project-URL', 'Project-URL', 'Project-URL', 'Keywords', 'Platform', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Requires-Python', 'Provides-Extra', 'Requires-Dist', 'Requires-Dist'] + +You can also get a :ref:`distribution's version number <version>`, list its +:ref:`constituent files <files>`, and get a list of the distribution's +:ref:`requirements`. + + +Functional API +============== + +This package provides the following functionality via its public API. + + +.. _entry-points: + +Entry points +------------ + +The ``entry_points()`` function returns a dictionary of all entry points, +keyed by group. 
Entry points are represented by ``EntryPoint`` instances; +each ``EntryPoint`` has ``.name``, ``.group``, and ``.value`` attributes and +a ``.load()`` method to resolve the value:: + + >>> eps = entry_points() + >>> list(eps) + ['console_scripts', 'distutils.commands', 'distutils.setup_keywords', 'egg_info.writers', 'setuptools.installation'] + >>> scripts = eps['console_scripts'] + >>> wheel = [ep for ep in scripts if ep.name == 'wheel'][0] + >>> wheel + EntryPoint(name='wheel', value='wheel.cli:main', group='console_scripts') + >>> main = wheel.load() + >>> main + <function main at 0x103528488> + +The ``group`` and ``name`` are arbitrary values defined by the package author +and usually a client will wish to resolve all entry points for a particular +group. Read `the setuptools docs +<https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_ +for more information on entry points, their definition, and usage. + + +.. _metadata: + +Distribution metadata +--------------------- + +Every distribution includes some metadata, which you can extract using the +``metadata()`` function:: + + >>> wheel_metadata = metadata('wheel') + +The keys of the returned data structure [#f1]_ name the metadata keywords, and +their values are returned unparsed from the distribution metadata:: + + >>> wheel_metadata['Requires-Python'] + '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*' + + +.. _version: + +Distribution versions +--------------------- + +The ``version()`` function is the quickest way to get a distribution's version +number, as a string:: + + >>> version('wheel') + '0.32.3' + + +.. _files: + +Distribution files +------------------ + +You can also get the full set of files contained within a distribution. The +``files()`` function takes a distribution package name and returns all of the +files installed by this distribution. Each file object returned is a +``PackagePath``, a `pathlib.Path`_ derived object with additional ``dist``, +``size``, and ``hash`` properties as indicated by the metadata. For example:: + + >>> util = [p for p in files('wheel') if 'util.py' in str(p)][0] + >>> util + PackagePath('wheel/util.py') + >>> util.size + 859 + >>> util.dist + <importlib_metadata._hooks.PathDistribution object at 0x101e0cef0> + >>> util.hash + <FileHash mode: sha256 value: bYkw5oMccfazVCoYQwKkkemoVyMAFoR34mmKBx8R1NI> + +Once you have the file, you can also read its contents:: + + >>> print(util.read_text()) + import base64 + import sys + ... + def as_bytes(s): + if isinstance(s, text_type): + return s.encode('utf-8') + return s + +In the case where the metadata file listing files +(RECORD or SOURCES.txt) is missing, ``files()`` will +return ``None``. The caller may wish to wrap calls to +``files()`` in `always_iterable +<https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.always_iterable>`_ +or otherwise guard against this condition if the target +distribution is not known to have the metadata present. + +.. _requirements: + +Distribution requirements +------------------------- + +To get the full set of requirements for a distribution, use the ``requires()`` +function:: + + >>> requires('wheel') + ["pytest (>=3.0.0) ; extra == 'test'", "pytest-cov ; extra == 'test'"] + + +Distributions +============= + +While the above API is the most common and convenient usage, you can get all +of that information from the ``Distribution`` class. A ``Distribution`` is an +abstract object that represents the metadata for a Python package. 
You can +get the ``Distribution`` instance:: + + >>> from importlib_metadata import distribution + >>> dist = distribution('wheel') + +Thus, an alternative way to get the version number is through the +``Distribution`` instance:: + + >>> dist.version + '0.32.3' + +There are all kinds of additional metadata available on the ``Distribution`` +instance:: + + >>> dist.metadata['Requires-Python'] + '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*' + >>> dist.metadata['License'] + 'MIT' + +The full set of available metadata is not described here. See `PEP 566 +<https://www.python.org/dev/peps/pep-0566/>`_ for additional details. + + +Extending the search algorithm +============================== + +Because package metadata is not available through ``sys.path`` searches or +package loaders directly, the metadata for a package is found through import +system `finders`_. To find a distribution package's metadata, +``importlib_metadata`` queries the list of `meta path finders`_ on +`sys.meta_path`_. + +By default ``importlib_metadata`` installs a finder for distribution packages +found on the file system. This finder doesn't actually find any *packages*, +but it can find the packages' metadata. + +The abstract class :py:class:`importlib.abc.MetaPathFinder` defines the +interface expected of finders by Python's import system. +``importlib_metadata`` extends this protocol by looking for an optional +``find_distributions`` callable on the finders from +``sys.meta_path`` and presents this extended interface as the +``DistributionFinder`` abstract base class, which defines this abstract +method:: + + @abc.abstractmethod + def find_distributions(context=DistributionFinder.Context()): + """Return an iterable of all Distribution instances capable of + loading the metadata for packages for the indicated ``context``. + """ + +The ``DistributionFinder.Context`` object provides ``.path`` and ``.name`` +properties indicating the path to search and names to match and may +supply other relevant context. + +What this means in practice is that to support finding distribution package +metadata in locations other than the file system, subclass +``Distribution`` and implement the abstract methods. Then from +a custom finder, return instances of this derived ``Distribution`` in the +``find_distributions()`` method. + + +.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points +.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api +.. _`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources +.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html +.. _`PEP 566`: https://www.python.org/dev/peps/pep-0566/ +.. _`finders`: https://docs.python.org/3/reference/import.html#finders-and-loaders +.. _`meta path finders`: https://docs.python.org/3/glossary.html#term-meta-path-finder +.. _`sys.meta_path`: https://docs.python.org/3/library/sys.html#sys.meta_path +.. _`pathlib.Path`: https://docs.python.org/3/library/pathlib.html#pathlib.Path + + +.. rubric:: Footnotes + +.. [#f1] Technically, the returned distribution metadata object is an + `email.message.Message + <https://docs.python.org/3/library/email.message.html#email.message.EmailMessage>`_ + instance, but this is an implementation detail, and not part of the + stable API. You should only use dictionary-like methods and syntax + to access the metadata contents. 
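+
+
+A sketch of a custom finder
+===========================
+
+To make the section on extending the search algorithm concrete, here is a
+minimal, hypothetical sketch of serving metadata from memory rather than the
+file system. The names ``InMemoryDistribution`` and ``InMemoryFinder`` are
+invented for illustration and are not part of this package::
+
+    import sys
+
+    from importlib_metadata import Distribution, DistributionFinder
+
+    class InMemoryDistribution(Distribution):
+        """Serve metadata for one package from a plain dict."""
+
+        def __init__(self, store):
+            self._store = store
+
+        def read_text(self, filename):
+            # METADATA, RECORD, entry_points.txt, etc. are looked up here.
+            return self._store.get(filename)
+
+        def locate_file(self, path):
+            return None  # nothing exists on the file system
+
+    class InMemoryFinder(DistributionFinder):
+        def find_spec(self, *args, **kwargs):
+            return None  # this finder locates metadata, not modules
+
+        def find_distributions(self, context=DistributionFinder.Context()):
+            # A real finder should honor ``context.name`` and yield only
+            # matching distributions; this sketch always yields its one dist.
+            yield InMemoryDistribution({'METADATA': 'Name: demo\nVersion: 1.0\n'})
+
+    sys.meta_path.append(InMemoryFinder())
+
+With the finder registered, ``version('demo')`` should then resolve to
+``'1.0'`` without any corresponding files on disk.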
diff --git a/pipenv/vendor/importlib_metadata/tests/__init__.py b/pipenv/vendor/importlib_metadata/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_metadata/tests/data/__init__.py b/pipenv/vendor/importlib_metadata/tests/data/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_metadata/tests/data/example-21.12-py3-none-any.whl b/pipenv/vendor/importlib_metadata/tests/data/example-21.12-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..641ab07f7aadd5c3ffe199b1e397b84504444994 GIT binary patch literal 1455 zcmWIWW@Zs#U|`^2sH#5hC#89?LJ`OVVPPOntw_u*$Vt_YkI&4@EQycTE2#AL^bJ1Y zd*;mL3tJuqF*Gf@GU?JH8`iH^x{lmwniEp0#}EKFaSqT#mVX%$)j*SW1F-_aL|r38 zJwqeCl+5B1-ORkSe0^Wn5Jwls5Xac=ja-KeL|ngr7ya2Na>+2-+_Beb<+<s<vY0D< zJ%X?7zwXGh<8C$I^AoJ*A}_d~Up;<w)i%Q?3OS<oikb$YsZy7N_IE1mUn6zr-rRM4 zWq}ducXlqy(=I;qKYsIpb}#<AC#io^Vh>qte)Tv+?!=mXP3IU>yCSok_B-aII-Kw3 zQMDMLgF#ppkHf<~TwQ&*&wKcuy~yjWt99<o`OQHFSBxJN`JeUH_0l=1zbVMW>!gle zr#}12zzYT!jLohYUwp!K>b(BNuR7OwHFUksp7hxiq@k(h`;^PmXM2#w3X^MRPoMX` z>U(DOlSfQw;W@=u^`JJ;IgUWAfyY^?c_l@a@df#rc_qbqB^4#ze&^0>pF8i_tM8|G zN=HMp?`hFmk2AU_JVQ5xdbDm>QzUHsc}LJj?qWtX3-6|I25|u`;s9b*JQkMZ7sThJ zmZj!^Z1aYg{S@8xHoufE=rJ-dTm)h{Jcb3iI{OE?l%CmH)a1axaN%A)<C6)>u3Q2l zE(Z%%TzIplh-=e{bsROdT|LH`74jc0G1}d-ie(BgQ)O((bmtPEAigxe^3+zD^2ZPA z=Kj_@llnHb%pqs>={~b{JFA<IF7rt`x?90ZK<wpxm(MbXKl%hEIZgL%o5rpjP-+@r z|Mpwa^GR$R7dSfAA~&q6pJV#F$*<mNi$>WU-FsQLpa0z(9XLnn{J!+d2H)mi39tFM zWw~J?v;DQj2W5<}*`3O4x;giZz4dI<U(*Fr7E1-j|9LahY>BjE_o=poHl8P!xz%}R z^2PdUhwx4OykW7N0@LpsPXda2KAh-#t~vYAon3*GEN%;5X|j5I`Pcv4f`xLoSdG>m z=t|fwwtn6&&3ktpt-?;-J{BOG-L!7y`!yP=+rIT|ef?5o>krldZ$>5&X54uh7~Ej6 zq!C16$=S#TV9UA??F<Y{8iNo9;LOPArlIF2glXKs!~k^;hH2p3g>Dpj9zYni2#-<d zIRf2O^n{KuRhS9415uJax{>I~9cJVf#soY@LK1&~H!B-RJsS|72YTxiGl&NO07LVy literal 0 HcmV?d00001 diff --git a/pipenv/vendor/importlib_metadata/tests/fixtures.py b/pipenv/vendor/importlib_metadata/tests/fixtures.py new file mode 100644 index 0000000000..0b4ce18d5a --- /dev/null +++ b/pipenv/vendor/importlib_metadata/tests/fixtures.py @@ -0,0 +1,200 @@ +from __future__ import unicode_literals + +import os +import sys +import shutil +import tempfile +import textwrap +import contextlib + +try: + from contextlib import ExitStack +except ImportError: + from contextlib2 import ExitStack + +try: + import pathlib +except ImportError: + import pathlib2 as pathlib + + +__metaclass__ = type + + +@contextlib.contextmanager +def tempdir(): + tmpdir = tempfile.mkdtemp() + try: + yield pathlib.Path(tmpdir) + finally: + shutil.rmtree(tmpdir) + + +@contextlib.contextmanager +def save_cwd(): + orig = os.getcwd() + try: + yield + finally: + os.chdir(orig) + + +@contextlib.contextmanager +def tempdir_as_cwd(): + with tempdir() as tmp: + with save_cwd(): + os.chdir(str(tmp)) + yield tmp + + +class SiteDir: + def setUp(self): + self.fixtures = ExitStack() + self.addCleanup(self.fixtures.close) + self.site_dir = self.fixtures.enter_context(tempdir()) + + +class OnSysPath: + @staticmethod + @contextlib.contextmanager + def add_sys_path(dir): + sys.path[:0] = [str(dir)] + try: + yield + finally: + sys.path.remove(str(dir)) + + def setUp(self): + super(OnSysPath, self).setUp() + self.fixtures.enter_context(self.add_sys_path(self.site_dir)) + + +class DistInfoPkg(OnSysPath, SiteDir): + files = { + "distinfo_pkg-1.0.0.dist-info": { + "METADATA": """ + Name: distinfo-pkg + Author: Steven Ma + Version: 1.0.0 + Requires-Dist: wheel >= 1.0 + Requires-Dist: pytest; extra == 
'test' + """, + "RECORD": "mod.py,sha256=abc,20\n", + "entry_points.txt": """ + [entries] + main = mod:main + ns:sub = mod:main + """ + }, + "mod.py": """ + def main(): + print("hello world") + """, + } + + def setUp(self): + super(DistInfoPkg, self).setUp() + build_files(DistInfoPkg.files, self.site_dir) + + +class DistInfoPkgOffPath(SiteDir): + def setUp(self): + super(DistInfoPkgOffPath, self).setUp() + build_files(DistInfoPkg.files, self.site_dir) + + +class EggInfoPkg(OnSysPath, SiteDir): + files = { + "egginfo_pkg.egg-info": { + "PKG-INFO": """ + Name: egginfo-pkg + Author: Steven Ma + License: Unknown + Version: 1.0.0 + Classifier: Intended Audience :: Developers + Classifier: Topic :: Software Development :: Libraries + """, + "SOURCES.txt": """ + mod.py + egginfo_pkg.egg-info/top_level.txt + """, + "entry_points.txt": """ + [entries] + main = mod:main + """, + "requires.txt": """ + wheel >= 1.0; python_version >= "2.7" + [test] + pytest + """, + "top_level.txt": "mod\n" + }, + "mod.py": """ + def main(): + print("hello world") + """, + } + + def setUp(self): + super(EggInfoPkg, self).setUp() + build_files(EggInfoPkg.files, prefix=self.site_dir) + + +class EggInfoFile(OnSysPath, SiteDir): + files = { + "egginfo_file.egg-info": """ + Metadata-Version: 1.0 + Name: egginfo_file + Version: 0.1 + Summary: An example package + Home-page: www.example.com + Author: Eric Haffa-Vee + Author-email: eric@example.coms + License: UNKNOWN + Description: UNKNOWN + Platform: UNKNOWN + """, + } + + def setUp(self): + super(EggInfoFile, self).setUp() + build_files(EggInfoFile.files, prefix=self.site_dir) + + +def build_files(file_defs, prefix=pathlib.Path()): + """Build a set of files/directories, as described by the + + file_defs dictionary. Each key/value pair in the dictionary is + interpreted as a filename/contents pair. If the contents value is a + dictionary, a directory is created, and the dictionary interpreted + as the files within it, recursively. + + For example: + + {"README.txt": "A README file", + "foo": { + "__init__.py": "", + "bar": { + "__init__.py": "", + }, + "baz.py": "# Some code", + } + } + """ + for name, contents in file_defs.items(): + full_name = prefix / name + if isinstance(contents, dict): + full_name.mkdir() + build_files(contents, prefix=full_name) + else: + if isinstance(contents, bytes): + with full_name.open('wb') as f: + f.write(contents) + else: + with full_name.open('w') as f: + f.write(DALS(contents)) + + +def DALS(str): + "Dedent and left-strip" + return textwrap.dedent(str).lstrip() diff --git a/pipenv/vendor/importlib_metadata/tests/test_api.py b/pipenv/vendor/importlib_metadata/tests/test_api.py new file mode 100644 index 0000000000..aa346ddb20 --- /dev/null +++ b/pipenv/vendor/importlib_metadata/tests/test_api.py @@ -0,0 +1,176 @@ +import re +import textwrap +import unittest + +from . import fixtures +from .. import ( + Distribution, PackageNotFoundError, __version__, distribution, + entry_points, files, metadata, requires, version, + ) + +try: + from collections.abc import Iterator +except ImportError: + from collections import Iterator # noqa: F401 + +try: + from builtins import str as text +except ImportError: + from __builtin__ import unicode as text + + +class APITests( + fixtures.EggInfoPkg, + fixtures.DistInfoPkg, + fixtures.EggInfoFile, + unittest.TestCase): + + version_pattern = r'\d+\.\d+(\.\d)?' 
+ + def test_retrieves_version_of_self(self): + pkg_version = version('egginfo-pkg') + assert isinstance(pkg_version, text) + assert re.match(self.version_pattern, pkg_version) + + def test_retrieves_version_of_distinfo_pkg(self): + pkg_version = version('distinfo-pkg') + assert isinstance(pkg_version, text) + assert re.match(self.version_pattern, pkg_version) + + def test_for_name_does_not_exist(self): + with self.assertRaises(PackageNotFoundError): + distribution('does-not-exist') + + def test_for_top_level(self): + self.assertEqual( + distribution('egginfo-pkg').read_text('top_level.txt').strip(), + 'mod') + + def test_read_text(self): + top_level = [ + path for path in files('egginfo-pkg') + if path.name == 'top_level.txt' + ][0] + self.assertEqual(top_level.read_text(), 'mod\n') + + def test_entry_points(self): + entries = dict(entry_points()['entries']) + ep = entries['main'] + self.assertEqual(ep.value, 'mod:main') + self.assertEqual(ep.extras, []) + + def test_metadata_for_this_package(self): + md = metadata('egginfo-pkg') + assert md['author'] == 'Steven Ma' + assert md['LICENSE'] == 'Unknown' + assert md['Name'] == 'egginfo-pkg' + classifiers = md.get_all('Classifier') + assert 'Topic :: Software Development :: Libraries' in classifiers + + def test_importlib_metadata_version(self): + assert re.match(self.version_pattern, __version__) + + @staticmethod + def _test_files(files): + root = files[0].root + for file in files: + assert file.root == root + assert not file.hash or file.hash.value + assert not file.hash or file.hash.mode == 'sha256' + assert not file.size or file.size >= 0 + assert file.locate().exists() + assert isinstance(file.read_binary(), bytes) + if file.name.endswith('.py'): + file.read_text() + + def test_file_hash_repr(self): + try: + assertRegex = self.assertRegex + except AttributeError: + # Python 2 + assertRegex = self.assertRegexpMatches + + util = [ + p for p in files('distinfo-pkg') + if p.name == 'mod.py' + ][0] + assertRegex( + repr(util.hash), + '<FileHash mode: sha256 value: .*>') + + def test_files_dist_info(self): + self._test_files(files('distinfo-pkg')) + + def test_files_egg_info(self): + self._test_files(files('egginfo-pkg')) + + def test_version_egg_info_file(self): + self.assertEqual(version('egginfo-file'), '0.1') + + def test_requires_egg_info_file(self): + requirements = requires('egginfo-file') + self.assertIsNone(requirements) + + def test_requires_egg_info(self): + deps = requires('egginfo-pkg') + assert len(deps) == 2 + assert any( + dep == 'wheel >= 1.0; python_version >= "2.7"' + for dep in deps + ) + + def test_requires_dist_info(self): + deps = requires('distinfo-pkg') + assert len(deps) == 2 + assert all(deps) + assert 'wheel >= 1.0' in deps + assert "pytest; extra == 'test'" in deps + + def test_more_complex_deps_requires_text(self): + requires = textwrap.dedent(""" + dep1 + dep2 + + [:python_version < "3"] + dep3 + + [extra1] + dep4 + + [extra2:python_version < "3"] + dep5 + """) + deps = sorted(Distribution._deps_from_requires_text(requires)) + expected = [ + 'dep1', + 'dep2', + 'dep3; python_version < "3"', + 'dep4; extra == "extra1"', + 'dep5; (python_version < "3") and extra == "extra2"', + ] + # It's important that the environment marker expression be + # wrapped in parentheses to avoid the following 'and' binding more + # tightly than some other part of the environment expression. 
+ + assert deps == expected + + +class OffSysPathTests(fixtures.DistInfoPkgOffPath, unittest.TestCase): + def test_find_distributions_specified_path(self): + dists = Distribution.discover(path=[str(self.site_dir)]) + assert any( + dist.metadata['Name'] == 'distinfo-pkg' + for dist in dists + ) + + def test_distribution_at_pathlib(self): + """Demonstrate how to load metadata direct from a directory. + """ + dist_info_path = self.site_dir / 'distinfo_pkg-1.0.0.dist-info' + dist = Distribution.at(dist_info_path) + assert dist.version == '1.0.0' + + def test_distribution_at_str(self): + dist_info_path = self.site_dir / 'distinfo_pkg-1.0.0.dist-info' + dist = Distribution.at(str(dist_info_path)) + assert dist.version == '1.0.0' diff --git a/pipenv/vendor/importlib_metadata/tests/test_integration.py b/pipenv/vendor/importlib_metadata/tests/test_integration.py new file mode 100644 index 0000000000..11ed7dc865 --- /dev/null +++ b/pipenv/vendor/importlib_metadata/tests/test_integration.py @@ -0,0 +1,22 @@ +import unittest +import packaging.requirements +import packaging.version + +from . import fixtures +from .. import version + + +class IntegrationTests(fixtures.DistInfoPkg, unittest.TestCase): + + def test_package_spec_installed(self): + """ + Illustrate the recommended procedure to determine if + a specified version of a package is installed. + """ + def is_installed(package_spec): + req = packaging.requirements.Requirement(package_spec) + return version(req.name) in req.specifier + + assert is_installed('distinfo-pkg==1.0') + assert is_installed('distinfo-pkg>=1.0,<2.0') + assert not is_installed('distinfo-pkg<1.0') diff --git a/pipenv/vendor/importlib_metadata/tests/test_main.py b/pipenv/vendor/importlib_metadata/tests/test_main.py new file mode 100644 index 0000000000..cc2efdace1 --- /dev/null +++ b/pipenv/vendor/importlib_metadata/tests/test_main.py @@ -0,0 +1,224 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re +import json +import pickle +import textwrap +import unittest +import importlib +import importlib_metadata + +from . import fixtures +from .. import ( + Distribution, EntryPoint, MetadataPathFinder, + PackageNotFoundError, distributions, + entry_points, metadata, version, + ) + +try: + from builtins import str as text +except ImportError: + from __builtin__ import unicode as text + + +class BasicTests(fixtures.DistInfoPkg, unittest.TestCase): + version_pattern = r'\d+\.\d+(\.\d)?' + + def test_retrieves_version_of_self(self): + dist = Distribution.from_name('distinfo-pkg') + assert isinstance(dist.version, text) + assert re.match(self.version_pattern, dist.version) + + def test_for_name_does_not_exist(self): + with self.assertRaises(PackageNotFoundError): + Distribution.from_name('does-not-exist') + + def test_new_style_classes(self): + self.assertIsInstance(Distribution, type) + self.assertIsInstance(MetadataPathFinder, type) + + +class ImportTests(fixtures.DistInfoPkg, unittest.TestCase): + def test_import_nonexistent_module(self): + # Ensure that the MetadataPathFinder does not crash an import of a + # non-existent module. 
+ with self.assertRaises(ImportError): + importlib.import_module('does_not_exist') + + def test_resolve(self): + entries = dict(entry_points()['entries']) + ep = entries['main'] + self.assertEqual(ep.load().__name__, "main") + + def test_entrypoint_with_colon_in_name(self): + entries = dict(entry_points()['entries']) + ep = entries['ns:sub'] + self.assertEqual(ep.value, 'mod:main') + + def test_resolve_without_attr(self): + ep = EntryPoint( + name='ep', + value='importlib_metadata', + group='grp', + ) + assert ep.load() is importlib_metadata + + +class NameNormalizationTests( + fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase): + @staticmethod + def pkg_with_dashes(site_dir): + """ + Create minimal metadata for a package with dashes + in the name (and thus underscores in the filename). + """ + metadata_dir = site_dir / 'my_pkg.dist-info' + metadata_dir.mkdir() + metadata = metadata_dir / 'METADATA' + with metadata.open('w') as strm: + strm.write('Version: 1.0\n') + return 'my-pkg' + + def test_dashes_in_dist_name_found_as_underscores(self): + """ + For a package with a dash in the name, the dist-info metadata + uses underscores in the name. Ensure the metadata loads. + """ + pkg_name = self.pkg_with_dashes(self.site_dir) + assert version(pkg_name) == '1.0' + + @staticmethod + def pkg_with_mixed_case(site_dir): + """ + Create minimal metadata for a package with mixed case + in the name. + """ + metadata_dir = site_dir / 'CherryPy.dist-info' + metadata_dir.mkdir() + metadata = metadata_dir / 'METADATA' + with metadata.open('w') as strm: + strm.write('Version: 1.0\n') + return 'CherryPy' + + def test_dist_name_found_as_any_case(self): + """ + Ensure the metadata loads when queried with any case. + """ + pkg_name = self.pkg_with_mixed_case(self.site_dir) + assert version(pkg_name) == '1.0' + assert version(pkg_name.lower()) == '1.0' + assert version(pkg_name.upper()) == '1.0' + + +class NonASCIITests(fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase): + @staticmethod + def pkg_with_non_ascii_description(site_dir): + """ + Create minimal metadata for a package with non-ASCII in + the description. + """ + metadata_dir = site_dir / 'portend.dist-info' + metadata_dir.mkdir() + metadata = metadata_dir / 'METADATA' + with metadata.open('w', encoding='utf-8') as fp: + fp.write('Description: pôrˈtend\n') + return 'portend' + + @staticmethod + def pkg_with_non_ascii_description_egg_info(site_dir): + """ + Create minimal metadata for an egg-info package with + non-ASCII in the description. 
+ """ + metadata_dir = site_dir / 'portend.dist-info' + metadata_dir.mkdir() + metadata = metadata_dir / 'METADATA' + with metadata.open('w', encoding='utf-8') as fp: + fp.write(textwrap.dedent(""" + Name: portend + + pôrˈtend + """).lstrip()) + return 'portend' + + def test_metadata_loads(self): + pkg_name = self.pkg_with_non_ascii_description(self.site_dir) + meta = metadata(pkg_name) + assert meta['Description'] == 'pôrˈtend' + + def test_metadata_loads_egg_info(self): + pkg_name = self.pkg_with_non_ascii_description_egg_info(self.site_dir) + meta = metadata(pkg_name) + assert meta.get_payload() == 'pôrˈtend\n' + + +class DiscoveryTests(fixtures.EggInfoPkg, + fixtures.DistInfoPkg, + unittest.TestCase): + + def test_package_discovery(self): + dists = list(distributions()) + assert all( + isinstance(dist, Distribution) + for dist in dists + ) + assert any( + dist.metadata['Name'] == 'egginfo-pkg' + for dist in dists + ) + assert any( + dist.metadata['Name'] == 'distinfo-pkg' + for dist in dists + ) + + def test_invalid_usage(self): + with self.assertRaises(ValueError): + list(distributions(context='something', name='else')) + + +class DirectoryTest(fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase): + def test_egg_info(self): + # make an `EGG-INFO` directory that's unrelated + self.site_dir.joinpath('EGG-INFO').mkdir() + # used to crash with `IsADirectoryError` + with self.assertRaises(PackageNotFoundError): + version('unknown-package') + + def test_egg(self): + egg = self.site_dir.joinpath('foo-3.6.egg') + egg.mkdir() + with self.add_sys_path(egg): + with self.assertRaises(PackageNotFoundError): + version('foo') + + +class TestEntryPoints(unittest.TestCase): + def __init__(self, *args): + super(TestEntryPoints, self).__init__(*args) + self.ep = importlib_metadata.EntryPoint('name', 'value', 'group') + + def test_entry_point_pickleable(self): + revived = pickle.loads(pickle.dumps(self.ep)) + assert revived == self.ep + + def test_immutable(self): + """EntryPoints should be immutable""" + with self.assertRaises(AttributeError): + self.ep.name = 'badactor' + + def test_repr(self): + assert 'EntryPoint' in repr(self.ep) + assert 'name=' in repr(self.ep) + assert "'name'" in repr(self.ep) + + def test_hashable(self): + """EntryPoints should be hashable""" + hash(self.ep) + + def test_json_dump(self): + """ + json should not expect to be able to dump an EntryPoint + """ + with self.assertRaises(Exception): + json.dumps(self.ep) diff --git a/pipenv/vendor/importlib_metadata/tests/test_zip.py b/pipenv/vendor/importlib_metadata/tests/test_zip.py new file mode 100644 index 0000000000..8cbba63ae4 --- /dev/null +++ b/pipenv/vendor/importlib_metadata/tests/test_zip.py @@ -0,0 +1,70 @@ +import sys +import unittest + +from .. import distribution, entry_points, files, PackageNotFoundError, version + +try: + from importlib.resources import path +except ImportError: + from importlib_resources import path + +try: + from contextlib import ExitStack +except ImportError: + from contextlib2 import ExitStack + + +class TestZip(unittest.TestCase): + root = 'importlib_metadata.tests.data' + + def setUp(self): + # Find the path to the example-*.whl so we can add it to the front of + # sys.path, where we'll then try to find the metadata thereof. 
+ self.resources = ExitStack() + self.addCleanup(self.resources.close) + wheel = self.resources.enter_context( + path(self.root, 'example-21.12-py3-none-any.whl')) + sys.path.insert(0, str(wheel)) + self.resources.callback(sys.path.pop, 0) + + def test_zip_version(self): + self.assertEqual(version('example'), '21.12') + + def test_zip_version_does_not_match(self): + with self.assertRaises(PackageNotFoundError): + version('definitely-not-installed') + + def test_zip_entry_points(self): + scripts = dict(entry_points()['console_scripts']) + entry_point = scripts['example'] + self.assertEqual(entry_point.value, 'example:main') + entry_point = scripts['Example'] + self.assertEqual(entry_point.value, 'example:main') + + def test_missing_metadata(self): + self.assertIsNone(distribution('example').read_text('does not exist')) + + def test_case_insensitive(self): + self.assertEqual(version('Example'), '21.12') + + def test_files(self): + for file in files('example'): + path = str(file.dist.locate_file(file)) + assert '.whl/' in path, path + + +class TestEgg(TestZip): + def setUp(self): + # Find the path to the example-*.egg so we can add it to the front of + # sys.path, where we'll then try to find the metadata thereof. + self.resources = ExitStack() + self.addCleanup(self.resources.close) + egg = self.resources.enter_context( + path(self.root, 'example-21.12-py3.6.egg')) + sys.path.insert(0, str(egg)) + self.resources.callback(sys.path.pop, 0) + + def test_files(self): + for file in files('example'): + path = str(file.dist.locate_file(file)) + assert '.egg/' in path, path diff --git a/pipenv/vendor/more_itertools/LICENSE b/pipenv/vendor/more_itertools/LICENSE new file mode 100644 index 0000000000..0a523bece3 --- /dev/null +++ b/pipenv/vendor/more_itertools/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012 Erik Rose + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
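The ``TestZip`` and ``TestEgg`` cases above turn on the fact that a wheel or
egg is a zip archive, and zip archives on ``sys.path`` take part in metadata
discovery just like directories. A minimal sketch of the same idea outside the
test harness (the wheel path below is hypothetical; the tests instead resolve
the bundled ``example-21.12`` wheel through ``importlib.resources``)::

    import sys

    from importlib_metadata import version

    # A wheel is a zip archive; putting it on sys.path makes the
    # metadata inside it discoverable, as the fixtures above do.
    wheel = '/tmp/example-21.12-py3-none-any.whl'  # hypothetical location
    sys.path.insert(0, wheel)
    try:
        assert version('example') == '21.12'
    finally:
        sys.path.remove(wheel)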
diff --git a/pipenv/vendor/more_itertools/__init__.py b/pipenv/vendor/more_itertools/__init__.py new file mode 100644 index 0000000000..bba462c3db --- /dev/null +++ b/pipenv/vendor/more_itertools/__init__.py @@ -0,0 +1,2 @@ +from more_itertools.more import * # noqa +from more_itertools.recipes import * # noqa diff --git a/pipenv/vendor/more_itertools/more.py b/pipenv/vendor/more_itertools/more.py new file mode 100644 index 0000000000..bd32a26130 --- /dev/null +++ b/pipenv/vendor/more_itertools/more.py @@ -0,0 +1,2333 @@ +from __future__ import print_function + +from collections import Counter, defaultdict, deque +from functools import partial, wraps +from heapq import merge +from itertools import ( + chain, + compress, + count, + cycle, + dropwhile, + groupby, + islice, + repeat, + starmap, + takewhile, + tee +) +from operator import itemgetter, lt, gt, sub +from sys import maxsize, version_info +try: + from collections.abc import Sequence +except ImportError: + from collections import Sequence + +from six import binary_type, string_types, text_type +from six.moves import filter, map, range, zip, zip_longest + +from .recipes import consume, flatten, take + +__all__ = [ + 'adjacent', + 'always_iterable', + 'always_reversible', + 'bucket', + 'chunked', + 'circular_shifts', + 'collapse', + 'collate', + 'consecutive_groups', + 'consumer', + 'count_cycle', + 'difference', + 'distinct_permutations', + 'distribute', + 'divide', + 'exactly_n', + 'first', + 'groupby_transform', + 'ilen', + 'interleave_longest', + 'interleave', + 'intersperse', + 'islice_extended', + 'iterate', + 'last', + 'locate', + 'lstrip', + 'make_decorator', + 'map_reduce', + 'numeric_range', + 'one', + 'padded', + 'peekable', + 'replace', + 'rlocate', + 'rstrip', + 'run_length', + 'seekable', + 'SequenceView', + 'side_effect', + 'sliced', + 'sort_together', + 'split_at', + 'split_after', + 'split_before', + 'split_into', + 'spy', + 'stagger', + 'strip', + 'substrings', + 'unique_to_each', + 'unzip', + 'windowed', + 'with_iter', + 'zip_offset', +] + +_marker = object() + + +def chunked(iterable, n): + """Break *iterable* into lists of length *n*: + + >>> list(chunked([1, 2, 3, 4, 5, 6], 3)) + [[1, 2, 3], [4, 5, 6]] + + If the length of *iterable* is not evenly divisible by *n*, the last + returned list will be shorter: + + >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3)) + [[1, 2, 3], [4, 5, 6], [7, 8]] + + To use a fill-in value instead, see the :func:`grouper` recipe. + + :func:`chunked` is useful for splitting up a computation on a large number + of keys into batches, to be pickled and sent off to worker processes. One + example is operations on rows in MySQL, which does not implement + server-side cursors properly and would otherwise load the entire dataset + into RAM on the client. + + """ + return iter(partial(take, n, iter(iterable)), []) + + +def first(iterable, default=_marker): + """Return the first item of *iterable*, or *default* if *iterable* is + empty. + + >>> first([0, 1, 2, 3]) + 0 + >>> first([], 'some default') + 'some default' + + If *default* is not provided and there are no items in the iterable, + raise ``ValueError``. + + :func:`first` is useful when you have a generator of expensive-to-retrieve + values and want any arbitrary one. It is marginally shorter than + ``next(iter(iterable), default)``. + + """ + try: + return next(iter(iterable)) + except StopIteration: + # I'm on the edge about raising ValueError instead of StopIteration. 
At + # the moment, ValueError wins, because the caller could conceivably + # want to do something different with flow control when I raise the + # exception, and it's weird to explicitly catch StopIteration. + if default is _marker: + raise ValueError('first() was called on an empty iterable, and no ' + 'default value was provided.') + return default + + +def last(iterable, default=_marker): + """Return the last item of *iterable*, or *default* if *iterable* is + empty. + + >>> last([0, 1, 2, 3]) + 3 + >>> last([], 'some default') + 'some default' + + If *default* is not provided and there are no items in the iterable, + raise ``ValueError``. + """ + try: + try: + # Try to access the last item directly + return iterable[-1] + except (TypeError, AttributeError, KeyError): + # If not slice-able, iterate entirely using length-1 deque + return deque(iterable, maxlen=1)[0] + except IndexError: # If the iterable was empty + if default is _marker: + raise ValueError('last() was called on an empty iterable, and no ' + 'default value was provided.') + return default + + +class peekable(object): + """Wrap an iterator to allow lookahead and prepending elements. + + Call :meth:`peek` on the result to get the value that will be returned + by :func:`next`. This won't advance the iterator: + + >>> p = peekable(['a', 'b']) + >>> p.peek() + 'a' + >>> next(p) + 'a' + + Pass :meth:`peek` a default value to return that instead of raising + ``StopIteration`` when the iterator is exhausted. + + >>> p = peekable([]) + >>> p.peek('hi') + 'hi' + + peekables also offer a :meth:`prepend` method, which "inserts" items + at the head of the iterable: + + >>> p = peekable([1, 2, 3]) + >>> p.prepend(10, 11, 12) + >>> next(p) + 10 + >>> p.peek() + 11 + >>> list(p) + [11, 12, 1, 2, 3] + + peekables can be indexed. Index 0 is the item that will be returned by + :func:`next`, index 1 is the item after that, and so on. + The values up to the given index will be cached. + + >>> p = peekable(['a', 'b', 'c', 'd']) + >>> p[0] + 'a' + >>> p[1] + 'b' + >>> next(p) + 'a' + + Negative indexes are supported, but be aware that they will cache the + remaining items in the source iterator, which may require significant + storage. + + To check whether a peekable is exhausted, check its truth value: + + >>> p = peekable(['a', 'b']) + >>> if p: # peekable has items + ... list(p) + ['a', 'b'] + >>> if not p: # peekable is exhausted + ... list(p) + [] + + """ + def __init__(self, iterable): + self._it = iter(iterable) + self._cache = deque() + + def __iter__(self): + return self + + def __bool__(self): + try: + self.peek() + except StopIteration: + return False + return True + + def __nonzero__(self): + # For Python 2 compatibility + return self.__bool__() + + def peek(self, default=_marker): + """Return the item that will be next returned from ``next()``. + + Return ``default`` if there are no items left. If ``default`` is not + provided, raise ``StopIteration``. + + """ + if not self._cache: + try: + self._cache.append(next(self._it)) + except StopIteration: + if default is _marker: + raise + return default + return self._cache[0] + + def prepend(self, *items): + """Stack up items to be the next ones returned from ``next()`` or + ``self.peek()``. The items will be returned in + first in, first out order:: + + >>> p = peekable([1, 2, 3]) + >>> p.prepend(10, 11, 12) + >>> next(p) + 10 + >>> list(p) + [11, 12, 1, 2, 3] + + It is possible, by prepending items, to "resurrect" a peekable that + previously raised ``StopIteration``. 
+ + >>> p = peekable([]) + >>> next(p) + Traceback (most recent call last): + ... + StopIteration + >>> p.prepend(1) + >>> next(p) + 1 + >>> next(p) + Traceback (most recent call last): + ... + StopIteration + + """ + self._cache.extendleft(reversed(items)) + + def __next__(self): + if self._cache: + return self._cache.popleft() + + return next(self._it) + + next = __next__ # For Python 2 compatibility + + def _get_slice(self, index): + # Normalize the slice's arguments + step = 1 if (index.step is None) else index.step + if step > 0: + start = 0 if (index.start is None) else index.start + stop = maxsize if (index.stop is None) else index.stop + elif step < 0: + start = -1 if (index.start is None) else index.start + stop = (-maxsize - 1) if (index.stop is None) else index.stop + else: + raise ValueError('slice step cannot be zero') + + # If either the start or stop index is negative, we'll need to cache + # the rest of the iterable in order to slice from the right side. + if (start < 0) or (stop < 0): + self._cache.extend(self._it) + # Otherwise we'll need to find the rightmost index and cache to that + # point. + else: + n = min(max(start, stop) + 1, maxsize) + cache_len = len(self._cache) + if n >= cache_len: + self._cache.extend(islice(self._it, n - cache_len)) + + return list(self._cache)[index] + + def __getitem__(self, index): + if isinstance(index, slice): + return self._get_slice(index) + + cache_len = len(self._cache) + if index < 0: + self._cache.extend(self._it) + elif index >= cache_len: + self._cache.extend(islice(self._it, index + 1 - cache_len)) + + return self._cache[index] + + +def _collate(*iterables, **kwargs): + """Helper for ``collate()``, called when the user is using the ``reverse`` + or ``key`` keyword arguments on Python versions below 3.5. + + """ + key = kwargs.pop('key', lambda a: a) + reverse = kwargs.pop('reverse', False) + + min_or_max = partial(max if reverse else min, key=itemgetter(0)) + peekables = [peekable(it) for it in iterables] + peekables = [p for p in peekables if p] # Kill empties. + while peekables: + _, p = min_or_max((key(p.peek()), p) for p in peekables) + yield next(p) + peekables = [x for x in peekables if x] + + +def collate(*iterables, **kwargs): + """Return a sorted merge of the items from each of several already-sorted + *iterables*. + + >>> list(collate('ACDZ', 'AZ', 'JKL')) + ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z'] + + Works lazily, keeping only the next value from each iterable in memory. Use + :func:`collate` to, for example, perform an n-way mergesort of items that + don't fit in memory. + + If a *key* function is specified, the iterables will be sorted according + to its result: + + >>> key = lambda s: int(s) # Sort by numeric value, not by string + >>> list(collate(['1', '10'], ['2', '11'], key=key)) + ['1', '2', '10', '11'] + + + If the *iterables* are sorted in descending order, set *reverse* to + ``True``: + + >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True)) + [5, 4, 3, 2, 1, 0] + + If the elements of the passed-in iterables are out of order, you might get + unexpected results. + + On Python 2.7, this function delegates to :func:`heapq.merge` if neither + of the keyword arguments are specified. On Python 3.5+, this function + is an alias for :func:`heapq.merge`. + + """ + if not kwargs: + return merge(*iterables) + + return _collate(*iterables, **kwargs) + + +# If using Python version 3.5 or greater, heapq.merge() will be faster than +# collate - use that instead. 
+if version_info >= (3, 5, 0): + _collate_docstring = collate.__doc__ + collate = partial(merge) + collate.__doc__ = _collate_docstring + + +def consumer(func): + """Decorator that automatically advances a PEP-342-style "reverse iterator" + to its first yield point so you don't have to call ``next()`` on it + manually. + + >>> @consumer + ... def tally(): + ... i = 0 + ... while True: + ... print('Thing number %s is %s.' % (i, (yield))) + ... i += 1 + ... + >>> t = tally() + >>> t.send('red') + Thing number 0 is red. + >>> t.send('fish') + Thing number 1 is fish. + + Without the decorator, you would have to call ``next(t)`` before + ``t.send()`` could be used. + + """ + @wraps(func) + def wrapper(*args, **kwargs): + gen = func(*args, **kwargs) + next(gen) + return gen + return wrapper + + +def ilen(iterable): + """Return the number of items in *iterable*. + + >>> ilen(x for x in range(1000000) if x % 3 == 0) + 333334 + + This consumes the iterable, so handle with care. + + """ + # This approach was selected because benchmarks showed it's likely the + # fastest of the known implementations at the time of writing. + # See GitHub tracker: #236, #230. + counter = count() + deque(zip(iterable, counter), maxlen=0) + return next(counter) + + +def iterate(func, start): + """Return ``start``, ``func(start)``, ``func(func(start))``, ... + + >>> from itertools import islice + >>> list(islice(iterate(lambda x: 2*x, 1), 10)) + [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + """ + while True: + yield start + start = func(start) + + +def with_iter(context_manager): + """Wrap an iterable in a ``with`` statement, so it closes once exhausted. + + For example, this will close the file when the iterator is exhausted:: + + upper_lines = (line.upper() for line in with_iter(open('foo'))) + + Any context manager which returns an iterable is a candidate for + ``with_iter``. + + """ + with context_manager as iterable: + for item in iterable: + yield item + + +def one(iterable, too_short=None, too_long=None): + """Return the first item from *iterable*, which is expected to contain only + that item. Raise an exception if *iterable* is empty or has more than one + item. + + :func:`one` is useful for ensuring that an iterable contains only one item. + For example, it can be used to retrieve the result of a database query + that is expected to return a single row. + + If *iterable* is empty, ``ValueError`` will be raised. You may specify a + different exception with the *too_short* keyword: + + >>> it = [] + >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + ValueError: too few items in iterable (expected 1) + >>> too_short = IndexError('too few items') + >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + IndexError: too few items + + Similarly, if *iterable* contains more than one item, ``ValueError`` will + be raised. You may specify a different exception with the *too_long* + keyword: + + >>> it = ['too', 'many'] + >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + ValueError: too many items in iterable (expected 1) + >>> too_long = RuntimeError + >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + RuntimeError + + Note that :func:`one` attempts to advance *iterable* twice to ensure there + is only one item. If there is more than one, both items will be discarded. 
+ See :func:`spy` or :func:`peekable` to check iterable contents less + destructively. + + """ + it = iter(iterable) + + try: + value = next(it) + except StopIteration: + raise too_short or ValueError('too few items in iterable (expected 1)') + + try: + next(it) + except StopIteration: + pass + else: + raise too_long or ValueError('too many items in iterable (expected 1)') + + return value + + +def distinct_permutations(iterable): + """Yield successive distinct permutations of the elements in *iterable*. + + >>> sorted(distinct_permutations([1, 0, 1])) + [(0, 1, 1), (1, 0, 1), (1, 1, 0)] + + Equivalent to ``set(permutations(iterable))``, except duplicates are not + generated and thrown away. For larger input sequences this is much more + efficient. + + Duplicate permutations arise when there are duplicated elements in the + input iterable. The number of items returned is + `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of + items input, and each `x_i` is the count of a distinct item in the input + sequence. + + """ + def perm_unique_helper(item_counts, perm, i): + """Internal helper function + + :arg item_counts: Stores the unique items in ``iterable`` and how many + times they are repeated + :arg perm: The permutation that is being built for output + :arg i: The index of the permutation being modified + + The output permutations are built up recursively; the distinct items + are placed until their repetitions are exhausted. + """ + if i < 0: + yield tuple(perm) + else: + for item in item_counts: + if item_counts[item] <= 0: + continue + perm[i] = item + item_counts[item] -= 1 + for x in perm_unique_helper(item_counts, perm, i - 1): + yield x + item_counts[item] += 1 + + item_counts = Counter(iterable) + length = sum(item_counts.values()) + + return perm_unique_helper(item_counts, [None] * length, length - 1) + + +def intersperse(e, iterable, n=1): + """Intersperse filler element *e* among the items in *iterable*, leaving + *n* items between each filler element. + + >>> list(intersperse('!', [1, 2, 3, 4, 5])) + [1, '!', 2, '!', 3, '!', 4, '!', 5] + + >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2)) + [1, 2, None, 3, 4, None, 5] + + """ + if n == 0: + raise ValueError('n must be > 0') + elif n == 1: + # interleave(repeat(e), iterable) -> e, x_0, e, e, x_1, e, x_2... + # islice(..., 1, None) -> x_0, e, e, x_1, e, x_2... + return islice(interleave(repeat(e), iterable), 1, None) + else: + # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]... + # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]... + # flatten(...) -> x_0, x_1, e, x_2, x_3... + filler = repeat([e]) + chunks = chunked(iterable, n) + return flatten(islice(interleave(filler, chunks), 1, None)) + + +def unique_to_each(*iterables): + """Return the elements from each of the input iterables that aren't in the + other input iterables. + + For example, suppose you have a set of packages, each with a set of + dependencies:: + + {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} + + If you remove one package, which dependencies can also be removed? + + If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not + associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for + ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: + + >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) + [['A'], ['C'], ['D']] + + If there are duplicates in one input iterable that aren't in the others + they will be duplicated in the output. 
Input order is preserved:: + + >>> unique_to_each("mississippi", "missouri") + [['p', 'p'], ['o', 'u', 'r']] + + It is assumed that the elements of each iterable are hashable. + + """ + pool = [list(it) for it in iterables] + counts = Counter(chain.from_iterable(map(set, pool))) + uniques = {element for element in counts if counts[element] == 1} + return [list(filter(uniques.__contains__, it)) for it in pool] + + +def windowed(seq, n, fillvalue=None, step=1): + """Return a sliding window of width *n* over the given iterable. + + >>> all_windows = windowed([1, 2, 3, 4, 5], 3) + >>> list(all_windows) + [(1, 2, 3), (2, 3, 4), (3, 4, 5)] + + When the window is larger than the iterable, *fillvalue* is used in place + of missing values:: + + >>> list(windowed([1, 2, 3], 4)) + [(1, 2, 3, None)] + + Each window will advance in increments of *step*: + + >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2)) + [(1, 2, 3), (3, 4, 5), (5, 6, '!')] + + """ + if n < 0: + raise ValueError('n must be >= 0') + if n == 0: + yield tuple() + return + if step < 1: + raise ValueError('step must be >= 1') + + it = iter(seq) + window = deque([], n) + append = window.append + + # Initial deque fill + for _ in range(n): + append(next(it, fillvalue)) + yield tuple(window) + + # Appending new items to the right causes old items to fall off the left + i = 0 + for item in it: + append(item) + i = (i + 1) % step + if i % step == 0: + yield tuple(window) + + # If there are items from the iterable in the window, pad with the given + # value and emit them. + if (i % step) and (step - i < n): + for _ in range(step - i): + append(fillvalue) + yield tuple(window) + + +def substrings(iterable, join_func=None): + """Yield all of the substrings of *iterable*. + + >>> [''.join(s) for s in substrings('more')] + ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more'] + + Note that non-string iterables can also be subdivided. + + >>> list(substrings([0, 1, 2])) + [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)] + + """ + # The length-1 substrings + seq = [] + for item in iter(iterable): + seq.append(item) + yield (item,) + seq = tuple(seq) + item_count = len(seq) + + # And the rest + for n in range(2, item_count + 1): + for i in range(item_count - n + 1): + yield seq[i:i + n] + + +class bucket(object): + """Wrap *iterable* and return an object that buckets it into + child iterables based on a *key* function. + + >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] + >>> s = bucket(iterable, key=lambda x: x[0]) + >>> a_iterable = s['a'] + >>> next(a_iterable) + 'a1' + >>> next(a_iterable) + 'a2' + >>> list(s['b']) + ['b1', 'b2', 'b3'] + + The original iterable will be advanced and its items will be cached until + they are used by the child iterables. This may require significant storage. + + By default, attempting to select a bucket to which no items belong will + exhaust the iterable and cache all values. + If you specify a *validator* function, selected buckets will instead be + checked against it. 
+ + >>> from itertools import count + >>> it = count(1, 2) # Infinite sequence of odd numbers + >>> key = lambda x: x % 10 # Bucket by last digit + >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only + >>> s = bucket(it, key=key, validator=validator) + >>> 2 in s + False + >>> list(s[2]) + [] + + """ + def __init__(self, iterable, key, validator=None): + self._it = iter(iterable) + self._key = key + self._cache = defaultdict(deque) + self._validator = validator or (lambda x: True) + + def __contains__(self, value): + if not self._validator(value): + return False + + try: + item = next(self[value]) + except StopIteration: + return False + else: + self._cache[value].appendleft(item) + + return True + + def _get_values(self, value): + """ + Helper to yield items from the parent iterator that match *value*. + Items that don't match are stored in the local cache as they + are encountered. + """ + while True: + # If we've cached some items that match the target value, emit + # the first one and evict it from the cache. + if self._cache[value]: + yield self._cache[value].popleft() + # Otherwise we need to advance the parent iterator to search for + # a matching item, caching the rest. + else: + while True: + try: + item = next(self._it) + except StopIteration: + return + item_value = self._key(item) + if item_value == value: + yield item + break + elif self._validator(item_value): + self._cache[item_value].append(item) + + def __getitem__(self, value): + if not self._validator(value): + return iter(()) + + return self._get_values(value) + + +def spy(iterable, n=1): + """Return a 2-tuple with a list containing the first *n* elements of + *iterable*, and an iterator with the same items as *iterable*. + This allows you to "look ahead" at the items in the iterable without + advancing it. + + There is one item in the list by default: + + >>> iterable = 'abcdefg' + >>> head, iterable = spy(iterable) + >>> head + ['a'] + >>> list(iterable) + ['a', 'b', 'c', 'd', 'e', 'f', 'g'] + + You may use unpacking to retrieve items instead of lists: + + >>> (head,), iterable = spy('abcdefg') + >>> head + 'a' + >>> (first, second), iterable = spy('abcdefg', 2) + >>> first + 'a' + >>> second + 'b' + + The number of items requested can be larger than the number of items in + the iterable: + + >>> iterable = [1, 2, 3, 4, 5] + >>> head, iterable = spy(iterable, 10) + >>> head + [1, 2, 3, 4, 5] + >>> list(iterable) + [1, 2, 3, 4, 5] + + """ + it = iter(iterable) + head = take(n, it) + + return head, chain(head, it) + + +def interleave(*iterables): + """Return a new iterable yielding from each iterable in turn, + until the shortest is exhausted. + + >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) + [1, 4, 6, 2, 5, 7] + + For a version that doesn't terminate after the shortest iterable is + exhausted, see :func:`interleave_longest`. + + """ + return chain.from_iterable(zip(*iterables)) + + +def interleave_longest(*iterables): + """Return a new iterable yielding from each iterable in turn, + skipping any that are exhausted. + + >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) + [1, 4, 6, 2, 5, 7, 3, 8] + + This function produces the same output as :func:`roundrobin`, but may + perform better for some inputs (in particular when the number of iterables + is large). 
+ + """ + i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) + return (x for x in i if x is not _marker) + + +def collapse(iterable, base_type=None, levels=None): + """Flatten an iterable with multiple levels of nesting (e.g., a list of + lists of tuples) into non-iterable types. + + >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] + >>> list(collapse(iterable)) + [1, 2, 3, 4, 5, 6] + + String types are not considered iterable and will not be collapsed. + To avoid collapsing other types, specify *base_type*: + + >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']] + >>> list(collapse(iterable, base_type=tuple)) + ['ab', ('cd', 'ef'), 'gh', 'ij'] + + Specify *levels* to stop flattening after a certain level: + + >>> iterable = [('a', ['b']), ('c', ['d'])] + >>> list(collapse(iterable)) # Fully flattened + ['a', 'b', 'c', 'd'] + >>> list(collapse(iterable, levels=1)) # Only one level flattened + ['a', ['b'], 'c', ['d']] + + """ + def walk(node, level): + if ( + ((levels is not None) and (level > levels)) or + isinstance(node, string_types) or + ((base_type is not None) and isinstance(node, base_type)) + ): + yield node + return + + try: + tree = iter(node) + except TypeError: + yield node + return + else: + for child in tree: + for x in walk(child, level + 1): + yield x + + for x in walk(iterable, 0): + yield x + + +def side_effect(func, iterable, chunk_size=None, before=None, after=None): + """Invoke *func* on each item in *iterable* (or on each *chunk_size* group + of items) before yielding the item. + + `func` must be a function that takes a single argument. Its return value + will be discarded. + + *before* and *after* are optional functions that take no arguments. They + will be executed before iteration starts and after it ends, respectively. + + `side_effect` can be used for logging, updating progress bars, or anything + that is not functionally "pure." + + Emitting a status message: + + >>> from more_itertools import consume + >>> func = lambda item: print('Received {}'.format(item)) + >>> consume(side_effect(func, range(2))) + Received 0 + Received 1 + + Operating on chunks of items: + + >>> pair_sums = [] + >>> func = lambda chunk: pair_sums.append(sum(chunk)) + >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2)) + [0, 1, 2, 3, 4, 5] + >>> list(pair_sums) + [1, 5, 9] + + Writing to a file-like object: + + >>> from io import StringIO + >>> from more_itertools import consume + >>> f = StringIO() + >>> func = lambda x: print(x, file=f) + >>> before = lambda: print(u'HEADER', file=f) + >>> after = f.close + >>> it = [u'a', u'b', u'c'] + >>> consume(side_effect(func, it, before=before, after=after)) + >>> f.closed + True + + """ + try: + if before is not None: + before() + + if chunk_size is None: + for item in iterable: + func(item) + yield item + else: + for chunk in chunked(iterable, chunk_size): + func(chunk) + for item in chunk: + yield item + finally: + if after is not None: + after() + + +def sliced(seq, n): + """Yield slices of length *n* from the sequence *seq*. + + >>> list(sliced((1, 2, 3, 4, 5, 6), 3)) + [(1, 2, 3), (4, 5, 6)] + + If the length of the sequence is not divisible by the requested slice + length, the last slice will be shorter. + + >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3)) + [(1, 2, 3), (4, 5, 6), (7, 8)] + + This function will only work for iterables that support slicing. + For non-sliceable iterables, see :func:`chunked`. 
+
+    """
+    return takewhile(bool, (seq[i: i + n] for i in count(0, n)))
+
+
+def split_at(iterable, pred):
+    """Yield lists of items from *iterable*, where each list is delimited by
+    an item where callable *pred* returns ``True``. The lists do not include
+    the delimiting items.
+
+    >>> list(split_at('abcdcba', lambda x: x == 'b'))
+    [['a'], ['c', 'd', 'c'], ['a']]
+
+    >>> list(split_at(range(10), lambda n: n % 2 == 1))
+    [[0], [2], [4], [6], [8], []]
+    """
+    buf = []
+    for item in iterable:
+        if pred(item):
+            yield buf
+            buf = []
+        else:
+            buf.append(item)
+    yield buf
+
+
+def split_before(iterable, pred):
+    """Yield lists of items from *iterable*, where each list starts with an
+    item where callable *pred* returns ``True``:
+
+    >>> list(split_before('OneTwo', lambda s: s.isupper()))
+    [['O', 'n', 'e'], ['T', 'w', 'o']]
+
+    >>> list(split_before(range(10), lambda n: n % 3 == 0))
+    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
+
+    """
+    buf = []
+    for item in iterable:
+        if pred(item) and buf:
+            yield buf
+            buf = []
+        buf.append(item)
+    yield buf
+
+
+def split_after(iterable, pred):
+    """Yield lists of items from *iterable*, where each list ends with an
+    item where callable *pred* returns ``True``:
+
+    >>> list(split_after('one1two2', lambda s: s.isdigit()))
+    [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
+
+    >>> list(split_after(range(10), lambda n: n % 3 == 0))
+    [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
+
+    """
+    buf = []
+    for item in iterable:
+        buf.append(item)
+        if pred(item) and buf:
+            yield buf
+            buf = []
+    if buf:
+        yield buf
+
+
+def split_into(iterable, sizes):
+    """Yield a list of sequential items from *iterable* of length 'n' for each
+    integer 'n' in *sizes*.
+
+    >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
+    [[1], [2, 3], [4, 5, 6]]
+
+    If the sum of *sizes* is smaller than the length of *iterable*, then the
+    remaining items of *iterable* will not be returned.
+
+    >>> list(split_into([1,2,3,4,5,6], [2,3]))
+    [[1, 2], [3, 4, 5]]
+
+    If the sum of *sizes* is larger than the length of *iterable*, fewer items
+    will be returned in the iteration that overruns *iterable* and further
+    lists will be empty:
+
+    >>> list(split_into([1,2,3,4], [1,2,3,4]))
+    [[1], [2, 3], [4], []]
+
+    When a ``None`` object is encountered in *sizes*, the returned list will
+    contain items up to the end of *iterable* the same way that
+    :func:`itertools.islice` does:
+
+    >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
+    [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
+
+    :func:`split_into` can be useful for grouping a series of items where the
+    sizes of the groups are not uniform. An example would be where in a row
+    from a table, multiple columns represent elements of the same feature
+    (e.g. a point represented by x, y, z), but the format is not the same for
+    all columns.
+    """
+    # convert the iterable argument into an iterator so its contents can
+    # be consumed by islice in case it is a generator
+    it = iter(iterable)
+
+    for size in sizes:
+        if size is None:
+            yield list(it)
+            return
+        else:
+            yield list(islice(it, size))
+
+
+def padded(iterable, fillvalue=None, n=None, next_multiple=False):
+    """Yield the elements from *iterable*, followed by *fillvalue*, such that
+    at least *n* items are emitted.
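+
+    If *iterable* already contains *n* or more items, it is passed through
+    unchanged:
+
+    >>> list(padded([1, 2, 3], '?', 2))
+    [1, 2, 3]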
+
+    >>> list(padded([1, 2, 3], '?', 5))
+    [1, 2, 3, '?', '?']
+
+    If *next_multiple* is ``True``, *fillvalue* will be emitted until the
+    number of items emitted is a multiple of *n*::
+
+        >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
+        [1, 2, 3, 4, None, None]
+
+    If *n* is ``None``, *fillvalue* will be emitted indefinitely.
+
+    """
+    it = iter(iterable)
+    if n is None:
+        for item in chain(it, repeat(fillvalue)):
+            yield item
+    elif n < 1:
+        raise ValueError('n must be at least 1')
+    else:
+        item_count = 0
+        for item in it:
+            yield item
+            item_count += 1
+
+        remaining = (n - item_count) % n if next_multiple else n - item_count
+        for _ in range(remaining):
+            yield fillvalue
+
+
+def distribute(n, iterable):
+    """Distribute the items from *iterable* among *n* smaller iterables.
+
+    >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
+    >>> list(group_1)
+    [1, 3, 5]
+    >>> list(group_2)
+    [2, 4, 6]
+
+    If the length of *iterable* is not evenly divisible by *n*, then the
+    length of the returned iterables will not be identical:
+
+    >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
+    >>> [list(c) for c in children]
+    [[1, 4, 7], [2, 5], [3, 6]]
+
+    If the length of *iterable* is smaller than *n*, then the last returned
+    iterables will be empty:
+
+    >>> children = distribute(5, [1, 2, 3])
+    >>> [list(c) for c in children]
+    [[1], [2], [3], [], []]
+
+    This function uses :func:`itertools.tee` and may require significant
+    storage. If you need the order of items in the smaller iterables to match
+    the original iterable, see :func:`divide`.
+
+    """
+    if n < 1:
+        raise ValueError('n must be at least 1')
+
+    children = tee(iterable, n)
+    return [islice(it, index, None, n) for index, it in enumerate(children)]
+
+
+def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
+    """Yield tuples whose elements are offset from *iterable*.
+    The amount by which the `i`-th item in each tuple is offset is given by
+    the `i`-th item in *offsets*.
+
+    >>> list(stagger([0, 1, 2, 3]))
+    [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
+    >>> list(stagger(range(8), offsets=(0, 2, 4)))
+    [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
+
+    By default, the sequence will end when the final element of a tuple is the
+    last item in the iterable. To continue until the first element of a tuple
+    is the last item in the iterable, set *longest* to ``True``::
+
+        >>> list(stagger([0, 1, 2, 3], longest=True))
+        [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
+
+    By default, ``None`` will be used to replace offsets beyond the end of the
+    sequence. Specify *fillvalue* to use some other value.
+
+    """
+    children = tee(iterable, len(offsets))
+
+    return zip_offset(
+        *children, offsets=offsets, longest=longest, fillvalue=fillvalue
+    )
+
+
+def zip_offset(*iterables, **kwargs):
+    """``zip`` the input *iterables* together, but offset the `i`-th iterable
+    by the `i`-th item in *offsets*.
+
+    >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
+    [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
+
+    This can be used as a lightweight alternative to SciPy or pandas to analyze
+    data sets in which some series have a lead or lag relationship.
+
+    By default, the sequence will end when the shortest iterable is exhausted.
+    To continue until the longest iterable is exhausted, set *longest* to
+    ``True``.
+
+    >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
+    [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
+
+    By default, ``None`` will be used to replace offsets beyond the end of the
+    sequence. Specify *fillvalue* to use some other value.
+
+    """
+    offsets = kwargs['offsets']
+    longest = kwargs.get('longest', False)
+    fillvalue = kwargs.get('fillvalue', None)
+
+    if len(iterables) != len(offsets):
+        raise ValueError("Number of iterables and offsets didn't match")
+
+    staggered = []
+    for it, n in zip(iterables, offsets):
+        if n < 0:
+            staggered.append(chain(repeat(fillvalue, -n), it))
+        elif n > 0:
+            staggered.append(islice(it, n, None))
+        else:
+            staggered.append(it)
+
+    if longest:
+        return zip_longest(*staggered, fillvalue=fillvalue)
+
+    return zip(*staggered)
+
+
+def sort_together(iterables, key_list=(0,), reverse=False):
+    """Return the input iterables sorted together, with *key_list* as the
+    priority for sorting. All iterables are trimmed to the length of the
+    shortest one.
+
+    This can be used like the sorting function in a spreadsheet. If each
+    iterable represents a column of data, the key list determines which
+    columns are used for sorting.
+
+    By default, all iterables are sorted using the ``0``-th iterable::
+
+        >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
+        >>> sort_together(iterables)
+        [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
+
+    Set a different key list to sort according to another iterable.
+    Specifying multiple keys dictates how ties are broken::
+
+        >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
+        >>> sort_together(iterables, key_list=(1, 2))
+        [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
+
+    Set *reverse* to ``True`` to sort in descending order.
+
+    >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
+    [(3, 2, 1), ('a', 'b', 'c')]
+
+    """
+    return list(zip(*sorted(zip(*iterables),
+                            key=itemgetter(*key_list),
+                            reverse=reverse)))
+
+
+def unzip(iterable):
+    """The inverse of :func:`zip`, this function disaggregates the elements
+    of the zipped *iterable*.
+
+    The ``i``-th iterable contains the ``i``-th element from each element
+    of the zipped iterable. The first element is used to determine the
+    length of the remaining elements.
+
+    >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
+    >>> letters, numbers = unzip(iterable)
+    >>> list(letters)
+    ['a', 'b', 'c', 'd']
+    >>> list(numbers)
+    [1, 2, 3, 4]
+
+    This is similar to using ``zip(*iterable)``, but it avoids reading
+    *iterable* into memory. Note, however, that this function uses
+    :func:`itertools.tee` and thus may require significant storage.
+
+    """
+    head, iterable = spy(iter(iterable))
+    if not head:
+        # empty iterable, e.g. 
zip([], [], []) + return () + # spy returns a one-length iterable as head + head = head[0] + iterables = tee(iterable, len(head)) + + def itemgetter(i): + def getter(obj): + try: + return obj[i] + except IndexError: + # basically if we have an iterable like + # iter([(1, 2, 3), (4, 5), (6,)]) + # the second unzipped iterable would fail at the third tuple + # since it would try to access tup[1] + # same with the third unzipped iterable and the second tuple + # to support these "improperly zipped" iterables, + # we create a custom itemgetter + # which just stops the unzipped iterables + # at first length mismatch + raise StopIteration + return getter + + return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) + + +def divide(n, iterable): + """Divide the elements from *iterable* into *n* parts, maintaining + order. + + >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6]) + >>> list(group_1) + [1, 2, 3] + >>> list(group_2) + [4, 5, 6] + + If the length of *iterable* is not evenly divisible by *n*, then the + length of the returned iterables will not be identical: + + >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7]) + >>> [list(c) for c in children] + [[1, 2, 3], [4, 5], [6, 7]] + + If the length of the iterable is smaller than n, then the last returned + iterables will be empty: + + >>> children = divide(5, [1, 2, 3]) + >>> [list(c) for c in children] + [[1], [2], [3], [], []] + + This function will exhaust the iterable before returning and may require + significant storage. If order is not important, see :func:`distribute`, + which does not first pull the iterable into memory. + + """ + if n < 1: + raise ValueError('n must be at least 1') + + seq = tuple(iterable) + q, r = divmod(len(seq), n) + + ret = [] + for i in range(n): + start = (i * q) + (i if i < r else r) + stop = ((i + 1) * q) + (i + 1 if i + 1 < r else r) + ret.append(iter(seq[start:stop])) + + return ret + + +def always_iterable(obj, base_type=(text_type, binary_type)): + """If *obj* is iterable, return an iterator over its items:: + + >>> obj = (1, 2, 3) + >>> list(always_iterable(obj)) + [1, 2, 3] + + If *obj* is not iterable, return a one-item iterable containing *obj*:: + + >>> obj = 1 + >>> list(always_iterable(obj)) + [1] + + If *obj* is ``None``, return an empty iterable: + + >>> obj = None + >>> list(always_iterable(None)) + [] + + By default, binary and text strings are not considered iterable:: + + >>> obj = 'foo' + >>> list(always_iterable(obj)) + ['foo'] + + If *base_type* is set, objects for which ``isinstance(obj, base_type)`` + returns ``True`` won't be considered iterable. + + >>> obj = {'a': 1} + >>> list(always_iterable(obj)) # Iterate over the dict's keys + ['a'] + >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit + [{'a': 1}] + + Set *base_type* to ``None`` to avoid any special handling and treat objects + Python considers iterable as iterable: + + >>> obj = 'foo' + >>> list(always_iterable(obj, base_type=None)) + ['f', 'o', 'o'] + """ + if obj is None: + return iter(()) + + if (base_type is not None) and isinstance(obj, base_type): + return iter((obj,)) + + try: + return iter(obj) + except TypeError: + return iter((obj,)) + + +def adjacent(predicate, iterable, distance=1): + """Return an iterable over `(bool, item)` tuples where the `item` is + drawn from *iterable* and the `bool` indicates whether + that item satisfies the *predicate* or is adjacent to an item that does. 
+ + For example, to find whether items are adjacent to a ``3``:: + + >>> list(adjacent(lambda x: x == 3, range(6))) + [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] + + Set *distance* to change what counts as adjacent. For example, to find + whether items are two places away from a ``3``: + + >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) + [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] + + This is useful for contextualizing the results of a search function. + For example, a code comparison tool might want to identify lines that + have changed, but also surrounding lines to give the viewer of the diff + context. + + The predicate function will only be called once for each item in the + iterable. + + See also :func:`groupby_transform`, which can be used with this function + to group ranges of items with the same `bool` value. + + """ + # Allow distance=0 mainly for testing that it reproduces results with map() + if distance < 0: + raise ValueError('distance must be at least 0') + + i1, i2 = tee(iterable) + padding = [False] * distance + selected = chain(padding, map(predicate, i1), padding) + adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) + return zip(adjacent_to_selected, i2) + + +def groupby_transform(iterable, keyfunc=None, valuefunc=None): + """An extension of :func:`itertools.groupby` that transforms the values of + *iterable* after grouping them. + *keyfunc* is a function used to compute a grouping key for each item. + *valuefunc* is a function for transforming the items after grouping. + + >>> iterable = 'AaaABbBCcA' + >>> keyfunc = lambda x: x.upper() + >>> valuefunc = lambda x: x.lower() + >>> grouper = groupby_transform(iterable, keyfunc, valuefunc) + >>> [(k, ''.join(g)) for k, g in grouper] + [('A', 'aaaa'), ('B', 'bbb'), ('C', 'cc'), ('A', 'a')] + + *keyfunc* and *valuefunc* default to identity functions if they are not + specified. + + :func:`groupby_transform` is useful when grouping elements of an iterable + using a separate iterable as the key. To do this, :func:`zip` the iterables + and pass a *keyfunc* that extracts the first element and a *valuefunc* + that extracts the second element:: + + >>> from operator import itemgetter + >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] + >>> values = 'abcdefghi' + >>> iterable = zip(keys, values) + >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) + >>> [(k, ''.join(g)) for k, g in grouper] + [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] + + Note that the order of items in the iterable is significant. + Only adjacent items are grouped together, so if you don't want any + duplicate groups, you should sort the iterable by the key function. + + """ + valuefunc = (lambda x: x) if valuefunc is None else valuefunc + return ((k, map(valuefunc, g)) for k, g in groupby(iterable, keyfunc)) + + +def numeric_range(*args): + """An extension of the built-in ``range()`` function whose arguments can + be any orderable numeric type. + + With only *stop* specified, *start* defaults to ``0`` and *step* + defaults to ``1``. The output items will match the type of *stop*: + + >>> list(numeric_range(3.5)) + [0.0, 1.0, 2.0, 3.0] + + With only *start* and *stop* specified, *step* defaults to ``1``. 
The + output items will match the type of *start*: + + >>> from decimal import Decimal + >>> start = Decimal('2.1') + >>> stop = Decimal('5.1') + >>> list(numeric_range(start, stop)) + [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] + + With *start*, *stop*, and *step* specified the output items will match + the type of ``start + step``: + + >>> from fractions import Fraction + >>> start = Fraction(1, 2) # Start at 1/2 + >>> stop = Fraction(5, 2) # End at 5/2 + >>> step = Fraction(1, 2) # Count by 1/2 + >>> list(numeric_range(start, stop, step)) + [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] + + If *step* is zero, ``ValueError`` is raised. Negative steps are supported: + + >>> list(numeric_range(3, -1, -1.0)) + [3.0, 2.0, 1.0, 0.0] + + Be aware of the limitations of floating point numbers; the representation + of the yielded numbers may be surprising. + + """ + argc = len(args) + if argc == 1: + stop, = args + start = type(stop)(0) + step = 1 + elif argc == 2: + start, stop = args + step = 1 + elif argc == 3: + start, stop, step = args + else: + err_msg = 'numeric_range takes at most 3 arguments, got {}' + raise TypeError(err_msg.format(argc)) + + values = (start + (step * n) for n in count()) + if step > 0: + return takewhile(partial(gt, stop), values) + elif step < 0: + return takewhile(partial(lt, stop), values) + else: + raise ValueError('numeric_range arg 3 must not be zero') + + +def count_cycle(iterable, n=None): + """Cycle through the items from *iterable* up to *n* times, yielding + the number of completed cycles along with each item. If *n* is omitted the + process repeats indefinitely. + + >>> list(count_cycle('AB', 3)) + [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] + + """ + iterable = tuple(iterable) + if not iterable: + return iter(()) + counter = count() if n is None else range(n) + return ((i, item) for i in counter for item in iterable) + + +def locate(iterable, pred=bool, window_size=None): + """Yield the index of each item in *iterable* for which *pred* returns + ``True``. + + *pred* defaults to :func:`bool`, which will select truthy items: + + >>> list(locate([0, 1, 1, 0, 1, 0, 0])) + [1, 2, 4] + + Set *pred* to a custom function to, e.g., find the indexes for a particular + item. + + >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b')) + [1, 3] + + If *window_size* is given, then the *pred* function will be called with + that many items. This enables searching for sub-sequences: + + >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] + >>> pred = lambda *args: args == (1, 2, 3) + >>> list(locate(iterable, pred=pred, window_size=3)) + [1, 5, 9] + + Use with :func:`seekable` to find indexes and then retrieve the associated + items: + + >>> from itertools import count + >>> from more_itertools import seekable + >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count()) + >>> it = seekable(source) + >>> pred = lambda x: x > 100 + >>> indexes = locate(it, pred=pred) + >>> i = next(indexes) + >>> it.seek(i) + >>> next(it) + 106 + + """ + if window_size is None: + return compress(count(), map(pred, iterable)) + + if window_size < 1: + raise ValueError('window size must be at least 1') + + it = windowed(iterable, window_size, fillvalue=_marker) + return compress(count(), starmap(pred, it)) + + +def lstrip(iterable, pred): + """Yield the items from *iterable*, but strip any from the beginning + for which *pred* returns ``True``. 
+
+    For example, to remove a set of items from the start of an iterable:
+
+    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+    >>> pred = lambda x: x in {None, False, ''}
+    >>> list(lstrip(iterable, pred))
+    [1, 2, None, 3, False, None]
+
+    This function is analogous to :func:`str.lstrip`, and is essentially
+    a wrapper for :func:`itertools.dropwhile`.
+
+    """
+    return dropwhile(pred, iterable)
+
+
+def rstrip(iterable, pred):
+    """Yield the items from *iterable*, but strip any from the end
+    for which *pred* returns ``True``.
+
+    For example, to remove a set of items from the end of an iterable:
+
+    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+    >>> pred = lambda x: x in {None, False, ''}
+    >>> list(rstrip(iterable, pred))
+    [None, False, None, 1, 2, None, 3]
+
+    This function is analogous to :func:`str.rstrip`.
+
+    """
+    cache = []
+    cache_append = cache.append
+    for x in iterable:
+        if pred(x):
+            cache_append(x)
+        else:
+            for y in cache:
+                yield y
+            del cache[:]
+            yield x
+
+
+def strip(iterable, pred):
+    """Yield the items from *iterable*, but strip any from the
+    beginning and end for which *pred* returns ``True``.
+
+    For example, to remove a set of items from both ends of an iterable:
+
+    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+    >>> pred = lambda x: x in {None, False, ''}
+    >>> list(strip(iterable, pred))
+    [1, 2, None, 3]
+
+    This function is analogous to :func:`str.strip`.
+
+    """
+    return rstrip(lstrip(iterable, pred), pred)
+
+
+def islice_extended(iterable, *args):
+    """An extension of :func:`itertools.islice` that supports negative values
+    for *stop*, *start*, and *step*.
+
+    >>> iterable = iter('abcdefgh')
+    >>> list(islice_extended(iterable, -4, -1))
+    ['e', 'f', 'g']
+
+    Slices with negative values require some caching of *iterable*, but this
+    function takes care to minimize the amount of memory required.
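+
+    A ``-1`` step with no *start* or *stop* reverses a plain iterator, much
+    as :func:`reversed` does for sequences:
+
+    >>> list(islice_extended(iter('abc'), None, None, -1))
+    ['c', 'b', 'a']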
+ + For example, you can use a negative step with an infinite iterator: + + >>> from itertools import count + >>> list(islice_extended(count(), 110, 99, -2)) + [110, 108, 106, 104, 102, 100] + + """ + s = slice(*args) + start = s.start + stop = s.stop + if s.step == 0: + raise ValueError('step argument must be a non-zero integer or None.') + step = s.step or 1 + + it = iter(iterable) + + if step > 0: + start = 0 if (start is None) else start + + if (start < 0): + # Consume all but the last -start items + cache = deque(enumerate(it, 1), maxlen=-start) + len_iter = cache[-1][0] if cache else 0 + + # Adjust start to be positive + i = max(len_iter + start, 0) + + # Adjust stop to be positive + if stop is None: + j = len_iter + elif stop >= 0: + j = min(stop, len_iter) + else: + j = max(len_iter + stop, 0) + + # Slice the cache + n = j - i + if n <= 0: + return + + for index, item in islice(cache, 0, n, step): + yield item + elif (stop is not None) and (stop < 0): + # Advance to the start position + next(islice(it, start, start), None) + + # When stop is negative, we have to carry -stop items while + # iterating + cache = deque(islice(it, -stop), maxlen=-stop) + + for index, item in enumerate(it): + cached_item = cache.popleft() + if index % step == 0: + yield cached_item + cache.append(item) + else: + # When both start and stop are positive we have the normal case + for item in islice(it, start, stop, step): + yield item + else: + start = -1 if (start is None) else start + + if (stop is not None) and (stop < 0): + # Consume all but the last items + n = -stop - 1 + cache = deque(enumerate(it, 1), maxlen=n) + len_iter = cache[-1][0] if cache else 0 + + # If start and stop are both negative they are comparable and + # we can just slice. Otherwise we can adjust start to be negative + # and then slice. + if start < 0: + i, j = start, stop + else: + i, j = min(start - len_iter, -1), None + + for index, item in list(cache)[i:j:step]: + yield item + else: + # Advance to the stop position + if stop is not None: + m = stop + 1 + next(islice(it, m, m), None) + + # stop is positive, so if start is negative they are not comparable + # and we need the rest of the items. + if start < 0: + i = start + n = None + # stop is None and start is positive, so we just need items up to + # the start index. + elif stop is None: + i = None + n = start + 1 + # Both stop and start are positive, so they are comparable. + else: + i = None + n = start - stop + if n <= 0: + return + + cache = list(islice(it, n)) + + for item in cache[i::step]: + yield item + + +def always_reversible(iterable): + """An extension of :func:`reversed` that supports all iterables, not + just those which implement the ``Reversible`` or ``Sequence`` protocols. + + >>> print(*always_reversible(x for x in range(3))) + 2 1 0 + + If the iterable is already reversible, this function returns the + result of :func:`reversed()`. If the iterable is not reversible, + this function will cache the remaining items in the iterable and + yield them in reverse order, which may require significant storage. + """ + try: + return reversed(iterable) + except TypeError: + return reversed(list(iterable)) + + +def consecutive_groups(iterable, ordering=lambda x: x): + """Yield groups of consecutive items using :func:`itertools.groupby`. + The *ordering* function determines whether two items are adjacent by + returning their position. + + By default, the ordering function is the identity function. 
This is + suitable for finding runs of numbers: + + >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40] + >>> for group in consecutive_groups(iterable): + ... print(list(group)) + [1] + [10, 11, 12] + [20] + [30, 31, 32, 33] + [40] + + For finding runs of adjacent letters, try using the :meth:`index` method + of a string of letters: + + >>> from string import ascii_lowercase + >>> iterable = 'abcdfgilmnop' + >>> ordering = ascii_lowercase.index + >>> for group in consecutive_groups(iterable, ordering): + ... print(list(group)) + ['a', 'b', 'c', 'd'] + ['f', 'g'] + ['i'] + ['l', 'm', 'n', 'o', 'p'] + + """ + for k, g in groupby( + enumerate(iterable), key=lambda x: x[0] - ordering(x[1]) + ): + yield map(itemgetter(1), g) + + +def difference(iterable, func=sub): + """By default, compute the first difference of *iterable* using + :func:`operator.sub`. + + >>> iterable = [0, 1, 3, 6, 10] + >>> list(difference(iterable)) + [0, 1, 2, 3, 4] + + This is the opposite of :func:`accumulate`'s default behavior: + + >>> from more_itertools import accumulate + >>> iterable = [0, 1, 2, 3, 4] + >>> list(accumulate(iterable)) + [0, 1, 3, 6, 10] + >>> list(difference(accumulate(iterable))) + [0, 1, 2, 3, 4] + + By default *func* is :func:`operator.sub`, but other functions can be + specified. They will be applied as follows:: + + A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ... + + For example, to do progressive division: + + >>> iterable = [1, 2, 6, 24, 120] # Factorial sequence + >>> func = lambda x, y: x // y + >>> list(difference(iterable, func)) + [1, 2, 3, 4, 5] + + """ + a, b = tee(iterable) + try: + item = next(b) + except StopIteration: + return iter([]) + return chain([item], map(lambda x: func(x[1], x[0]), zip(a, b))) + + +class SequenceView(Sequence): + """Return a read-only view of the sequence object *target*. + + :class:`SequenceView` objects are analogous to Python's built-in + "dictionary view" types. They provide a dynamic view of a sequence's items, + meaning that when the sequence updates, so does the view. + + >>> seq = ['0', '1', '2'] + >>> view = SequenceView(seq) + >>> view + SequenceView(['0', '1', '2']) + >>> seq.append('3') + >>> view + SequenceView(['0', '1', '2', '3']) + + Sequence views support indexing, slicing, and length queries. They act + like the underlying sequence, except they don't allow assignment: + + >>> view[1] + '1' + >>> view[1:-1] + ['1', '2'] + >>> len(view) + 4 + + Sequence views are useful as an alternative to copying, as they don't + require (much) extra storage. + + """ + def __init__(self, target): + if not isinstance(target, Sequence): + raise TypeError + self._target = target + + def __getitem__(self, index): + return self._target[index] + + def __len__(self): + return len(self._target) + + def __repr__(self): + return '{}({})'.format(self.__class__.__name__, repr(self._target)) + + +class seekable(object): + """Wrap an iterator to allow for seeking backward and forward. This + progressively caches the items in the source iterable so they can be + re-visited. + + Call :meth:`seek` with an index to seek to that position in the source + iterable. 
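+
+    For example, seeking backward replays previously-seen items:
+
+    >>> it = seekable(iter('abc'))
+    >>> next(it), next(it)
+    ('a', 'b')
+    >>> it.seek(1)
+    >>> next(it)
+    'b'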
+ + To "reset" an iterator, seek to ``0``: + + >>> from itertools import count + >>> it = seekable((str(n) for n in count())) + >>> next(it), next(it), next(it) + ('0', '1', '2') + >>> it.seek(0) + >>> next(it), next(it), next(it) + ('0', '1', '2') + >>> next(it) + '3' + + You can also seek forward: + + >>> it = seekable((str(n) for n in range(20))) + >>> it.seek(10) + >>> next(it) + '10' + >>> it.seek(20) # Seeking past the end of the source isn't a problem + >>> list(it) + [] + >>> it.seek(0) # Resetting works even after hitting the end + >>> next(it), next(it), next(it) + ('0', '1', '2') + + The cache grows as the source iterable progresses, so beware of wrapping + very large or infinite iterables. + + You may view the contents of the cache with the :meth:`elements` method. + That returns a :class:`SequenceView`, a view that updates automatically: + + >>> it = seekable((str(n) for n in range(10))) + >>> next(it), next(it), next(it) + ('0', '1', '2') + >>> elements = it.elements() + >>> elements + SequenceView(['0', '1', '2']) + >>> next(it) + '3' + >>> elements + SequenceView(['0', '1', '2', '3']) + + """ + + def __init__(self, iterable): + self._source = iter(iterable) + self._cache = [] + self._index = None + + def __iter__(self): + return self + + def __next__(self): + if self._index is not None: + try: + item = self._cache[self._index] + except IndexError: + self._index = None + else: + self._index += 1 + return item + + item = next(self._source) + self._cache.append(item) + return item + + next = __next__ + + def elements(self): + return SequenceView(self._cache) + + def seek(self, index): + self._index = index + remainder = index - len(self._cache) + if remainder > 0: + consume(self, remainder) + + +class run_length(object): + """ + :func:`run_length.encode` compresses an iterable with run-length encoding. + It yields groups of repeated items with the count of how many times they + were repeated: + + >>> uncompressed = 'abbcccdddd' + >>> list(run_length.encode(uncompressed)) + [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + + :func:`run_length.decode` decompresses an iterable that was previously + compressed with run-length encoding. It yields the items of the + decompressed iterable: + + >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + >>> list(run_length.decode(compressed)) + ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] + + """ + + @staticmethod + def encode(iterable): + return ((k, ilen(g)) for k, g in groupby(iterable)) + + @staticmethod + def decode(iterable): + return chain.from_iterable(repeat(k, n) for k, n in iterable) + + +def exactly_n(iterable, n, predicate=bool): + """Return ``True`` if exactly ``n`` items in the iterable are ``True`` + according to the *predicate* function. + + >>> exactly_n([True, True, False], 2) + True + >>> exactly_n([True, True, False], 1) + False + >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) + True + + The iterable will be advanced until ``n + 1`` truthy items are encountered, + so avoid calling it on infinite iterables. + + """ + return len(take(n + 1, filter(predicate, iterable))) == n + + +def circular_shifts(iterable): + """Return a list of circular shifts of *iterable*. + + >>> circular_shifts(range(4)) + [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] + """ + lst = list(iterable) + return take(len(lst), windowed(cycle(lst), len(lst))) + + +def make_decorator(wrapping_func, result_index=0): + """Return a decorator version of *wrapping_func*, which is a function that + modifies an iterable. 
*result_index* is the position in that function's + signature where the iterable goes. + + This lets you use itertools on the "production end," i.e. at function + definition. This can augment what the function returns without changing the + function's code. + + For example, to produce a decorator version of :func:`chunked`: + + >>> from more_itertools import chunked + >>> chunker = make_decorator(chunked, result_index=0) + >>> @chunker(3) + ... def iter_range(n): + ... return iter(range(n)) + ... + >>> list(iter_range(9)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + + To only allow truthy items to be returned: + + >>> truth_serum = make_decorator(filter, result_index=1) + >>> @truth_serum(bool) + ... def boolean_test(): + ... return [0, 1, '', ' ', False, True] + ... + >>> list(boolean_test()) + [1, ' ', True] + + The :func:`peekable` and :func:`seekable` wrappers make for practical + decorators: + + >>> from more_itertools import peekable + >>> peekable_function = make_decorator(peekable) + >>> @peekable_function() + ... def str_range(*args): + ... return (str(x) for x in range(*args)) + ... + >>> it = str_range(1, 20, 2) + >>> next(it), next(it), next(it) + ('1', '3', '5') + >>> it.peek() + '7' + >>> next(it) + '7' + + """ + # See https://sites.google.com/site/bbayles/index/decorator_factory for + # notes on how this works. + def decorator(*wrapping_args, **wrapping_kwargs): + def outer_wrapper(f): + def inner_wrapper(*args, **kwargs): + result = f(*args, **kwargs) + wrapping_args_ = list(wrapping_args) + wrapping_args_.insert(result_index, result) + return wrapping_func(*wrapping_args_, **wrapping_kwargs) + + return inner_wrapper + + return outer_wrapper + + return decorator + + +def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): + """Return a dictionary that maps the items in *iterable* to categories + defined by *keyfunc*, transforms them with *valuefunc*, and + then summarizes them by category with *reducefunc*. + + *valuefunc* defaults to the identity function if it is unspecified. + If *reducefunc* is unspecified, no summarization takes place: + + >>> keyfunc = lambda x: x.upper() + >>> result = map_reduce('abbccc', keyfunc) + >>> sorted(result.items()) + [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] + + Specifying *valuefunc* transforms the categorized items: + + >>> keyfunc = lambda x: x.upper() + >>> valuefunc = lambda x: 1 + >>> result = map_reduce('abbccc', keyfunc, valuefunc) + >>> sorted(result.items()) + [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] + + Specifying *reducefunc* summarizes the categorized items: + + >>> keyfunc = lambda x: x.upper() + >>> valuefunc = lambda x: 1 + >>> reducefunc = sum + >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) + >>> sorted(result.items()) + [('A', 1), ('B', 2), ('C', 3)] + + You may want to filter the input iterable before applying the map/reduce + procedure: + + >>> all_items = range(30) + >>> items = [x for x in all_items if 10 <= x <= 20] # Filter + >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 + >>> categories = map_reduce(items, keyfunc=keyfunc) + >>> sorted(categories.items()) + [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] + >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) + >>> sorted(summaries.items()) + [(0, 90), (1, 75)] + + Note that all items in the iterable are gathered into a list before the + summarization step, which may require significant storage. 
+
+    The returned object is a :obj:`collections.defaultdict` with the
+    ``default_factory`` set to ``None``, such that it behaves like a normal
+    dictionary.
+
+    """
+    valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
+
+    ret = defaultdict(list)
+    for item in iterable:
+        key = keyfunc(item)
+        value = valuefunc(item)
+        ret[key].append(value)
+
+    if reducefunc is not None:
+        for key, value_list in ret.items():
+            ret[key] = reducefunc(value_list)
+
+    ret.default_factory = None
+    return ret
+
+
+def rlocate(iterable, pred=bool, window_size=None):
+    """Yield the index of each item in *iterable* for which *pred* returns
+    ``True``, starting from the right and moving left.
+
+    *pred* defaults to :func:`bool`, which will select truthy items:
+
+    >>> list(rlocate([0, 1, 1, 0, 1, 0, 0]))  # Truthy at 1, 2, and 4
+    [4, 2, 1]
+
+    Set *pred* to a custom function to, e.g., find the indexes for a particular
+    item:
+
+    >>> iterable = iter('abcb')
+    >>> pred = lambda x: x == 'b'
+    >>> list(rlocate(iterable, pred))
+    [3, 1]
+
+    If *window_size* is given, then the *pred* function will be called with
+    that many items. This enables searching for sub-sequences:
+
+    >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+    >>> pred = lambda *args: args == (1, 2, 3)
+    >>> list(rlocate(iterable, pred=pred, window_size=3))
+    [9, 5, 1]
+
+    Beware, this function won't return anything for infinite iterables.
+    If *iterable* is reversible, ``rlocate`` will reverse it and search from
+    the right. Otherwise, it will search from the left and return the results
+    in reverse order.
+
+    See :func:`locate` for other example applications.
+
+    """
+    if window_size is None:
+        try:
+            len_iter = len(iterable)
+            return (
+                len_iter - i - 1 for i in locate(reversed(iterable), pred)
+            )
+        except TypeError:
+            pass
+
+    return reversed(list(locate(iterable, pred, window_size)))
+
+
+def replace(iterable, pred, substitutes, count=None, window_size=1):
+    """Yield the items from *iterable*, replacing the items for which *pred*
+    returns ``True`` with the items from the iterable *substitutes*.
+
+    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
+    >>> pred = lambda x: x == 0
+    >>> substitutes = (2, 3)
+    >>> list(replace(iterable, pred, substitutes))
+    [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
+
+    If *count* is given, the number of replacements will be limited:
+
+    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
+    >>> pred = lambda x: x == 0
+    >>> substitutes = [None]
+    >>> list(replace(iterable, pred, substitutes, count=2))
+    [1, 1, None, 1, 1, None, 1, 1, 0]
+
+    Use *window_size* to control the number of items passed as arguments to
+    *pred*. This allows for locating and replacing subsequences.
+ + >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5] + >>> window_size = 3 + >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred + >>> substitutes = [3, 4] # Splice in these items + >>> list(replace(iterable, pred, substitutes, window_size=window_size)) + [3, 4, 5, 3, 4, 5] + + """ + if window_size < 1: + raise ValueError('window_size must be at least 1') + + # Save the substitutes iterable, since it's used more than once + substitutes = tuple(substitutes) + + # Add padding such that the number of windows matches the length of the + # iterable + it = chain(iterable, [_marker] * (window_size - 1)) + windows = windowed(it, window_size) + + n = 0 + for w in windows: + # If the current window matches our predicate (and we haven't hit + # our maximum number of replacements), splice in the substitutes + # and then consume the following windows that overlap with this one. + # For example, if the iterable is (0, 1, 2, 3, 4...) + # and the window size is 2, we have (0, 1), (1, 2), (2, 3)... + # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2) + if pred(*w): + if (count is None) or (n < count): + n += 1 + for s in substitutes: + yield s + consume(windows, window_size - 1) + continue + + # If there was no match (or we've reached the replacement limit), + # yield the first item from the window. + if w and (w[0] is not _marker): + yield w[0] diff --git a/pipenv/vendor/more_itertools/recipes.py b/pipenv/vendor/more_itertools/recipes.py new file mode 100644 index 0000000000..3b455d4eb8 --- /dev/null +++ b/pipenv/vendor/more_itertools/recipes.py @@ -0,0 +1,577 @@ +"""Imported from the recipes section of the itertools documentation. + +All functions taken from the recipes section of the itertools library docs +[1]_. +Some backward-compatible usability improvements have been made. + +.. [1] http://docs.python.org/library/itertools.html#recipes + +""" +from collections import deque +from itertools import ( + chain, combinations, count, cycle, groupby, islice, repeat, starmap, tee +) +import operator +from random import randrange, sample, choice + +from six import PY2 +from six.moves import filter, filterfalse, map, range, zip, zip_longest + +__all__ = [ + 'accumulate', + 'all_equal', + 'consume', + 'dotproduct', + 'first_true', + 'flatten', + 'grouper', + 'iter_except', + 'ncycles', + 'nth', + 'nth_combination', + 'padnone', + 'pairwise', + 'partition', + 'powerset', + 'prepend', + 'quantify', + 'random_combination_with_replacement', + 'random_combination', + 'random_permutation', + 'random_product', + 'repeatfunc', + 'roundrobin', + 'tabulate', + 'tail', + 'take', + 'unique_everseen', + 'unique_justseen', +] + + +def accumulate(iterable, func=operator.add): + """ + Return an iterator whose items are the accumulated results of a function + (specified by the optional *func* argument) that takes two arguments. + By default, returns accumulated sums with :func:`operator.add`. + + >>> list(accumulate([1, 2, 3, 4, 5])) # Running sum + [1, 3, 6, 10, 15] + >>> list(accumulate([1, 2, 3], func=operator.mul)) # Running product + [1, 2, 6] + >>> list(accumulate([0, 1, -1, 2, 3, 2], func=max)) # Running maximum + [0, 1, 1, 2, 3, 3] + + This function is available in the ``itertools`` module for Python 3.2 and + greater. + + """ + it = iter(iterable) + try: + total = next(it) + except StopIteration: + return + else: + yield total + + for element in it: + total = func(total, element) + yield total + + +def take(n, iterable): + """Return first *n* items of the iterable as a list. 
+
+    >>> take(3, range(10))
+    [0, 1, 2]
+    >>> take(5, range(3))
+    [0, 1, 2]
+
+    Effectively a short replacement for ``next``-based iterator consumption
+    when you want more than one item, but less than the whole iterator.
+
+    """
+    return list(islice(iterable, n))
+
+
+def tabulate(function, start=0):
+    """Return an iterator over the results of ``function(start)``,
+    ``function(start + 1)``, ``function(start + 2)``...
+
+    *function* should be a function that accepts one integer argument.
+
+    If *start* is not specified, it defaults to 0. It will be incremented each
+    time the iterator is advanced.
+
+    >>> square = lambda x: x ** 2
+    >>> iterator = tabulate(square, -3)
+    >>> take(4, iterator)
+    [9, 4, 1, 0]
+
+    """
+    return map(function, count(start))
+
+
+def tail(n, iterable):
+    """Return an iterator over the last *n* items of *iterable*.
+
+    >>> t = tail(3, 'ABCDEFG')
+    >>> list(t)
+    ['E', 'F', 'G']
+
+    """
+    return iter(deque(iterable, maxlen=n))
+
+
+def consume(iterator, n=None):
+    """Advance *iterator* by *n* steps. If *n* is ``None``, consume it
+    entirely.
+
+    Efficiently exhausts an iterator without returning values. Defaults to
+    consuming the whole iterator, but an optional second argument may be
+    provided to limit consumption.
+
+    >>> i = (x for x in range(10))
+    >>> next(i)
+    0
+    >>> consume(i, 3)
+    >>> next(i)
+    4
+    >>> consume(i)
+    >>> next(i)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    StopIteration
+
+    If the iterator has fewer items remaining than the provided limit, the
+    whole iterator will be consumed.
+
+    >>> i = (x for x in range(3))
+    >>> consume(i, 5)
+    >>> next(i)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    StopIteration
+
+    """
+    # Use functions that consume iterators at C speed.
+    if n is None:
+        # feed the entire iterator into a zero-length deque
+        deque(iterator, maxlen=0)
+    else:
+        # advance to the empty slice starting at position n
+        next(islice(iterator, n, n), None)
+
+
+def nth(iterable, n, default=None):
+    """Returns the nth item or a default value.
+
+    >>> l = range(10)
+    >>> nth(l, 3)
+    3
+    >>> nth(l, 20, "zebra")
+    'zebra'
+
+    """
+    return next(islice(iterable, n, None), default)
+
+
+def all_equal(iterable):
+    """
+    Returns ``True`` if all the elements are equal to each other.
+
+    >>> all_equal('aaaa')
+    True
+    >>> all_equal('aaab')
+    False
+
+    """
+    g = groupby(iterable)
+    return next(g, True) and not next(g, False)
+
+
+def quantify(iterable, pred=bool):
+    """Return the number of times the predicate is true.
+
+    >>> quantify([True, False, True])
+    2
+
+    """
+    return sum(map(pred, iterable))
+
+
+def padnone(iterable):
+    """Returns the sequence of elements and then returns ``None`` indefinitely.
+
+    >>> take(5, padnone(range(3)))
+    [0, 1, 2, None, None]
+
+    Useful for emulating the behavior of the built-in :func:`map` function.
+
+    See also :func:`padded`.
+
+    """
+    return chain(iterable, repeat(None))
+
+
+def ncycles(iterable, n):
+    """Returns the sequence elements *n* times.
+
+    >>> list(ncycles(["a", "b"], 3))
+    ['a', 'b', 'a', 'b', 'a', 'b']
+
+    """
+    return chain.from_iterable(repeat(tuple(iterable), n))
+
+
+def dotproduct(vec1, vec2):
+    """Returns the dot product of the two iterables.
+
+    >>> dotproduct([10, 10], [20, 20])
+    400
+
+    """
+    return sum(map(operator.mul, vec1, vec2))
+
+
+def flatten(listOfLists):
+    """Return an iterator flattening one level of nesting in a list of lists.
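+
+    Only one level is flattened; deeper nesting is preserved:
+
+    >>> list(flatten([[0, [1]], [2]]))
+    [0, [1], 2]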
+ + >>> list(flatten([[0, 1], [2, 3]])) + [0, 1, 2, 3] + + See also :func:`collapse`, which can flatten multiple levels of nesting. + + """ + return chain.from_iterable(listOfLists) + + +def repeatfunc(func, times=None, *args): + """Call *func* with *args* repeatedly, returning an iterable over the + results. + + If *times* is specified, the iterable will terminate after that many + repetitions: + + >>> from operator import add + >>> times = 4 + >>> args = 3, 5 + >>> list(repeatfunc(add, times, *args)) + [8, 8, 8, 8] + + If *times* is ``None`` the iterable will not terminate: + + >>> from random import randrange + >>> times = None + >>> args = 1, 11 + >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP + [2, 4, 8, 1, 8, 4] + + """ + if times is None: + return starmap(func, repeat(args)) + return starmap(func, repeat(args, times)) + + +def pairwise(iterable): + """Returns an iterator of paired items, overlapping, from the original + + >>> take(4, pairwise(count())) + [(0, 1), (1, 2), (2, 3), (3, 4)] + + """ + a, b = tee(iterable) + next(b, None) + return zip(a, b) + + +def grouper(n, iterable, fillvalue=None): + """Collect data into fixed-length chunks or blocks. + + >>> list(grouper(3, 'ABCDEFG', 'x')) + [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] + + """ + args = [iter(iterable)] * n + return zip_longest(fillvalue=fillvalue, *args) + + +def roundrobin(*iterables): + """Yields an item from each iterable, alternating between them. + + >>> list(roundrobin('ABC', 'D', 'EF')) + ['A', 'D', 'E', 'B', 'F', 'C'] + + This function produces the same output as :func:`interleave_longest`, but + may perform better for some inputs (in particular when the number of + iterables is small). + + """ + # Recipe credited to George Sakkis + pending = len(iterables) + if PY2: + nexts = cycle(iter(it).next for it in iterables) + else: + nexts = cycle(iter(it).__next__ for it in iterables) + while pending: + try: + for next in nexts: + yield next() + except StopIteration: + pending -= 1 + nexts = cycle(islice(nexts, pending)) + + +def partition(pred, iterable): + """ + Returns a 2-tuple of iterables derived from the input iterable. + The first yields the items that have ``pred(item) == False``. + The second yields the items that have ``pred(item) == True``. + + >>> is_odd = lambda x: x % 2 != 0 + >>> iterable = range(10) + >>> even_items, odd_items = partition(is_odd, iterable) + >>> list(even_items), list(odd_items) + ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]) + + """ + # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 + t1, t2 = tee(iterable) + return filterfalse(pred, t1), filter(pred, t2) + + +def powerset(iterable): + """Yields all possible subsets of the iterable. + + >>> list(powerset([1, 2, 3])) + [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] + + :func:`powerset` will operate on iterables that aren't :class:`set` + instances, so repeated elements in the input will produce repeated elements + in the output. Use :func:`unique_everseen` on the input to avoid generating + duplicates: + + >>> seq = [1, 1, 0] + >>> list(powerset(seq)) + [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)] + >>> from more_itertools import unique_everseen + >>> list(powerset(unique_everseen(seq))) + [(), (1,), (0,), (1, 0)] + + """ + s = list(iterable) + return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) + + +def unique_everseen(iterable, key=None): + """ + Yield unique elements, preserving order. 
+
+    >>> list(unique_everseen('AAAABBBCCDAABBB'))
+    ['A', 'B', 'C', 'D']
+    >>> list(unique_everseen('ABBCcAD', str.lower))
+    ['A', 'B', 'C', 'D']
+
+    Sequences with a mix of hashable and unhashable items can be used.
+    The function will be slower (i.e., `O(n^2)`) for unhashable items.
+
+    """
+    seenset = set()
+    seenset_add = seenset.add
+    seenlist = []
+    seenlist_add = seenlist.append
+    if key is None:
+        for element in iterable:
+            try:
+                if element not in seenset:
+                    seenset_add(element)
+                    yield element
+            except TypeError:
+                if element not in seenlist:
+                    seenlist_add(element)
+                    yield element
+    else:
+        for element in iterable:
+            k = key(element)
+            try:
+                if k not in seenset:
+                    seenset_add(k)
+                    yield element
+            except TypeError:
+                if k not in seenlist:
+                    seenlist_add(k)
+                    yield element
+
+
+def unique_justseen(iterable, key=None):
+    """Yields elements in order, ignoring serial duplicates.
+
+    >>> list(unique_justseen('AAAABBBCCDAABBB'))
+    ['A', 'B', 'C', 'D', 'A', 'B']
+    >>> list(unique_justseen('ABBCcAD', str.lower))
+    ['A', 'B', 'C', 'A', 'D']
+
+    """
+    return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
+
+
+def iter_except(func, exception, first=None):
+    """Yields results from a function repeatedly until an exception is raised.
+
+    Converts a call-until-exception interface to an iterator interface.
+    Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
+    to end the loop.
+
+    >>> l = [0, 1, 2]
+    >>> list(iter_except(l.pop, IndexError))
+    [2, 1, 0]
+
+    """
+    try:
+        if first is not None:
+            yield first()
+        while 1:
+            yield func()
+    except exception:
+        pass
+
+
+def first_true(iterable, default=None, pred=None):
+    """
+    Returns the first true value in the iterable.
+
+    If no true value is found, returns *default*.
+
+    If *pred* is not None, returns the first item for which
+    ``pred(item) == True``.
+
+    >>> first_true(range(10))
+    1
+    >>> first_true(range(10), pred=lambda x: x > 5)
+    6
+    >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
+    'missing'
+
+    """
+    return next(filter(pred, iterable), default)
+
+
+def random_product(*args, **kwds):
+    """Draw an item at random from each of the input iterables.
+
+    >>> random_product('abc', range(4), 'XYZ')  # doctest:+SKIP
+    ('c', 3, 'Z')
+
+    If *repeat* is provided as a keyword argument, that many items will be
+    drawn from each iterable.
+
+    >>> random_product('abcd', range(4), repeat=2)  # doctest:+SKIP
+    ('a', 2, 'd', 3)
+
+    This is equivalent to taking a random selection from
+    ``itertools.product(*args, **kwds)``.
+
+    """
+    pools = [tuple(pool) for pool in args] * kwds.get('repeat', 1)
+    return tuple(choice(pool) for pool in pools)
+
+
+def random_permutation(iterable, r=None):
+    """Return a random *r* length permutation of the elements in *iterable*.
+
+    If *r* is not specified or is ``None``, then *r* defaults to the length of
+    *iterable*.
+
+    >>> random_permutation(range(5))  # doctest:+SKIP
+    (3, 4, 0, 1, 2)
+
+    This is equivalent to taking a random selection from
+    ``itertools.permutations(iterable, r)``.
+
+    """
+    pool = tuple(iterable)
+    r = len(pool) if r is None else r
+    return tuple(sample(pool, r))
+
+
+def random_combination(iterable, r):
+    """Return a random *r* length subsequence of the elements in *iterable*.
+
+    >>> random_combination(range(5), 3)  # doctest:+SKIP
+    (2, 3, 4)
+
+    This is equivalent to taking a random selection from
+    ``itertools.combinations(iterable, r)``.
+
+    """
+    pool = tuple(iterable)
+    n = len(pool)
+    indices = sorted(sample(range(n), r))
+    return tuple(pool[i] for i in indices)
+
+
+def random_combination_with_replacement(iterable, r):
+    """Return a random *r* length subsequence of elements in *iterable*,
+    allowing individual elements to be repeated.
+
+    >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
+    (0, 0, 1, 2, 2)
+
+    This is equivalent to taking a random selection from
+    ``itertools.combinations_with_replacement(iterable, r)``.
+
+    """
+    pool = tuple(iterable)
+    n = len(pool)
+    indices = sorted(randrange(n) for i in range(r))
+    return tuple(pool[i] for i in indices)
+
+
+def nth_combination(iterable, r, index):
+    """Equivalent to ``list(combinations(iterable, r))[index]``.
+
+    The subsequences of *iterable* that are of length *r* can be ordered
+    lexicographically. :func:`nth_combination` computes the subsequence at
+    sort position *index* directly, without computing the previous
+    subsequences.
+
+    """
+    pool = tuple(iterable)
+    n = len(pool)
+    if (r < 0) or (r > n):
+        raise ValueError
+
+    c = 1
+    k = min(r, n - r)
+    for i in range(1, k + 1):
+        c = c * (n - k + i) // i
+
+    if index < 0:
+        index += c
+
+    if (index < 0) or (index >= c):
+        raise IndexError
+
+    result = []
+    while r:
+        c, n, r = c * r // n, n - 1, r - 1
+        while index >= c:
+            index -= c
+            c, n = c * (n - r) // n, n - 1
+        result.append(pool[-1 - n])
+
+    return tuple(result)
+
+
+def prepend(value, iterator):
+    """Yield *value*, followed by the elements in *iterator*.
+
+    >>> value = '0'
+    >>> iterator = ['1', '2', '3']
+    >>> list(prepend(value, iterator))
+    ['0', '1', '2', '3']
+
+    To prepend multiple values, see :func:`itertools.chain`.
+
+    """
+    return chain([value], iterator)
diff --git a/pipenv/vendor/more_itertools/tests/__init__.py b/pipenv/vendor/more_itertools/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/pipenv/vendor/more_itertools/tests/test_more.py b/pipenv/vendor/more_itertools/tests/test_more.py
new file mode 100644
index 0000000000..eacf8a8aae
--- /dev/null
+++ b/pipenv/vendor/more_itertools/tests/test_more.py
@@ -0,0 +1,2313 @@
+from __future__ import division, print_function, unicode_literals
+
+from collections import OrderedDict
+from decimal import Decimal
+from doctest import DocTestSuite
+from fractions import Fraction
+from functools import partial, reduce
+from heapq import merge
+from io import StringIO
+from itertools import (
+    chain,
+    count,
+    groupby,
+    islice,
+    permutations,
+    product,
+    repeat,
+)
+from operator import add, mul, itemgetter
+from unittest import TestCase
+
+from six.moves import filter, map, range, zip
+
+import more_itertools as mi
+
+
+def load_tests(loader, tests, ignore):
+    # Add the doctests
+    tests.addTests(DocTestSuite('more_itertools.more'))
+    return tests
+
+
+class CollateTests(TestCase):
+    """Unit tests for ``collate()``"""
+    # Also accidentally tests peekable, though that could use its own tests
+
+    def test_default(self):
+        """Test with the default `key` function."""
+        iterables = [range(4), range(7), range(3, 6)]
+        self.assertEqual(
+            sorted(reduce(list.__add__, [list(it) for it in iterables])),
+            list(mi.collate(*iterables))
+        )
+
+    def test_key(self):
+        """Test using a custom `key` function."""
+        iterables = [range(5, 0, -1), range(4, 0, -1)]
+        actual = sorted(
+            reduce(list.__add__, [list(it) for it in iterables]), reverse=True
+        )
+        expected = list(mi.collate(*iterables, key=lambda x: -x))
+        self.assertEqual(actual, expected)
+
+    def test_empty(self):
+        """Be nice if passed an empty list of iterables."""
+        self.assertEqual([], list(mi.collate()))
+
+    def test_one(self):
+        """Work when only 1 iterable is passed."""
+        self.assertEqual([0, 1], list(mi.collate(range(2))))
+
+    def test_reverse(self):
+        """Test the `reverse` kwarg."""
+        iterables = [range(4, 0, -1), range(7, 0, -1), range(3, 6, -1)]
+
+        actual = sorted(
+            reduce(list.__add__, [list(it) for it in iterables]), reverse=True
+        )
+        expected = list(mi.collate(*iterables, reverse=True))
+        self.assertEqual(actual, expected)
+
+    def test_alias(self):
+        self.assertNotEqual(merge.__doc__, mi.collate.__doc__)
+        self.assertNotEqual(partial.__doc__, mi.collate.__doc__)
+
+
+class ChunkedTests(TestCase):
+    """Tests for ``chunked()``"""
+
+    def test_even(self):
+        """Test when ``n`` divides evenly into the length of the iterable."""
+        self.assertEqual(
+            list(mi.chunked('ABCDEF', 3)), [['A', 'B', 'C'], ['D', 'E', 'F']]
+        )
+
+    def test_odd(self):
+        """Test when ``n`` does not divide evenly into the length of the
+        iterable.
+
+        """
+        self.assertEqual(
+            list(mi.chunked('ABCDE', 3)), [['A', 'B', 'C'], ['D', 'E']]
+        )
+
+
+class FirstTests(TestCase):
+    """Tests for ``first()``"""
+
+    def test_many(self):
+        """Test that it works on many-item iterables."""
+        # Also try it on a generator expression to make sure it works on
+        # whatever those return, across Python versions.
+        self.assertEqual(mi.first(x for x in range(4)), 0)
+
+    def test_one(self):
+        """Test that it doesn't raise StopIteration prematurely."""
+        self.assertEqual(mi.first([3]), 3)
+
+    def test_empty_stop_iteration(self):
+        """It should raise ValueError for empty iterables."""
+        self.assertRaises(ValueError, lambda: mi.first([]))
+
+    def test_default(self):
+        """It should return the provided default arg for empty iterables."""
+        self.assertEqual(mi.first([], 'boo'), 'boo')
+
+
+class IterOnlyRange:
+    """User-defined iterable class which only supports __iter__.
+
+    It is not specified to inherit ``object``, so indexing on an instance will
+    raise an ``AttributeError`` rather than ``TypeError`` in Python 2.
+
+    >>> r = IterOnlyRange(5)
+    >>> r[0]
+    AttributeError: IterOnlyRange instance has no attribute '__getitem__'
+
+    Note: In Python 3, ``TypeError`` will be raised because ``object`` is
+    inherited implicitly by default.
+
+    >>> r[0]
+    TypeError: 'IterOnlyRange' object does not support indexing
+    """
+    def __init__(self, n):
+        """Set the length of the range."""
+        self.n = n
+
+    def __iter__(self):
+        """Works same as range()."""
+        return iter(range(self.n))
+
+
+class LastTests(TestCase):
+    """Tests for ``last()``"""
+
+    def test_many_nonsliceable(self):
+        """Test that it works on many-item non-slice-able iterables."""
+        # Also try it on a generator expression to make sure it works on
+        # whatever those return, across Python versions.
+        self.assertEqual(mi.last(x for x in range(4)), 3)
+
+    def test_one_nonsliceable(self):
+        """Test that it doesn't raise StopIteration prematurely."""
+        self.assertEqual(mi.last(x for x in range(1)), 0)
+
+    def test_empty_stop_iteration_nonsliceable(self):
+        """It should raise ValueError for empty non-slice-able iterables."""
+        self.assertRaises(ValueError, lambda: mi.last(x for x in range(0)))
+
+    def test_default_nonsliceable(self):
+        """It should return the provided default arg for empty non-slice-able
+        iterables.
+ """ + self.assertEqual(mi.last((x for x in range(0)), 'boo'), 'boo') + + def test_many_sliceable(self): + """Test that it works on many-item slice-able iterables.""" + self.assertEqual(mi.last([0, 1, 2, 3]), 3) + + def test_one_sliceable(self): + """Test that it doesn't raise StopIteration prematurely.""" + self.assertEqual(mi.last([3]), 3) + + def test_empty_stop_iteration_sliceable(self): + """It should raise ValueError for empty slice-able iterables.""" + self.assertRaises(ValueError, lambda: mi.last([])) + + def test_default_sliceable(self): + """It should return the provided default arg for empty slice-able + iterables. + """ + self.assertEqual(mi.last([], 'boo'), 'boo') + + def test_dict(self): + """last(dic) and last(dic.keys()) should return same result.""" + dic = {'a': 1, 'b': 2, 'c': 3} + self.assertEqual(mi.last(dic), mi.last(dic.keys())) + + def test_ordereddict(self): + """last(dic) should return the last key.""" + od = OrderedDict() + od['a'] = 1 + od['b'] = 2 + od['c'] = 3 + self.assertEqual(mi.last(od), 'c') + + def test_customrange(self): + """It should work on custom class where [] raises AttributeError.""" + self.assertEqual(mi.last(IterOnlyRange(5)), 4) + + +class PeekableTests(TestCase): + """Tests for ``peekable()`` behavor not incidentally covered by testing + ``collate()`` + + """ + def test_peek_default(self): + """Make sure passing a default into ``peek()`` works.""" + p = mi.peekable([]) + self.assertEqual(p.peek(7), 7) + + def test_truthiness(self): + """Make sure a ``peekable`` tests true iff there are items remaining in + the iterable. + + """ + p = mi.peekable([]) + self.assertFalse(p) + + p = mi.peekable(range(3)) + self.assertTrue(p) + + def test_simple_peeking(self): + """Make sure ``next`` and ``peek`` advance and don't advance the + iterator, respectively. + + """ + p = mi.peekable(range(10)) + self.assertEqual(next(p), 0) + self.assertEqual(p.peek(), 1) + self.assertEqual(next(p), 1) + + def test_indexing(self): + """ + Indexing into the peekable shouldn't advance the iterator. 
+ """ + p = mi.peekable('abcdefghijkl') + + # The 0th index is what ``next()`` will return + self.assertEqual(p[0], 'a') + self.assertEqual(next(p), 'a') + + # Indexing further into the peekable shouldn't advance the itertor + self.assertEqual(p[2], 'd') + self.assertEqual(next(p), 'b') + + # The 0th index moves up with the iterator; the last index follows + self.assertEqual(p[0], 'c') + self.assertEqual(p[9], 'l') + + self.assertEqual(next(p), 'c') + self.assertEqual(p[8], 'l') + + # Negative indexing should work too + self.assertEqual(p[-2], 'k') + self.assertEqual(p[-9], 'd') + self.assertRaises(IndexError, lambda: p[-10]) + + def test_slicing(self): + """Slicing the peekable shouldn't advance the iterator.""" + seq = list('abcdefghijkl') + p = mi.peekable(seq) + + # Slicing the peekable should just be like slicing a re-iterable + self.assertEqual(p[1:4], seq[1:4]) + + # Advancing the iterator moves the slices up also + self.assertEqual(next(p), 'a') + self.assertEqual(p[1:4], seq[1:][1:4]) + + # Implicit starts and stop should work + self.assertEqual(p[:5], seq[1:][:5]) + self.assertEqual(p[:], seq[1:][:]) + + # Indexing past the end should work + self.assertEqual(p[:100], seq[1:][:100]) + + # Steps should work, including negative + self.assertEqual(p[::2], seq[1:][::2]) + self.assertEqual(p[::-1], seq[1:][::-1]) + + def test_slicing_reset(self): + """Test slicing on a fresh iterable each time""" + iterable = ['0', '1', '2', '3', '4', '5'] + indexes = list(range(-4, len(iterable) + 4)) + [None] + steps = [1, 2, 3, 4, -1, -2, -3, 4] + for slice_args in product(indexes, indexes, steps): + it = iter(iterable) + p = mi.peekable(it) + next(p) + index = slice(*slice_args) + actual = p[index] + expected = iterable[1:][index] + self.assertEqual(actual, expected, slice_args) + + def test_slicing_error(self): + iterable = '01234567' + p = mi.peekable(iter(iterable)) + + # Prime the cache + p.peek() + old_cache = list(p._cache) + + # Illegal slice + with self.assertRaises(ValueError): + p[1:-1:0] + + # Neither the cache nor the iteration should be affected + self.assertEqual(old_cache, list(p._cache)) + self.assertEqual(list(p), list(iterable)) + + def test_passthrough(self): + """Iterating a peekable without using ``peek()`` or ``prepend()`` + should just give the underlying iterable's elements (a trivial test but + useful to set a baseline in case something goes wrong)""" + expected = [1, 2, 3, 4, 5] + actual = list(mi.peekable(expected)) + self.assertEqual(actual, expected) + + # prepend() behavior tests + + def test_prepend(self): + """Tests intersperesed ``prepend()`` and ``next()`` calls""" + it = mi.peekable(range(2)) + actual = [] + + # Test prepend() before next() + it.prepend(10) + actual += [next(it), next(it)] + + # Test prepend() between next()s + it.prepend(11) + actual += [next(it), next(it)] + + # Test prepend() after source iterable is consumed + it.prepend(12) + actual += [next(it)] + + expected = [10, 0, 11, 1, 12] + self.assertEqual(actual, expected) + + def test_multi_prepend(self): + """Tests prepending multiple items and getting them in proper order""" + it = mi.peekable(range(5)) + actual = [next(it), next(it)] + it.prepend(10, 11, 12) + it.prepend(20, 21) + actual += list(it) + expected = [0, 1, 20, 21, 10, 11, 12, 2, 3, 4] + self.assertEqual(actual, expected) + + def test_empty(self): + """Tests prepending in front of an empty iterable""" + it = mi.peekable([]) + it.prepend(10) + actual = list(it) + expected = [10] + self.assertEqual(actual, expected) + + def 
test_prepend_truthiness(self): + """Tests that ``__bool__()`` or ``__nonzero__()`` works properly + with ``prepend()``""" + it = mi.peekable(range(5)) + self.assertTrue(it) + actual = list(it) + self.assertFalse(it) + it.prepend(10) + self.assertTrue(it) + actual += [next(it)] + self.assertFalse(it) + expected = [0, 1, 2, 3, 4, 10] + self.assertEqual(actual, expected) + + def test_multi_prepend_peek(self): + """Tests prepending multiple elements and getting them in reverse order + while peeking""" + it = mi.peekable(range(5)) + actual = [next(it), next(it)] + self.assertEqual(it.peek(), 2) + it.prepend(10, 11, 12) + self.assertEqual(it.peek(), 10) + it.prepend(20, 21) + self.assertEqual(it.peek(), 20) + actual += list(it) + self.assertFalse(it) + expected = [0, 1, 20, 21, 10, 11, 12, 2, 3, 4] + self.assertEqual(actual, expected) + + def test_prepend_after_stop(self): + """Test resuming iteration after a previous exhaustion""" + it = mi.peekable(range(3)) + self.assertEqual(list(it), [0, 1, 2]) + self.assertRaises(StopIteration, lambda: next(it)) + it.prepend(10) + self.assertEqual(next(it), 10) + self.assertRaises(StopIteration, lambda: next(it)) + + def test_prepend_slicing(self): + """Tests interaction between prepending and slicing""" + seq = list(range(20)) + p = mi.peekable(seq) + + p.prepend(30, 40, 50) + pseq = [30, 40, 50] + seq # pseq for prepended_seq + + # adapt the specific tests from test_slicing + self.assertEqual(p[0], 30) + self.assertEqual(p[1:8], pseq[1:8]) + self.assertEqual(p[1:], pseq[1:]) + self.assertEqual(p[:5], pseq[:5]) + self.assertEqual(p[:], pseq[:]) + self.assertEqual(p[:100], pseq[:100]) + self.assertEqual(p[::2], pseq[::2]) + self.assertEqual(p[::-1], pseq[::-1]) + + def test_prepend_indexing(self): + """Tests interaction between prepending and indexing""" + seq = list(range(20)) + p = mi.peekable(seq) + + p.prepend(30, 40, 50) + + self.assertEqual(p[0], 30) + self.assertEqual(next(p), 30) + self.assertEqual(p[2], 0) + self.assertEqual(next(p), 40) + self.assertEqual(p[0], 50) + self.assertEqual(p[9], 8) + self.assertEqual(next(p), 50) + self.assertEqual(p[8], 8) + self.assertEqual(p[-2], 18) + self.assertEqual(p[-9], 11) + self.assertRaises(IndexError, lambda: p[-21]) + + def test_prepend_iterable(self): + """Tests prepending from an iterable""" + it = mi.peekable(range(5)) + # Don't directly use the range() object to avoid any range-specific + # optimizations + it.prepend(*(x for x in range(5))) + actual = list(it) + expected = list(chain(range(5), range(5))) + self.assertEqual(actual, expected) + + def test_prepend_many(self): + """Tests that prepending a huge number of elements works""" + it = mi.peekable(range(5)) + # Don't directly use the range() object to avoid any range-specific + # optimizations + it.prepend(*(x for x in range(20000))) + actual = list(it) + expected = list(chain(range(20000), range(5))) + self.assertEqual(actual, expected) + + def test_prepend_reversed(self): + """Tests prepending from a reversed iterable""" + it = mi.peekable(range(3)) + it.prepend(*reversed((10, 11, 12))) + actual = list(it) + expected = [12, 11, 10, 0, 1, 2] + self.assertEqual(actual, expected) + + +class ConsumerTests(TestCase): + """Tests for ``consumer()``""" + + def test_consumer(self): + @mi.consumer + def eater(): + while True: + x = yield # noqa + + e = eater() + e.send('hi') # without @consumer, would raise TypeError + + +class DistinctPermutationsTests(TestCase): + def test_distinct_permutations(self): + """Make sure the output for 
``distinct_permutations()`` is the same as
+        set(permutations(it)).
+
+        """
+        iterable = ['z', 'a', 'a', 'q', 'q', 'q', 'y']
+        test_output = sorted(mi.distinct_permutations(iterable))
+        ref_output = sorted(set(permutations(iterable)))
+        self.assertEqual(test_output, ref_output)
+
+    def test_other_iterables(self):
+        """Make sure ``distinct_permutations()`` accepts different types of
+        iterables.
+
+        """
+        # a generator
+        iterable = (c for c in ['z', 'a', 'a', 'q', 'q', 'q', 'y'])
+        test_output = sorted(mi.distinct_permutations(iterable))
+        # "reload" it
+        iterable = (c for c in ['z', 'a', 'a', 'q', 'q', 'q', 'y'])
+        ref_output = sorted(set(permutations(iterable)))
+        self.assertEqual(test_output, ref_output)
+
+        # an iterator
+        iterable = iter(['z', 'a', 'a', 'q', 'q', 'q', 'y'])
+        test_output = sorted(mi.distinct_permutations(iterable))
+        # "reload" it
+        iterable = iter(['z', 'a', 'a', 'q', 'q', 'q', 'y'])
+        ref_output = sorted(set(permutations(iterable)))
+        self.assertEqual(test_output, ref_output)
+
+
+class IlenTests(TestCase):
+    def test_ilen(self):
+        """Sanity-checks for ``ilen()``."""
+        # Non-empty
+        self.assertEqual(
+            mi.ilen(filter(lambda x: x % 10 == 0, range(101))), 11
+        )
+
+        # Empty
+        self.assertEqual(mi.ilen((x for x in range(0))), 0)
+
+        # Iterable with __len__
+        self.assertEqual(mi.ilen(list(range(6))), 6)
+
+
+class WithIterTests(TestCase):
+    def test_with_iter(self):
+        s = StringIO('One fish\nTwo fish')
+        initial_words = [line.split()[0] for line in mi.with_iter(s)]
+
+        # Iterable's items should be faithfully represented
+        self.assertEqual(initial_words, ['One', 'Two'])
+        # The file object should be closed
+        self.assertTrue(s.closed)
+
+
+class OneTests(TestCase):
+    def test_basic(self):
+        it = iter(['item'])
+        self.assertEqual(mi.one(it), 'item')
+
+    def test_too_short(self):
+        it = iter([])
+        self.assertRaises(ValueError, lambda: mi.one(it))
+        self.assertRaises(IndexError, lambda: mi.one(it, too_short=IndexError))
+
+    def test_too_long(self):
+        it = count()
+        self.assertRaises(ValueError, lambda: mi.one(it))  # burn 0 and 1
+        self.assertEqual(next(it), 2)
+        self.assertRaises(
+            OverflowError, lambda: mi.one(it, too_long=OverflowError)
+        )
+
+
+class IntersperseTest(TestCase):
+    """Tests for ``intersperse()``"""
+
+    def test_even(self):
+        iterable = (x for x in '01')
+        self.assertEqual(
+            list(mi.intersperse(None, iterable)), ['0', None, '1']
+        )
+
+    def test_odd(self):
+        iterable = (x for x in '012')
+        self.assertEqual(
+            list(mi.intersperse(None, iterable)), ['0', None, '1', None, '2']
+        )
+
+    def test_nested(self):
+        element = ('a', 'b')
+        iterable = (x for x in '012')
+        actual = list(mi.intersperse(element, iterable))
+        expected = ['0', ('a', 'b'), '1', ('a', 'b'), '2']
+        self.assertEqual(actual, expected)
+
+    def test_not_iterable(self):
+        self.assertRaises(TypeError, lambda: mi.intersperse('x', 1))
+
+    def test_n(self):
+        for n, element, expected in [
+            (1, '_', ['0', '_', '1', '_', '2', '_', '3', '_', '4', '_', '5']),
+            (2, '_', ['0', '1', '_', '2', '3', '_', '4', '5']),
+            (3, '_', ['0', '1', '2', '_', '3', '4', '5']),
+            (4, '_', ['0', '1', '2', '3', '_', '4', '5']),
+            (5, '_', ['0', '1', '2', '3', '4', '_', '5']),
+            (6, '_', ['0', '1', '2', '3', '4', '5']),
+            (7, '_', ['0', '1', '2', '3', '4', '5']),
+            (3, ['a', 'b'], ['0', '1', '2', ['a', 'b'], '3', '4', '5']),
+        ]:
+            iterable = (x for x in '012345')
+            actual = list(mi.intersperse(element, iterable, n=n))
+            self.assertEqual(actual, expected)
+
+    def test_n_zero(self):
+        self.assertRaises(
+            ValueError, lambda: list(mi.intersperse('x', '012', n=0))
+        )
+
+
+class UniqueToEachTests(TestCase):
+    """Tests for ``unique_to_each()``"""
+
+    def test_all_unique(self):
+        """When all the input iterables are unique, the output should match
+        the input."""
+        iterables = [[1, 2], [3, 4, 5], [6, 7, 8]]
+        self.assertEqual(mi.unique_to_each(*iterables), iterables)
+
+    def test_duplicates(self):
+        """When there are duplicates in any of the input iterables that aren't
+        in the rest, those duplicates should be emitted."""
+        iterables = ["mississippi", "missouri"]
+        self.assertEqual(
+            mi.unique_to_each(*iterables), [['p', 'p'], ['o', 'u', 'r']]
+        )
+
+    def test_mixed(self):
+        """When the input iterables contain different types, the function
+        should still behave properly."""
+        iterables = ['x', (i for i in range(3)), [1, 2, 3], tuple()]
+        self.assertEqual(mi.unique_to_each(*iterables), [['x'], [0], [3], []])
+
+
+class WindowedTests(TestCase):
+    """Tests for ``windowed()``"""
+
+    def test_basic(self):
+        actual = list(mi.windowed([1, 2, 3, 4, 5], 3))
+        expected = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
+        self.assertEqual(actual, expected)
+
+    def test_large_size(self):
+        """
+        When the window size is larger than the iterable, and no fill value is
+        given, ``None`` should be filled in.
+        """
+        actual = list(mi.windowed([1, 2, 3, 4, 5], 6))
+        expected = [(1, 2, 3, 4, 5, None)]
+        self.assertEqual(actual, expected)
+
+    def test_fillvalue(self):
+        """
+        When sizes don't match evenly, the given fill value should be used.
+        """
+        iterable = [1, 2, 3, 4, 5]
+
+        for n, kwargs, expected in [
+            (6, {}, [(1, 2, 3, 4, 5, '!')]),  # n > len(iterable)
+            (3, {'step': 3}, [(1, 2, 3), (4, 5, '!')]),  # using ``step``
+        ]:
+            actual = list(mi.windowed(iterable, n, fillvalue='!', **kwargs))
+            self.assertEqual(actual, expected)
+
+    def test_zero(self):
+        """When the window size is zero, an empty tuple should be emitted."""
+        actual = list(mi.windowed([1, 2, 3, 4, 5], 0))
+        expected = [tuple()]
+        self.assertEqual(actual, expected)
+
+    def test_negative(self):
+        """When the window size is negative, ValueError should be raised."""
+        with self.assertRaises(ValueError):
+            list(mi.windowed([1, 2, 3, 4, 5], -1))
+
+    def test_step(self):
+        """The window should advance by the number of steps provided"""
+        iterable = [1, 2, 3, 4, 5, 6, 7]
+        for n, step, expected in [
+            (3, 2, [(1, 2, 3), (3, 4, 5), (5, 6, 7)]),  # n > step
+            (3, 3, [(1, 2, 3), (4, 5, 6), (7, None, None)]),  # n == step
+            (3, 4, [(1, 2, 3), (5, 6, 7)]),  # line up nicely
+            (3, 5, [(1, 2, 3), (6, 7, None)]),  # off by one
+            (3, 6, [(1, 2, 3), (7, None, None)]),  # off by two
+            (3, 7, [(1, 2, 3)]),  # step past the end
+            (7, 8, [(1, 2, 3, 4, 5, 6, 7)]),  # step > len(iterable)
+        ]:
+            actual = list(mi.windowed(iterable, n, step=step))
+            self.assertEqual(actual, expected)
+
+        # Step must be greater than or equal to 1
+        with self.assertRaises(ValueError):
+            list(mi.windowed(iterable, 3, step=0))
+
+
+class SubstringsTests(TestCase):
+    def test_basic(self):
+        iterable = (x for x in range(4))
+        actual = list(mi.substrings(iterable))
+        expected = [
+            (0,),
+            (1,),
+            (2,),
+            (3,),
+            (0, 1),
+            (1, 2),
+            (2, 3),
+            (0, 1, 2),
+            (1, 2, 3),
+            (0, 1, 2, 3),
+        ]
+        self.assertEqual(actual, expected)
+
+    def test_strings(self):
+        iterable = 'abc'
+        actual = list(mi.substrings(iterable))
+        expected = [
+            ('a',), ('b',), ('c',), ('a', 'b'), ('b', 'c'), ('a', 'b', 'c')
+        ]
+        self.assertEqual(actual, expected)
+
+    def test_empty(self):
+        iterable = iter([])
+        actual = list(mi.substrings(iterable))
+        expected = []
+        
self.assertEqual(actual, expected) + + def test_order(self): + iterable = [2, 0, 1] + actual = list(mi.substrings(iterable)) + expected = [(2,), (0,), (1,), (2, 0), (0, 1), (2, 0, 1)] + self.assertEqual(actual, expected) + + +class BucketTests(TestCase): + """Tests for ``bucket()``""" + + def test_basic(self): + iterable = [10, 20, 30, 11, 21, 31, 12, 22, 23, 33] + D = mi.bucket(iterable, key=lambda x: 10 * (x // 10)) + + # In-order access + self.assertEqual(list(D[10]), [10, 11, 12]) + + # Out of order access + self.assertEqual(list(D[30]), [30, 31, 33]) + self.assertEqual(list(D[20]), [20, 21, 22, 23]) + + self.assertEqual(list(D[40]), []) # Nothing in here! + + def test_in(self): + iterable = [10, 20, 30, 11, 21, 31, 12, 22, 23, 33] + D = mi.bucket(iterable, key=lambda x: 10 * (x // 10)) + + self.assertIn(10, D) + self.assertNotIn(40, D) + self.assertIn(20, D) + self.assertNotIn(21, D) + + # Checking in-ness shouldn't advance the iterator + self.assertEqual(next(D[10]), 10) + + def test_validator(self): + iterable = count(0) + key = lambda x: int(str(x)[0]) # First digit of each number + validator = lambda x: 0 < x < 10 # No leading zeros + D = mi.bucket(iterable, key, validator=validator) + self.assertEqual(mi.take(3, D[1]), [1, 10, 11]) + self.assertNotIn(0, D) # Non-valid entries don't return True + self.assertNotIn(0, D._cache) # Don't store non-valid entries + self.assertEqual(list(D[0]), []) + + +class SpyTests(TestCase): + """Tests for ``spy()``""" + + def test_basic(self): + original_iterable = iter('abcdefg') + head, new_iterable = mi.spy(original_iterable) + self.assertEqual(head, ['a']) + self.assertEqual( + list(new_iterable), ['a', 'b', 'c', 'd', 'e', 'f', 'g'] + ) + + def test_unpacking(self): + original_iterable = iter('abcdefg') + (first, second, third), new_iterable = mi.spy(original_iterable, 3) + self.assertEqual(first, 'a') + self.assertEqual(second, 'b') + self.assertEqual(third, 'c') + self.assertEqual( + list(new_iterable), ['a', 'b', 'c', 'd', 'e', 'f', 'g'] + ) + + def test_too_many(self): + original_iterable = iter('abc') + head, new_iterable = mi.spy(original_iterable, 4) + self.assertEqual(head, ['a', 'b', 'c']) + self.assertEqual(list(new_iterable), ['a', 'b', 'c']) + + def test_zero(self): + original_iterable = iter('abc') + head, new_iterable = mi.spy(original_iterable, 0) + self.assertEqual(head, []) + self.assertEqual(list(new_iterable), ['a', 'b', 'c']) + + +class InterleaveTests(TestCase): + def test_even(self): + actual = list(mi.interleave([1, 4, 7], [2, 5, 8], [3, 6, 9])) + expected = [1, 2, 3, 4, 5, 6, 7, 8, 9] + self.assertEqual(actual, expected) + + def test_short(self): + actual = list(mi.interleave([1, 4], [2, 5, 7], [3, 6, 8])) + expected = [1, 2, 3, 4, 5, 6] + self.assertEqual(actual, expected) + + def test_mixed_types(self): + it_list = ['a', 'b', 'c', 'd'] + it_str = '12345' + it_inf = count() + actual = list(mi.interleave(it_list, it_str, it_inf)) + expected = ['a', '1', 0, 'b', '2', 1, 'c', '3', 2, 'd', '4', 3] + self.assertEqual(actual, expected) + + +class InterleaveLongestTests(TestCase): + def test_even(self): + actual = list(mi.interleave_longest([1, 4, 7], [2, 5, 8], [3, 6, 9])) + expected = [1, 2, 3, 4, 5, 6, 7, 8, 9] + self.assertEqual(actual, expected) + + def test_short(self): + actual = list(mi.interleave_longest([1, 4], [2, 5, 7], [3, 6, 8])) + expected = [1, 2, 3, 4, 5, 6, 7, 8] + self.assertEqual(actual, expected) + + def test_mixed_types(self): + it_list = ['a', 'b', 'c', 'd'] + it_str = '12345' + it_gen = (x for x in 
range(3))
+        actual = list(mi.interleave_longest(it_list, it_str, it_gen))
+        expected = ['a', '1', 0, 'b', '2', 1, 'c', '3', 2, 'd', '4', '5']
+        self.assertEqual(actual, expected)
+
+
+class TestCollapse(TestCase):
+    """Tests for ``collapse()``"""
+
+    def test_collapse(self):
+        l = [[1], 2, [[3], 4], [[[5]]]]
+        self.assertEqual(list(mi.collapse(l)), [1, 2, 3, 4, 5])
+
+    def test_collapse_to_string(self):
+        l = [["s1"], "s2", [["s3"], "s4"], [[["s5"]]]]
+        self.assertEqual(list(mi.collapse(l)), ["s1", "s2", "s3", "s4", "s5"])
+
+    def test_collapse_flatten(self):
+        l = [[1], [2], [[3], 4], [[[5]]]]
+        self.assertEqual(list(mi.collapse(l, levels=1)), list(mi.flatten(l)))
+
+    def test_collapse_to_level(self):
+        l = [[1], 2, [[3], 4], [[[5]]]]
+        self.assertEqual(list(mi.collapse(l, levels=2)), [1, 2, 3, 4, [5]])
+        self.assertEqual(
+            list(mi.collapse(mi.collapse(l, levels=1), levels=1)),
+            list(mi.collapse(l, levels=2))
+        )
+
+    def test_collapse_to_list(self):
+        l = (1, [2], (3, [4, (5,)], 'ab'))
+        actual = list(mi.collapse(l, base_type=list))
+        expected = [1, [2], 3, [4, (5,)], 'ab']
+        self.assertEqual(actual, expected)
+
+
+class SideEffectTests(TestCase):
+    """Tests for ``side_effect()``"""
+
+    def test_individual(self):
+        # The function increments the counter for each call
+        counter = [0]
+
+        def func(arg):
+            counter[0] += 1
+
+        result = list(mi.side_effect(func, range(10)))
+        self.assertEqual(result, list(range(10)))
+        self.assertEqual(counter[0], 10)
+
+    def test_chunked(self):
+        # The function increments the counter for each call
+        counter = [0]
+
+        def func(arg):
+            counter[0] += 1
+
+        result = list(mi.side_effect(func, range(10), 2))
+        self.assertEqual(result, list(range(10)))
+        self.assertEqual(counter[0], 5)
+
+    def test_before_after(self):
+        f = StringIO()
+        collector = []
+
+        def func(item):
+            print(item, file=f)
+            collector.append(f.getvalue())
+
+        def it():
+            yield 'a'
+            yield 'b'
+            raise RuntimeError('kaboom')
+
+        before = lambda: print('HEADER', file=f)
+        after = f.close
+
+        try:
+            mi.consume(mi.side_effect(func, it(), before=before, after=after))
+        except RuntimeError:
+            pass
+
+        # The iterable should have been written to the file
+        self.assertEqual(collector, ['HEADER\na\n', 'HEADER\na\nb\n'])
+
+        # The file should be closed even though something bad happened
+        self.assertTrue(f.closed)
+
+    def test_before_fails(self):
+        f = StringIO()
+        func = lambda x: print(x, file=f)
+
+        def before():
+            raise RuntimeError('ouch')
+
+        try:
+            mi.consume(
+                mi.side_effect(func, 'abc', before=before, after=f.close)
+            )
+        except RuntimeError:
+            pass
+
+        # The file should be closed even though something bad happened in the
+        # before function
+        self.assertTrue(f.closed)
+
+
+class SlicedTests(TestCase):
+    """Tests for ``sliced()``"""
+
+    def test_even(self):
+        """Test when the length of the sequence is divisible by *n*"""
+        seq = 'ABCDEFGHI'
+        self.assertEqual(list(mi.sliced(seq, 3)), ['ABC', 'DEF', 'GHI'])
+
+    def test_odd(self):
+        """Test when the length of the sequence is not divisible by *n*"""
+        seq = 'ABCDEFGHI'
+        self.assertEqual(list(mi.sliced(seq, 4)), ['ABCD', 'EFGH', 'I'])
+
+    def test_not_sliceable(self):
+        seq = (x for x in 'ABCDEFGHI')
+
+        with self.assertRaises(TypeError):
+            list(mi.sliced(seq, 3))
+
+
+class SplitAtTests(TestCase):
+    """Tests for ``split_at()``"""
+
+    def comp_with_str_split(self, str_to_split, delim):
+        pred = lambda c: c == delim
+        actual = list(map(''.join, mi.split_at(str_to_split, pred)))
+        expected = str_to_split.split(delim)
+        self.assertEqual(actual, expected)
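+
+    # Illustrative sketch, not part of upstream more_itertools: the helper
+    # above mirrors str.split() because ``split_at(iterable, pred)`` yields
+    # the groups of items found between elements for which ``pred`` is true,
+    # which is exactly how ``str.split()`` treats a delimiter. The test name
+    # below is our own, added for illustration.
+    def test_matches_str_split_example(self):
+        actual = list(map(''.join, mi.split_at('a.b.c', lambda c: c == '.')))
+        self.assertEqual(actual, 'a.b.c'.split('.'))
+        self.assertEqual(actual, ['a', 'b', 'c'])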
+
+    def test_separators(self):
+        test_strs = ['', 'abcba', 'aaabbbcccddd', 'e']
+        for s, delim in product(test_strs, 'abcd'):
+            self.comp_with_str_split(s, delim)
+
+
+class SplitBeforeTest(TestCase):
+    """Tests for ``split_before()``"""
+
+    def test_starts_with_sep(self):
+        actual = list(mi.split_before('xooxoo', lambda c: c == 'x'))
+        expected = [['x', 'o', 'o'], ['x', 'o', 'o']]
+        self.assertEqual(actual, expected)
+
+    def test_ends_with_sep(self):
+        actual = list(mi.split_before('ooxoox', lambda c: c == 'x'))
+        expected = [['o', 'o'], ['x', 'o', 'o'], ['x']]
+        self.assertEqual(actual, expected)
+
+    def test_no_sep(self):
+        actual = list(mi.split_before('ooo', lambda c: c == 'x'))
+        expected = [['o', 'o', 'o']]
+        self.assertEqual(actual, expected)
+
+
+class SplitAfterTest(TestCase):
+    """Tests for ``split_after()``"""
+
+    def test_starts_with_sep(self):
+        actual = list(mi.split_after('xooxoo', lambda c: c == 'x'))
+        expected = [['x'], ['o', 'o', 'x'], ['o', 'o']]
+        self.assertEqual(actual, expected)
+
+    def test_ends_with_sep(self):
+        actual = list(mi.split_after('ooxoox', lambda c: c == 'x'))
+        expected = [['o', 'o', 'x'], ['o', 'o', 'x']]
+        self.assertEqual(actual, expected)
+
+    def test_no_sep(self):
+        actual = list(mi.split_after('ooo', lambda c: c == 'x'))
+        expected = [['o', 'o', 'o']]
+        self.assertEqual(actual, expected)
+
+
+class SplitIntoTests(TestCase):
+    """Tests for ``split_into()``"""
+
+    def test_iterable_just_right(self):
+        """Size of ``iterable`` equals the sum of ``sizes``."""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = [2, 3, 4]
+        expected = [[1, 2], [3, 4, 5], [6, 7, 8, 9]]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_iterable_too_small(self):
+        """Size of ``iterable`` is smaller than the sum of ``sizes``. The last
+        list returned is shorter as a result."""
+        iterable = [1, 2, 3, 4, 5, 6, 7]
+        sizes = [2, 3, 4]
+        expected = [[1, 2], [3, 4, 5], [6, 7]]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_iterable_too_small_extra(self):
+        """Size of ``iterable`` is smaller than the sum of ``sizes``. The
+        second-to-last list returned is shorter and the last list returned is
+        empty as a result."""
+        iterable = [1, 2, 3, 4, 5, 6, 7]
+        sizes = [2, 3, 4, 5]
+        expected = [[1, 2], [3, 4, 5], [6, 7], []]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_iterable_too_large(self):
+        """Size of ``iterable`` is larger than the sum of ``sizes``. Not all
+        items of iterable are returned."""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = [2, 3, 2]
+        expected = [[1, 2], [3, 4, 5], [6, 7]]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_using_none_with_leftover(self):
+        """Last item of ``sizes`` is None when items still remain in
+        ``iterable``. Last list returned stretches to fit all remaining items
+        of ``iterable``."""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = [2, 3, None]
+        expected = [[1, 2], [3, 4, 5], [6, 7, 8, 9]]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_using_none_without_leftover(self):
+        """Last item of ``sizes`` is None when no items remain in
+        ``iterable``. Last list returned is empty."""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = [2, 3, 4, None]
+        expected = [[1, 2], [3, 4, 5], [6, 7, 8, 9], []]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_using_none_mid_sizes(self):
+        """None is present in ``sizes`` but is not the last item. Last list
+        returned stretches to fit all remaining items of ``iterable`` but
+        all items in ``sizes`` after None are ignored."""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = [2, 3, None, 4]
+        expected = [[1, 2], [3, 4, 5], [6, 7, 8, 9]]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_iterable_empty(self):
+        """``iterable`` argument is empty but ``sizes`` is not. An empty
+        list is returned for each item in ``sizes``."""
+        iterable = []
+        sizes = [2, 4, 2]
+        expected = [[], [], []]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_iterable_empty_using_none(self):
+        """``iterable`` argument is empty but ``sizes`` is not. An empty
+        list is returned for each item in ``sizes`` that is not after a
+        None item."""
+        iterable = []
+        sizes = [2, 4, None, 2]
+        expected = [[], [], []]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_sizes_empty(self):
+        """``sizes`` argument is empty but ``iterable`` is not. An empty
+        generator is returned."""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = []
+        expected = []
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_both_empty(self):
+        """Both ``sizes`` and ``iterable`` arguments are empty. An empty
+        generator is returned."""
+        iterable = []
+        sizes = []
+        expected = []
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_bool_in_sizes(self):
+        """A bool object present in ``sizes`` is treated as 1 or 0 for
+        ``True`` or ``False`` due to bool being an instance of int."""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = [3, True, 2, False]
+        expected = [[1, 2, 3], [4], [5, 6], []]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_invalid_in_sizes(self):
+        """A ValueError is raised if an object in ``sizes`` is neither ``None``
+        nor an integer."""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = [1, [], 3]
+        with self.assertRaises(ValueError):
+            list(mi.split_into(iterable, sizes))
+
+    def test_invalid_in_sizes_after_none(self):
+        """An item in ``sizes`` that is invalid will not raise a ValueError if
+        it comes after a ``None`` item."""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = [3, 4, None, []]
+        expected = [[1, 2, 3], [4, 5, 6, 7], [8, 9]]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+    def test_generator_iterable_integrity(self):
+        """Check that if ``iterable`` is an iterator, only as many of its
+        items as the sum of ``sizes`` are consumed."""
+        iterable = (i for i in range(10))
+        sizes = [2, 3]
+
+        expected = [[0, 1], [2, 3, 4]]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+        iterable_expected = [5, 6, 7, 8, 9]
+        iterable_actual = list(iterable)
+        self.assertEqual(iterable_actual, iterable_expected)
+
+    def test_generator_sizes_integrity(self):
+        """Check that if ``sizes`` is an iterator, it is consumed only until a
+        ``None`` item is reached"""
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+        sizes = (i for i in [1, 2, None, 3, 4])
+
+        expected = [[1], [2, 3], [4, 5, 6, 7, 8, 9]]
+        actual = list(mi.split_into(iterable, sizes))
+        self.assertEqual(actual, expected)
+
+        sizes_expected = [3, 4]
+        sizes_actual = list(sizes)
+        self.assertEqual(sizes_actual, sizes_expected)
+
+
+class PaddedTest(TestCase):
+    """Tests for ``padded()``"""
+
+    def test_no_n(self):
+        seq = [1, 2, 3]
+
+        # No fillvalue
+        self.assertEqual(mi.take(5, mi.padded(seq)), [1, 2, 3, None, None])
+
+        # With fillvalue
+        self.assertEqual(
+            mi.take(5, mi.padded(seq, fillvalue='')), [1, 2, 3, '', '']
+        )
+
+    def test_invalid_n(self):
+        self.assertRaises(ValueError, lambda: list(mi.padded([1, 2, 3], n=-1)))
+        self.assertRaises(ValueError, lambda: list(mi.padded([1, 2, 3], n=0)))
+
+    def test_valid_n(self):
+        seq = [1, 2, 3, 4, 5]
+
+        # No need for padding: len(seq) >= n
+        self.assertEqual(list(mi.padded(seq, n=4)), [1, 2, 3, 4, 5])
+        self.assertEqual(list(mi.padded(seq, n=5)), [1, 2, 3, 4, 5])
+
+        # No fillvalue
+        self.assertEqual(
+            list(mi.padded(seq, n=7)), [1, 2, 3, 4, 5, None, None]
+        )
+
+        # With fillvalue
+        self.assertEqual(
+            list(mi.padded(seq, fillvalue='', n=7)), [1, 2, 3, 4, 5, '', '']
+        )
+
+    def test_next_multiple(self):
+        seq = [1, 2, 3, 4, 5, 6]
+
+        # No need for padding: len(seq) % n == 0
+        self.assertEqual(
+            list(mi.padded(seq, n=3, next_multiple=True)), [1, 2, 3, 4, 5, 6]
+        )
+
+        # Padding needed: len(seq) < n
+        self.assertEqual(
+            list(mi.padded(seq, n=8, next_multiple=True)),
+            [1, 2, 3, 4, 5, 6, None, None]
+        )
+
+        # No padding needed: len(seq) == n
+        self.assertEqual(
+            list(mi.padded(seq, n=6, next_multiple=True)), [1, 2, 3, 4, 5, 6]
+        )
+
+        # Padding needed: len(seq) > n
+        self.assertEqual(
+            list(mi.padded(seq, n=4, next_multiple=True)),
+            [1, 2, 3, 4, 5, 6, None, None]
+        )
+
+        # With fillvalue
+        self.assertEqual(
+            list(mi.padded(seq, fillvalue='', n=4, next_multiple=True)),
+            [1, 2, 3, 4, 5, 6, '', '']
+        )
+
+
+class DistributeTest(TestCase):
+    """Tests for distribute()"""
+
+    def test_invalid_n(self):
+        self.assertRaises(ValueError, lambda: mi.distribute(-1, [1, 2, 3]))
+        self.assertRaises(ValueError, lambda: mi.distribute(0, [1, 2, 3]))
+
+    def test_basic(self):
+        iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+
+        for n, expected in [
+            (1, [iterable]),
+            (2, [[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]]),
+            (3, [[1, 4, 7, 10], [2, 5, 8], [3, 6, 9]]),
+            (10, [[n] for n in range(1, 10 + 1)]),
+        ]:
+            self.assertEqual(
+                [list(x) for x in mi.distribute(n, iterable)], expected
+            )
+
+    def test_large_n(self):
+        iterable = [1, 2, 3, 4]
+        self.assertEqual(
+            [list(x) for x in mi.distribute(6, iterable)],
+            [[1], [2], [3], [4], [], []]
+        )
+
+
+class StaggerTest(TestCase):
+    """Tests for ``stagger()``"""
+
+    def test_default(self):
+        iterable = [0, 1, 2, 3]
+        actual = list(mi.stagger(iterable))
+        expected = [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
+        self.assertEqual(actual, expected)
+
+    def test_offsets(self):
+        iterable = [0, 1, 2, 3]
+        for offsets, expected in [
+            ((-2, 0, 2), [('', 0, 2), ('', 1, 3)]),
+            ((-2, -1), [('', ''), ('', 0), (0, 1), (1, 2), (2, 3)]),
+            ((1, 2), [(1, 2), (2, 3)]),
+        ]:
+            all_groups = mi.stagger(iterable, offsets=offsets, fillvalue='')
+            self.assertEqual(list(all_groups), expected)
+
+    def test_longest(self):
+        iterable = [0, 1, 2, 3]
+        for offsets, expected in [
+            (
+                (-1, 0, 1),
+                [('', 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, ''), (3, '', '')]
+            ),
+            ((-2, -1), [('', ''), ('', 0), (0, 1), (1, 2), (2, 3), (3, '')]),
+            ((1, 2), [(1, 2), (2, 3), (3, '')]),
+        ]:
+            all_groups = mi.stagger(
+                iterable, offsets=offsets, fillvalue='', longest=True
+ ) + self.assertEqual(list(all_groups), expected) + + +class ZipOffsetTest(TestCase): + """Tests for ``zip_offset()``""" + + def test_shortest(self): + a_1 = [0, 1, 2, 3] + a_2 = [0, 1, 2, 3, 4, 5] + a_3 = [0, 1, 2, 3, 4, 5, 6, 7] + actual = list( + mi.zip_offset(a_1, a_2, a_3, offsets=(-1, 0, 1), fillvalue='') + ) + expected = [('', 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5)] + self.assertEqual(actual, expected) + + def test_longest(self): + a_1 = [0, 1, 2, 3] + a_2 = [0, 1, 2, 3, 4, 5] + a_3 = [0, 1, 2, 3, 4, 5, 6, 7] + actual = list( + mi.zip_offset(a_1, a_2, a_3, offsets=(-1, 0, 1), longest=True) + ) + expected = [ + (None, 0, 1), + (0, 1, 2), + (1, 2, 3), + (2, 3, 4), + (3, 4, 5), + (None, 5, 6), + (None, None, 7), + ] + self.assertEqual(actual, expected) + + def test_mismatch(self): + iterables = [0, 1, 2], [2, 3, 4] + offsets = (-1, 0, 1) + self.assertRaises( + ValueError, + lambda: list(mi.zip_offset(*iterables, offsets=offsets)) + ) + + +class UnzipTests(TestCase): + """Tests for unzip()""" + + def test_empty_iterable(self): + self.assertEqual(list(mi.unzip([])), []) + # in reality zip([], [], []) is equivalent to iter([]) + # but it doesn't hurt to test both + self.assertEqual(list(mi.unzip(zip([], [], []))), []) + + def test_length_one_iterable(self): + xs, ys, zs = mi.unzip(zip([1], [2], [3])) + self.assertEqual(list(xs), [1]) + self.assertEqual(list(ys), [2]) + self.assertEqual(list(zs), [3]) + + def test_normal_case(self): + xs, ys, zs = range(10), range(1, 11), range(2, 12) + zipped = zip(xs, ys, zs) + xs, ys, zs = mi.unzip(zipped) + self.assertEqual(list(xs), list(range(10))) + self.assertEqual(list(ys), list(range(1, 11))) + self.assertEqual(list(zs), list(range(2, 12))) + + def test_improperly_zipped(self): + zipped = iter([(1, 2, 3), (4, 5), (6,)]) + xs, ys, zs = mi.unzip(zipped) + self.assertEqual(list(xs), [1, 4, 6]) + self.assertEqual(list(ys), [2, 5]) + self.assertEqual(list(zs), [3]) + + def test_increasingly_zipped(self): + zipped = iter([(1, 2), (3, 4, 5), (6, 7, 8, 9)]) + unzipped = mi.unzip(zipped) + # from the docstring: + # len(first tuple) is the number of iterables zipped + self.assertEqual(len(unzipped), 2) + xs, ys = unzipped + self.assertEqual(list(xs), [1, 3, 6]) + self.assertEqual(list(ys), [2, 4, 7]) + + +class SortTogetherTest(TestCase): + """Tests for sort_together()""" + + def test_key_list(self): + """tests `key_list` including default, iterables include duplicates""" + iterables = [ + ['GA', 'GA', 'GA', 'CT', 'CT', 'CT'], + ['May', 'Aug.', 'May', 'June', 'July', 'July'], + [97, 20, 100, 70, 100, 20] + ] + + self.assertEqual( + mi.sort_together(iterables), + [ + ('CT', 'CT', 'CT', 'GA', 'GA', 'GA'), + ('June', 'July', 'July', 'May', 'Aug.', 'May'), + (70, 100, 20, 97, 20, 100) + ] + ) + + self.assertEqual( + mi.sort_together(iterables, key_list=(0, 1)), + [ + ('CT', 'CT', 'CT', 'GA', 'GA', 'GA'), + ('July', 'July', 'June', 'Aug.', 'May', 'May'), + (100, 20, 70, 20, 97, 100) + ] + ) + + self.assertEqual( + mi.sort_together(iterables, key_list=(0, 1, 2)), + [ + ('CT', 'CT', 'CT', 'GA', 'GA', 'GA'), + ('July', 'July', 'June', 'Aug.', 'May', 'May'), + (20, 100, 70, 20, 97, 100) + ] + ) + + self.assertEqual( + mi.sort_together(iterables, key_list=(2,)), + [ + ('GA', 'CT', 'CT', 'GA', 'GA', 'CT'), + ('Aug.', 'July', 'June', 'May', 'May', 'July'), + (20, 20, 70, 97, 100, 100) + ] + ) + + def test_invalid_key_list(self): + """tests `key_list` for indexes not available in `iterables`""" + iterables = [ + ['GA', 'GA', 'GA', 'CT', 'CT', 'CT'], + 
['May', 'Aug.', 'May', 'June', 'July', 'July'], + [97, 20, 100, 70, 100, 20] + ] + + self.assertRaises( + IndexError, lambda: mi.sort_together(iterables, key_list=(5,)) + ) + + def test_reverse(self): + """tests `reverse` to ensure a reverse sort for `key_list` iterables""" + iterables = [ + ['GA', 'GA', 'GA', 'CT', 'CT', 'CT'], + ['May', 'Aug.', 'May', 'June', 'July', 'July'], + [97, 20, 100, 70, 100, 20] + ] + + self.assertEqual( + mi.sort_together(iterables, key_list=(0, 1, 2), reverse=True), + [('GA', 'GA', 'GA', 'CT', 'CT', 'CT'), + ('May', 'May', 'Aug.', 'June', 'July', 'July'), + (100, 97, 20, 70, 100, 20)] + ) + + def test_uneven_iterables(self): + """tests trimming of iterables to the shortest length before sorting""" + iterables = [['GA', 'GA', 'GA', 'CT', 'CT', 'CT', 'MA'], + ['May', 'Aug.', 'May', 'June', 'July', 'July'], + [97, 20, 100, 70, 100, 20, 0]] + + self.assertEqual( + mi.sort_together(iterables), + [ + ('CT', 'CT', 'CT', 'GA', 'GA', 'GA'), + ('June', 'July', 'July', 'May', 'Aug.', 'May'), + (70, 100, 20, 97, 20, 100) + ] + ) + + +class DivideTest(TestCase): + """Tests for divide()""" + + def test_invalid_n(self): + self.assertRaises(ValueError, lambda: mi.divide(-1, [1, 2, 3])) + self.assertRaises(ValueError, lambda: mi.divide(0, [1, 2, 3])) + + def test_basic(self): + iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + + for n, expected in [ + (1, [iterable]), + (2, [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]), + (3, [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]]), + (10, [[n] for n in range(1, 10 + 1)]), + ]: + self.assertEqual( + [list(x) for x in mi.divide(n, iterable)], expected + ) + + def test_large_n(self): + iterable = [1, 2, 3, 4] + self.assertEqual( + [list(x) for x in mi.divide(6, iterable)], + [[1], [2], [3], [4], [], []] + ) + + +class TestAlwaysIterable(TestCase): + """Tests for always_iterable()""" + def test_single(self): + self.assertEqual(list(mi.always_iterable(1)), [1]) + + def test_strings(self): + for obj in ['foo', b'bar', 'baz']: + actual = list(mi.always_iterable(obj)) + expected = [obj] + self.assertEqual(actual, expected) + + def test_base_type(self): + dict_obj = {'a': 1, 'b': 2} + str_obj = '123' + + # Default: dicts are iterable like they normally are + default_actual = list(mi.always_iterable(dict_obj)) + default_expected = list(dict_obj) + self.assertEqual(default_actual, default_expected) + + # Unitary types set: dicts are not iterable + custom_actual = list(mi.always_iterable(dict_obj, base_type=dict)) + custom_expected = [dict_obj] + self.assertEqual(custom_actual, custom_expected) + + # With unitary types set, strings are iterable + str_actual = list(mi.always_iterable(str_obj, base_type=None)) + str_expected = list(str_obj) + self.assertEqual(str_actual, str_expected) + + def test_iterables(self): + self.assertEqual(list(mi.always_iterable([0, 1])), [0, 1]) + self.assertEqual( + list(mi.always_iterable([0, 1], base_type=list)), [[0, 1]] + ) + self.assertEqual( + list(mi.always_iterable(iter('foo'))), ['f', 'o', 'o'] + ) + self.assertEqual(list(mi.always_iterable([])), []) + + def test_none(self): + self.assertEqual(list(mi.always_iterable(None)), []) + + def test_generator(self): + def _gen(): + yield 0 + yield 1 + + self.assertEqual(list(mi.always_iterable(_gen())), [0, 1]) + + +class AdjacentTests(TestCase): + def test_typical(self): + actual = list(mi.adjacent(lambda x: x % 5 == 0, range(10))) + expected = [(True, 0), (True, 1), (False, 2), (False, 3), (True, 4), + (True, 5), (True, 6), (False, 7), (False, 8), (False, 9)] + self.assertEqual(actual, 
expected) + + def test_empty_iterable(self): + actual = list(mi.adjacent(lambda x: x % 5 == 0, [])) + expected = [] + self.assertEqual(actual, expected) + + def test_length_one(self): + actual = list(mi.adjacent(lambda x: x % 5 == 0, [0])) + expected = [(True, 0)] + self.assertEqual(actual, expected) + + actual = list(mi.adjacent(lambda x: x % 5 == 0, [1])) + expected = [(False, 1)] + self.assertEqual(actual, expected) + + def test_consecutive_true(self): + """Test that when the predicate matches multiple consecutive elements + it doesn't repeat elements in the output""" + actual = list(mi.adjacent(lambda x: x % 5 < 2, range(10))) + expected = [(True, 0), (True, 1), (True, 2), (False, 3), (True, 4), + (True, 5), (True, 6), (True, 7), (False, 8), (False, 9)] + self.assertEqual(actual, expected) + + def test_distance(self): + actual = list(mi.adjacent(lambda x: x % 5 == 0, range(10), distance=2)) + expected = [(True, 0), (True, 1), (True, 2), (True, 3), (True, 4), + (True, 5), (True, 6), (True, 7), (False, 8), (False, 9)] + self.assertEqual(actual, expected) + + actual = list(mi.adjacent(lambda x: x % 5 == 0, range(10), distance=3)) + expected = [(True, 0), (True, 1), (True, 2), (True, 3), (True, 4), + (True, 5), (True, 6), (True, 7), (True, 8), (False, 9)] + self.assertEqual(actual, expected) + + def test_large_distance(self): + """Test distance larger than the length of the iterable""" + iterable = range(10) + actual = list(mi.adjacent(lambda x: x % 5 == 4, iterable, distance=20)) + expected = list(zip(repeat(True), iterable)) + self.assertEqual(actual, expected) + + actual = list(mi.adjacent(lambda x: False, iterable, distance=20)) + expected = list(zip(repeat(False), iterable)) + self.assertEqual(actual, expected) + + def test_zero_distance(self): + """Test that adjacent() reduces to zip+map when distance is 0""" + iterable = range(1000) + predicate = lambda x: x % 4 == 2 + actual = mi.adjacent(predicate, iterable, 0) + expected = zip(map(predicate, iterable), iterable) + self.assertTrue(all(a == e for a, e in zip(actual, expected))) + + def test_negative_distance(self): + """Test that adjacent() raises an error with negative distance""" + pred = lambda x: x + self.assertRaises( + ValueError, lambda: mi.adjacent(pred, range(1000), -1) + ) + self.assertRaises( + ValueError, lambda: mi.adjacent(pred, range(10), -10) + ) + + def test_grouping(self): + """Test interaction of adjacent() with groupby_transform()""" + iterable = mi.adjacent(lambda x: x % 5 == 0, range(10)) + grouper = mi.groupby_transform(iterable, itemgetter(0), itemgetter(1)) + actual = [(k, list(g)) for k, g in grouper] + expected = [ + (True, [0, 1]), + (False, [2, 3]), + (True, [4, 5, 6]), + (False, [7, 8, 9]), + ] + self.assertEqual(actual, expected) + + def test_call_once(self): + """Test that the predicate is only called once per item.""" + already_seen = set() + iterable = range(10) + + def predicate(item): + self.assertNotIn(item, already_seen) + already_seen.add(item) + return True + + actual = list(mi.adjacent(predicate, iterable)) + expected = [(True, x) for x in iterable] + self.assertEqual(actual, expected) + + +class GroupByTransformTests(TestCase): + def assertAllGroupsEqual(self, groupby1, groupby2): + """Compare two groupby objects for equality, both keys and groups.""" + for a, b in zip(groupby1, groupby2): + key1, group1 = a + key2, group2 = b + self.assertEqual(key1, key2) + self.assertListEqual(list(group1), list(group2)) + self.assertRaises(StopIteration, lambda: next(groupby1)) + 
self.assertRaises(StopIteration, lambda: next(groupby2)) + + def test_default_funcs(self): + """Test that groupby_transform() with default args mimics groupby()""" + iterable = [(x // 5, x) for x in range(1000)] + actual = mi.groupby_transform(iterable) + expected = groupby(iterable) + self.assertAllGroupsEqual(actual, expected) + + def test_valuefunc(self): + iterable = [(int(x / 5), int(x / 3), x) for x in range(10)] + + # Test the standard usage of grouping one iterable using another's keys + grouper = mi.groupby_transform( + iterable, keyfunc=itemgetter(0), valuefunc=itemgetter(-1) + ) + actual = [(k, list(g)) for k, g in grouper] + expected = [(0, [0, 1, 2, 3, 4]), (1, [5, 6, 7, 8, 9])] + self.assertEqual(actual, expected) + + grouper = mi.groupby_transform( + iterable, keyfunc=itemgetter(1), valuefunc=itemgetter(-1) + ) + actual = [(k, list(g)) for k, g in grouper] + expected = [(0, [0, 1, 2]), (1, [3, 4, 5]), (2, [6, 7, 8]), (3, [9])] + self.assertEqual(actual, expected) + + # and now for something a little different + d = dict(zip(range(10), 'abcdefghij')) + grouper = mi.groupby_transform( + range(10), keyfunc=lambda x: x // 5, valuefunc=d.get + ) + actual = [(k, ''.join(g)) for k, g in grouper] + expected = [(0, 'abcde'), (1, 'fghij')] + self.assertEqual(actual, expected) + + def test_no_valuefunc(self): + iterable = range(1000) + + def key(x): + return x // 5 + + actual = mi.groupby_transform(iterable, key, valuefunc=None) + expected = groupby(iterable, key) + self.assertAllGroupsEqual(actual, expected) + + actual = mi.groupby_transform(iterable, key) # default valuefunc + expected = groupby(iterable, key) + self.assertAllGroupsEqual(actual, expected) + + +class NumericRangeTests(TestCase): + def test_basic(self): + for args, expected in [ + ((4,), [0, 1, 2, 3]), + ((4.0,), [0.0, 1.0, 2.0, 3.0]), + ((1.0, 4), [1.0, 2.0, 3.0]), + ((1, 4.0), [1, 2, 3]), + ((1.0, 5), [1.0, 2.0, 3.0, 4.0]), + ((0, 20, 5), [0, 5, 10, 15]), + ((0, 20, 5.0), [0.0, 5.0, 10.0, 15.0]), + ((0, 10, 3), [0, 3, 6, 9]), + ((0, 10, 3.0), [0.0, 3.0, 6.0, 9.0]), + ((0, -5, -1), [0, -1, -2, -3, -4]), + ((0.0, -5, -1), [0.0, -1.0, -2.0, -3.0, -4.0]), + ((1, 2, Fraction(1, 2)), [Fraction(1, 1), Fraction(3, 2)]), + ((0,), []), + ((0.0,), []), + ((1, 0), []), + ((1.0, 0.0), []), + ((Fraction(2, 1),), [Fraction(0, 1), Fraction(1, 1)]), + ((Decimal('2.0'),), [Decimal('0.0'), Decimal('1.0')]), + ]: + actual = list(mi.numeric_range(*args)) + self.assertEqual(actual, expected) + self.assertTrue( + all(type(a) == type(e) for a, e in zip(actual, expected)) + ) + + def test_arg_count(self): + self.assertRaises(TypeError, lambda: list(mi.numeric_range())) + self.assertRaises( + TypeError, lambda: list(mi.numeric_range(0, 1, 2, 3)) + ) + + def test_zero_step(self): + self.assertRaises( + ValueError, lambda: list(mi.numeric_range(1, 2, 0)) + ) + + +class CountCycleTests(TestCase): + def test_basic(self): + expected = [ + (0, 'a'), (0, 'b'), (0, 'c'), + (1, 'a'), (1, 'b'), (1, 'c'), + (2, 'a'), (2, 'b'), (2, 'c'), + ] + for actual in [ + mi.take(9, mi.count_cycle('abc')), # n=None + list(mi.count_cycle('abc', 3)), # n=3 + ]: + self.assertEqual(actual, expected) + + def test_empty(self): + self.assertEqual(list(mi.count_cycle('')), []) + self.assertEqual(list(mi.count_cycle('', 2)), []) + + def test_negative(self): + self.assertEqual(list(mi.count_cycle('abc', -3)), []) + + +class LocateTests(TestCase): + def test_default_pred(self): + iterable = [0, 1, 1, 0, 1, 0, 0] + actual = list(mi.locate(iterable)) + expected = [1, 2, 4] + 
self.assertEqual(actual, expected) + + def test_no_matches(self): + iterable = [0, 0, 0] + actual = list(mi.locate(iterable)) + expected = [] + self.assertEqual(actual, expected) + + def test_custom_pred(self): + iterable = ['0', 1, 1, '0', 1, '0', '0'] + pred = lambda x: x == '0' + actual = list(mi.locate(iterable, pred)) + expected = [0, 3, 5, 6] + self.assertEqual(actual, expected) + + def test_window_size(self): + iterable = ['0', 1, 1, '0', 1, '0', '0'] + pred = lambda *args: args == ('0', 1) + actual = list(mi.locate(iterable, pred, window_size=2)) + expected = [0, 3] + self.assertEqual(actual, expected) + + def test_window_size_large(self): + iterable = [1, 2, 3, 4] + pred = lambda a, b, c, d, e: True + actual = list(mi.locate(iterable, pred, window_size=5)) + expected = [0] + self.assertEqual(actual, expected) + + def test_window_size_zero(self): + iterable = [1, 2, 3, 4] + pred = lambda: True + with self.assertRaises(ValueError): + list(mi.locate(iterable, pred, window_size=0)) + + +class StripFunctionTests(TestCase): + def test_hashable(self): + iterable = list('www.example.com') + pred = lambda x: x in set('cmowz.') + + self.assertEqual(list(mi.lstrip(iterable, pred)), list('example.com')) + self.assertEqual(list(mi.rstrip(iterable, pred)), list('www.example')) + self.assertEqual(list(mi.strip(iterable, pred)), list('example')) + + def test_not_hashable(self): + iterable = [ + list('http://'), list('www'), list('.example'), list('.com') + ] + pred = lambda x: x in [list('http://'), list('www'), list('.com')] + + self.assertEqual(list(mi.lstrip(iterable, pred)), iterable[2:]) + self.assertEqual(list(mi.rstrip(iterable, pred)), iterable[:3]) + self.assertEqual(list(mi.strip(iterable, pred)), iterable[2: 3]) + + def test_math(self): + iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] + pred = lambda x: x <= 2 + + self.assertEqual(list(mi.lstrip(iterable, pred)), iterable[3:]) + self.assertEqual(list(mi.rstrip(iterable, pred)), iterable[:-3]) + self.assertEqual(list(mi.strip(iterable, pred)), iterable[3:-3]) + + +class IsliceExtendedTests(TestCase): + def test_all(self): + iterable = ['0', '1', '2', '3', '4', '5'] + indexes = list(range(-4, len(iterable) + 4)) + [None] + steps = [1, 2, 3, 4, -1, -2, -3, 4] + for slice_args in product(indexes, indexes, steps): + try: + actual = list(mi.islice_extended(iterable, *slice_args)) + except Exception as e: + self.fail((slice_args, e)) + + expected = iterable[slice(*slice_args)] + self.assertEqual(actual, expected, slice_args) + + def test_zero_step(self): + with self.assertRaises(ValueError): + list(mi.islice_extended([1, 2, 3], 0, 1, 0)) + + +class ConsecutiveGroupsTest(TestCase): + def test_numbers(self): + iterable = [-10, -8, -7, -6, 1, 2, 4, 5, -1, 7] + actual = [list(g) for g in mi.consecutive_groups(iterable)] + expected = [[-10], [-8, -7, -6], [1, 2], [4, 5], [-1], [7]] + self.assertEqual(actual, expected) + + def test_custom_ordering(self): + iterable = ['1', '10', '11', '20', '21', '22', '30', '31'] + ordering = lambda x: int(x) + actual = [list(g) for g in mi.consecutive_groups(iterable, ordering)] + expected = [['1'], ['10', '11'], ['20', '21', '22'], ['30', '31']] + self.assertEqual(actual, expected) + + def test_exotic_ordering(self): + iterable = [ + ('a', 'b', 'c', 'd'), + ('a', 'c', 'b', 'd'), + ('a', 'c', 'd', 'b'), + ('a', 'd', 'b', 'c'), + ('d', 'b', 'c', 'a'), + ('d', 'c', 'a', 'b'), + ] + ordering = list(permutations('abcd')).index + actual = [list(g) for g in mi.consecutive_groups(iterable, ordering)] + expected = [ + 
[('a', 'b', 'c', 'd')], + [('a', 'c', 'b', 'd'), ('a', 'c', 'd', 'b'), ('a', 'd', 'b', 'c')], + [('d', 'b', 'c', 'a'), ('d', 'c', 'a', 'b')], + ] + self.assertEqual(actual, expected) + + +class DifferenceTest(TestCase): + def test_normal(self): + iterable = [10, 20, 30, 40, 50] + actual = list(mi.difference(iterable)) + expected = [10, 10, 10, 10, 10] + self.assertEqual(actual, expected) + + def test_custom(self): + iterable = [10, 20, 30, 40, 50] + actual = list(mi.difference(iterable, add)) + expected = [10, 30, 50, 70, 90] + self.assertEqual(actual, expected) + + def test_roundtrip(self): + original = list(range(100)) + accumulated = mi.accumulate(original) + actual = list(mi.difference(accumulated)) + self.assertEqual(actual, original) + + def test_one(self): + self.assertEqual(list(mi.difference([0])), [0]) + + def test_empty(self): + self.assertEqual(list(mi.difference([])), []) + + +class SeekableTest(TestCase): + def test_exhaustion_reset(self): + iterable = [str(n) for n in range(10)] + + s = mi.seekable(iterable) + self.assertEqual(list(s), iterable) # Normal iteration + self.assertEqual(list(s), []) # Iterable is exhausted + + s.seek(0) + self.assertEqual(list(s), iterable) # Back in action + + def test_partial_reset(self): + iterable = [str(n) for n in range(10)] + + s = mi.seekable(iterable) + self.assertEqual(mi.take(5, s), iterable[:5]) # Normal iteration + + s.seek(1) + self.assertEqual(list(s), iterable[1:]) # Get the rest of the iterable + + def test_forward(self): + iterable = [str(n) for n in range(10)] + + s = mi.seekable(iterable) + self.assertEqual(mi.take(1, s), iterable[:1]) # Normal iteration + + s.seek(3) # Skip over index 2 + self.assertEqual(list(s), iterable[3:]) # Result is similar to slicing + + s.seek(0) # Back to 0 + self.assertEqual(list(s), iterable) # No difference in result + + def test_past_end(self): + iterable = [str(n) for n in range(10)] + + s = mi.seekable(iterable) + self.assertEqual(mi.take(1, s), iterable[:1]) # Normal iteration + + s.seek(20) + self.assertEqual(list(s), []) # Iterable is exhausted + + s.seek(0) # Back to 0 + self.assertEqual(list(s), iterable) # No difference in result + + def test_elements(self): + iterable = map(str, count()) + + s = mi.seekable(iterable) + mi.take(10, s) + + elements = s.elements() + self.assertEqual( + [elements[i] for i in range(10)], [str(n) for n in range(10)] + ) + self.assertEqual(len(elements), 10) + + mi.take(10, s) + self.assertEqual(list(elements), [str(n) for n in range(20)]) + + +class SequenceViewTests(TestCase): + def test_init(self): + view = mi.SequenceView((1, 2, 3)) + self.assertEqual(repr(view), "SequenceView((1, 2, 3))") + self.assertRaises(TypeError, lambda: mi.SequenceView({})) + + def test_update(self): + seq = [1, 2, 3] + view = mi.SequenceView(seq) + self.assertEqual(len(view), 3) + self.assertEqual(repr(view), "SequenceView([1, 2, 3])") + + seq.pop() + self.assertEqual(len(view), 2) + self.assertEqual(repr(view), "SequenceView([1, 2])") + + def test_indexing(self): + seq = ('a', 'b', 'c', 'd', 'e', 'f') + view = mi.SequenceView(seq) + for i in range(-len(seq), len(seq)): + self.assertEqual(view[i], seq[i]) + + def test_slicing(self): + seq = ('a', 'b', 'c', 'd', 'e', 'f') + view = mi.SequenceView(seq) + n = len(seq) + indexes = list(range(-n - 1, n + 1)) + [None] + steps = list(range(-n, n + 1)) + steps.remove(0) + for slice_args in product(indexes, indexes, steps): + i = slice(*slice_args) + self.assertEqual(view[i], seq[i]) + + def test_abc_methods(self): + # 
collections.Sequence should provide all of this functionality
+        seq = ('a', 'b', 'c', 'd', 'e', 'f', 'f')
+        view = mi.SequenceView(seq)
+
+        # __contains__
+        self.assertIn('b', view)
+        self.assertNotIn('g', view)
+
+        # __iter__
+        self.assertEqual(list(iter(view)), list(seq))
+
+        # __reversed__
+        self.assertEqual(list(reversed(view)), list(reversed(seq)))
+
+        # index
+        self.assertEqual(view.index('b'), 1)
+
+        # count
+        self.assertEqual(view.count('f'), 2)
+
+
+class RunLengthTest(TestCase):
+    def test_encode(self):
+        iterable = (int(str(n)[0]) for n in count(800))
+        actual = mi.take(4, mi.run_length.encode(iterable))
+        expected = [(8, 100), (9, 100), (1, 1000), (2, 1000)]
+        self.assertEqual(actual, expected)
+
+    def test_decode(self):
+        iterable = [('d', 4), ('c', 3), ('b', 2), ('a', 1)]
+        actual = ''.join(mi.run_length.decode(iterable))
+        expected = 'ddddcccbba'
+        self.assertEqual(actual, expected)
+
+
+class ExactlyNTests(TestCase):
+    """Tests for ``exactly_n()``"""
+
+    def test_true(self):
+        """Iterable has ``n`` ``True`` elements"""
+        self.assertTrue(mi.exactly_n([True, False, True], 2))
+        self.assertTrue(mi.exactly_n([1, 1, 1, 0], 3))
+        self.assertTrue(mi.exactly_n([False, False], 0))
+        self.assertTrue(mi.exactly_n(range(100), 10, lambda x: x < 10))
+
+    def test_false(self):
+        """Iterable does not have ``n`` ``True`` elements"""
+        self.assertFalse(mi.exactly_n([True, False, False], 2))
+        self.assertFalse(mi.exactly_n([True, True, False], 1))
+        self.assertFalse(mi.exactly_n([False], 1))
+        self.assertFalse(mi.exactly_n([True], -1))
+        self.assertFalse(mi.exactly_n(repeat(True), 100))
+
+    def test_empty(self):
+        """Return ``True`` if the iterable is empty and ``n`` is 0"""
+        self.assertTrue(mi.exactly_n([], 0))
+        self.assertFalse(mi.exactly_n([], 1))
+
+
+class AlwaysReversibleTests(TestCase):
+    """Tests for ``always_reversible()``"""
+
+    def test_regular_reversed(self):
+        self.assertEqual(list(reversed(range(10))),
+                         list(mi.always_reversible(range(10))))
+        self.assertEqual(list(reversed([1, 2, 3])),
+                         list(mi.always_reversible([1, 2, 3])))
+        self.assertEqual(reversed([1, 2, 3]).__class__,
+                         mi.always_reversible([1, 2, 3]).__class__)
+
+    def test_nonseq_reversed(self):
+        # Create a non-reversible generator from a sequence
+        with self.assertRaises(TypeError):
+            reversed(x for x in range(10))
+
+        self.assertEqual(list(reversed(range(10))),
+                         list(mi.always_reversible(x for x in range(10))))
+        self.assertEqual(list(reversed([1, 2, 3])),
+                         list(mi.always_reversible(x for x in [1, 2, 3])))
+        self.assertNotEqual(reversed((1, 2)).__class__,
+                            mi.always_reversible(x for x in (1, 2)).__class__)
+
+
+class CircularShiftsTests(TestCase):
+    def test_empty(self):
+        # empty iterable -> empty list
+        self.assertEqual(list(mi.circular_shifts([])), [])
+
+    def test_simple_circular_shifts(self):
+        # test a simple iterator case
+        self.assertEqual(
+            mi.circular_shifts(range(4)),
+            [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
+        )
+
+    def test_duplicates(self):
+        # test non-distinct entries
+        self.assertEqual(
+            mi.circular_shifts([0, 1, 0, 1]),
+            [(0, 1, 0, 1), (1, 0, 1, 0), (0, 1, 0, 1), (1, 0, 1, 0)]
+        )
+
+
+class MakeDecoratorTests(TestCase):
+    def test_basic(self):
+        slicer = mi.make_decorator(islice)
+
+        @slicer(1, 10, 2)
+        def user_function(arg_1, arg_2, kwarg_1=None):
+            self.assertEqual(arg_1, 'arg_1')
+            self.assertEqual(arg_2, 'arg_2')
+            self.assertEqual(kwarg_1, 'kwarg_1')
+            return map(str, count())
+
+        it = user_function('arg_1', 'arg_2', kwarg_1='kwarg_1')
+        actual = list(it)
+        expected = ['1', '3', '5', '7', '9']
+        self.assertEqual(actual, expected)
+
+    def test_result_index(self):
+        def stringify(*args, **kwargs):
+            self.assertEqual(args[0], 'arg_0')
+            iterable = args[1]
+            self.assertEqual(args[2], 'arg_2')
+            self.assertEqual(kwargs['kwarg_1'], 'kwarg_1')
+            return map(str, iterable)
+
+        stringifier = mi.make_decorator(stringify, result_index=1)
+
+        @stringifier('arg_0', 'arg_2', kwarg_1='kwarg_1')
+        def user_function(n):
+            return count(n)
+
+        it = user_function(1)
+        actual = mi.take(5, it)
+        expected = ['1', '2', '3', '4', '5']
+        self.assertEqual(actual, expected)
+
+    def test_wrap_class(self):
+        seeker = mi.make_decorator(mi.seekable)
+
+        @seeker()
+        def user_function(n):
+            return map(str, range(n))
+
+        it = user_function(5)
+        self.assertEqual(list(it), ['0', '1', '2', '3', '4'])
+
+        it.seek(0)
+        self.assertEqual(list(it), ['0', '1', '2', '3', '4'])
+
+
+class MapReduceTests(TestCase):
+    def test_default(self):
+        iterable = (str(x) for x in range(5))
+        keyfunc = lambda x: int(x) // 2
+        actual = sorted(mi.map_reduce(iterable, keyfunc).items())
+        expected = [(0, ['0', '1']), (1, ['2', '3']), (2, ['4'])]
+        self.assertEqual(actual, expected)
+
+    def test_valuefunc(self):
+        iterable = (str(x) for x in range(5))
+        keyfunc = lambda x: int(x) // 2
+        valuefunc = int
+        actual = sorted(mi.map_reduce(iterable, keyfunc, valuefunc).items())
+        expected = [(0, [0, 1]), (1, [2, 3]), (2, [4])]
+        self.assertEqual(actual, expected)
+
+    def test_reducefunc(self):
+        iterable = (str(x) for x in range(5))
+        keyfunc = lambda x: int(x) // 2
+        valuefunc = int
+        reducefunc = lambda value_list: reduce(mul, value_list, 1)
+        actual = sorted(
+            mi.map_reduce(iterable, keyfunc, valuefunc, reducefunc).items()
+        )
+        expected = [(0, 0), (1, 6), (2, 4)]
+        self.assertEqual(actual, expected)
+
+    def test_ret(self):
+        d = mi.map_reduce([1, 0, 2, 0, 1, 0], bool)
+        self.assertEqual(d, {False: [0, 0, 0], True: [1, 2, 1]})
+        self.assertRaises(KeyError, lambda: d[None].append(1))
+
+
+class RlocateTests(TestCase):
+    def test_default_pred(self):
+        iterable = [0, 1, 1, 0, 1, 0, 0]
+        for it in (iterable[:], iter(iterable)):
+            actual = list(mi.rlocate(it))
+            expected = [4, 2, 1]
+            self.assertEqual(actual, expected)
+
+    def test_no_matches(self):
+        iterable = [0, 0, 0]
+        for it in (iterable[:], iter(iterable)):
+            actual = list(mi.rlocate(it))
+            expected = []
+            self.assertEqual(actual, expected)
+
+    def test_custom_pred(self):
+        iterable = ['0', 1, 1, '0', 1, '0', '0']
+        pred = lambda x: x == '0'
+        for it in (iterable[:], iter(iterable)):
+            actual = list(mi.rlocate(it, pred))
+            expected = [6, 5, 3, 0]
+            self.assertEqual(actual, expected)
+
+    def test_efficient_reversal(self):
+        iterable = range(9 ** 9)  # Is efficiently reversible
+        target = 9 ** 9 - 2
+        pred = lambda x: x == target  # Find-able from the right
+        actual = next(mi.rlocate(iterable, pred))
+        self.assertEqual(actual, target)
+
+    def test_window_size(self):
+        iterable = ['0', 1, 1, '0', 1, '0', '0']
+        pred = lambda *args: args == ('0', 1)
+        for it in (iterable, iter(iterable)):
+            actual = list(mi.rlocate(it, pred, window_size=2))
+            expected = [3, 0]
+            self.assertEqual(actual, expected)
+
+    def test_window_size_large(self):
+        iterable = [1, 2, 3, 4]
+        pred = lambda a, b, c, d, e: True
+        for it in (iterable, iter(iterable)):
+            actual = list(mi.rlocate(it, pred, window_size=5))
+            expected = [0]
+            self.assertEqual(actual, expected)
+
+    def test_window_size_zero(self):
+        iterable = [1, 2, 3, 4]
+        pred = lambda: True
+        for it in (iterable, iter(iterable)):
+            with self.assertRaises(ValueError):
+                list(mi.rlocate(it, pred, window_size=0))
+
+
+class ReplaceTests(TestCase):
+    def test_basic(self):
+        iterable = range(10)
+        pred = lambda x: x % 2 == 0
+        substitutes = []
+        actual = list(mi.replace(iterable, pred, substitutes))
+        expected = [1, 3, 5, 7, 9]
+        self.assertEqual(actual, expected)
+
+    def test_count(self):
+        iterable = range(10)
+        pred = lambda x: x % 2 == 0
+        substitutes = []
+        actual = list(mi.replace(iterable, pred, substitutes, count=4))
+        expected = [1, 3, 5, 7, 8, 9]
+        self.assertEqual(actual, expected)
+
+    def test_window_size(self):
+        iterable = range(10)
+        pred = lambda *args: args == (0, 1, 2)
+        substitutes = []
+        actual = list(mi.replace(iterable, pred, substitutes, window_size=3))
+        expected = [3, 4, 5, 6, 7, 8, 9]
+        self.assertEqual(actual, expected)
+
+    def test_window_size_end(self):
+        iterable = range(10)
+        pred = lambda *args: args == (7, 8, 9)
+        substitutes = []
+        actual = list(mi.replace(iterable, pred, substitutes, window_size=3))
+        expected = [0, 1, 2, 3, 4, 5, 6]
+        self.assertEqual(actual, expected)
+
+    def test_window_size_count(self):
+        iterable = range(10)
+        pred = lambda *args: (args == (0, 1, 2)) or (args == (7, 8, 9))
+        substitutes = []
+        actual = list(
+            mi.replace(iterable, pred, substitutes, count=1, window_size=3)
+        )
+        expected = [3, 4, 5, 6, 7, 8, 9]
+        self.assertEqual(actual, expected)
+
+    def test_window_size_large(self):
+        iterable = range(4)
+        pred = lambda a, b, c, d, e: True
+        substitutes = [5, 6, 7]
+        actual = list(mi.replace(iterable, pred, substitutes, window_size=5))
+        expected = [5, 6, 7]
+        self.assertEqual(actual, expected)
+
+    def test_window_size_zero(self):
+        iterable = range(10)
+        pred = lambda *args: True
+        substitutes = []
+        with self.assertRaises(ValueError):
+            list(mi.replace(iterable, pred, substitutes, window_size=0))
+
+    def test_iterable_substitutes(self):
+        iterable = range(5)
+        pred = lambda x: x % 2 == 0
+        substitutes = iter('__')
+        actual = list(mi.replace(iterable, pred, substitutes))
+        expected = ['_', '_', 1, '_', '_', 3, '_', '_']
+        self.assertEqual(actual, expected)
diff --git a/pipenv/vendor/more_itertools/tests/test_recipes.py b/pipenv/vendor/more_itertools/tests/test_recipes.py
new file mode 100644
index 0000000000..b3cfb62f46
--- /dev/null
+++ b/pipenv/vendor/more_itertools/tests/test_recipes.py
@@ -0,0 +1,616 @@
+from doctest import DocTestSuite
+from unittest import TestCase
+
+from itertools import combinations
+from six.moves import range
+
+import more_itertools as mi
+
+
+def load_tests(loader, tests, ignore):
+    # Add the doctests
+    tests.addTests(DocTestSuite('more_itertools.recipes'))
+    return tests
+
+
+class AccumulateTests(TestCase):
+    """Tests for ``accumulate()``"""
+
+    def test_empty(self):
+        """Test that an empty input returns an empty output"""
+        self.assertEqual(list(mi.accumulate([])), [])
+
+    def test_default(self):
+        """Test accumulate with the default function (addition)"""
+        self.assertEqual(list(mi.accumulate([1, 2, 3])), [1, 3, 6])
+
+    def test_bogus_function(self):
+        """Test accumulate with an invalid function"""
+        with self.assertRaises(TypeError):
+            list(mi.accumulate([1, 2, 3], func=lambda x: x))
+
+    def test_custom_function(self):
+        """Test accumulate with a custom function"""
+        self.assertEqual(
+            list(mi.accumulate((1, 2, 3, 2, 1), func=max)), [1, 2, 3, 3, 3]
+        )
+
+
+class TakeTests(TestCase):
+    """Tests for ``take()``"""
+
+    def test_simple_take(self):
+        """Test basic usage"""
+        t = mi.take(5, range(10))
+        
self.assertEqual(t, [0, 1, 2, 3, 4]) + + def test_null_take(self): + """Check the null case""" + t = mi.take(0, range(10)) + self.assertEqual(t, []) + + def test_negative_take(self): + """Make sure taking negative items results in a ValueError""" + self.assertRaises(ValueError, lambda: mi.take(-3, range(10))) + + def test_take_too_much(self): + """Taking more than an iterator has remaining should return what the + iterator has remaining. + + """ + t = mi.take(10, range(5)) + self.assertEqual(t, [0, 1, 2, 3, 4]) + + +class TabulateTests(TestCase): + """Tests for ``tabulate()``""" + + def test_simple_tabulate(self): + """Test the happy path""" + t = mi.tabulate(lambda x: x) + f = tuple([next(t) for _ in range(3)]) + self.assertEqual(f, (0, 1, 2)) + + def test_count(self): + """Ensure tabulate accepts specific count""" + t = mi.tabulate(lambda x: 2 * x, -1) + f = (next(t), next(t), next(t)) + self.assertEqual(f, (-2, 0, 2)) + + +class TailTests(TestCase): + """Tests for ``tail()``""" + + def test_greater(self): + """Length of iterable is greater than requested tail""" + self.assertEqual(list(mi.tail(3, 'ABCDEFG')), ['E', 'F', 'G']) + + def test_equal(self): + """Length of iterable is equal to the requested tail""" + self.assertEqual( + list(mi.tail(7, 'ABCDEFG')), ['A', 'B', 'C', 'D', 'E', 'F', 'G'] + ) + + def test_less(self): + """Length of iterable is less than requested tail""" + self.assertEqual( + list(mi.tail(8, 'ABCDEFG')), ['A', 'B', 'C', 'D', 'E', 'F', 'G'] + ) + + +class ConsumeTests(TestCase): + """Tests for ``consume()``""" + + def test_sanity(self): + """Test basic functionality""" + r = (x for x in range(10)) + mi.consume(r, 3) + self.assertEqual(3, next(r)) + + def test_null_consume(self): + """Check the null case""" + r = (x for x in range(10)) + mi.consume(r, 0) + self.assertEqual(0, next(r)) + + def test_negative_consume(self): + """Check that negative consumption throws an error""" + r = (x for x in range(10)) + self.assertRaises(ValueError, lambda: mi.consume(r, -1)) + + def test_total_consume(self): + """Check that iterator is totally consumed by default""" + r = (x for x in range(10)) + mi.consume(r) + self.assertRaises(StopIteration, lambda: next(r)) + + +class NthTests(TestCase): + """Tests for ``nth()``""" + + def test_basic(self): + """Make sure the nth item is returned""" + l = range(10) + for i, v in enumerate(l): + self.assertEqual(mi.nth(l, i), v) + + def test_default(self): + """Ensure a default value is returned when nth item not found""" + l = range(3) + self.assertEqual(mi.nth(l, 100, "zebra"), "zebra") + + def test_negative_item_raises(self): + """Ensure asking for a negative item raises an exception""" + self.assertRaises(ValueError, lambda: mi.nth(range(10), -3)) + + +class AllEqualTests(TestCase): + """Tests for ``all_equal()``""" + + def test_true(self): + """Everything is equal""" + self.assertTrue(mi.all_equal('aaaaaa')) + self.assertTrue(mi.all_equal([0, 0, 0, 0])) + + def test_false(self): + """Not everything is equal""" + self.assertFalse(mi.all_equal('aaaaab')) + self.assertFalse(mi.all_equal([0, 0, 0, 1])) + + def test_tricky(self): + """Not everything is identical, but everything is equal""" + items = [1, complex(1, 0), 1.0] + self.assertTrue(mi.all_equal(items)) + + def test_empty(self): + """Return True if the iterable is empty""" + self.assertTrue(mi.all_equal('')) + self.assertTrue(mi.all_equal([])) + + def test_one(self): + """Return True if the iterable is singular""" + self.assertTrue(mi.all_equal('0')) + self.assertTrue(mi.all_equal([0])) 
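+ + # Illustrative sketch (editor's addition, not part of the vendored + # more-itertools test suite): a doctest-style summary of the recipes + # exercised above, assuming ``more_itertools`` is importable as ``mi`` + # (vendored here as ``pipenv.vendor.more_itertools``): + # + # >>> import more_itertools as mi + # >>> mi.take(3, range(10)) + # [0, 1, 2] + # >>> list(mi.tail(2, 'ABCDE')) + # ['D', 'E'] + # >>> mi.nth(range(5), 10, 'missing') # default returned past the end + # 'missing' + # >>> it = iter(range(10)) + # >>> mi.consume(it, 3) # advances the iterator in place + # >>> next(it) + # 3 + # >>> mi.all_equal('aaaa') + # True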
+ + +class QuantifyTests(TestCase): + """Tests for ``quantify()``""" + + def test_happy_path(self): + """Make sure True count is returned""" + q = [True, False, True] + self.assertEqual(mi.quantify(q), 2) + + def test_custom_predicate(self): + """Ensure non-default predicates return as expected""" + q = range(10) + self.assertEqual(mi.quantify(q, lambda x: x % 2 == 0), 5) + + +class PadnoneTests(TestCase): + """Tests for ``padnone()``""" + + def test_happy_path(self): + """wrapper iterator should return None indefinitely""" + r = range(2) + p = mi.padnone(r) + self.assertEqual([0, 1, None, None], [next(p) for _ in range(4)]) + + +class NcyclesTests(TestCase): + """Tests for ``ncycles()``""" + + def test_happy_path(self): + """cycle a sequence three times""" + r = ["a", "b", "c"] + n = mi.ncycles(r, 3) + self.assertEqual( + ["a", "b", "c", "a", "b", "c", "a", "b", "c"], + list(n) + ) + + def test_null_case(self): + """asking for 0 cycles should return an empty iterator""" + n = mi.ncycles(range(100), 0) + self.assertRaises(StopIteration, lambda: next(n)) + + def test_pathalogical_case(self): + """asking for negative cycles should return an empty iterator""" + n = mi.ncycles(range(100), -10) + self.assertRaises(StopIteration, lambda: next(n)) + + +class DotproductTests(TestCase): + """Tests for ``dotproduct()``""" + + def test_happy_path(self): + """simple dotproduct example""" + self.assertEqual(400, mi.dotproduct([10, 10], [20, 20])) + + +class FlattenTests(TestCase): + """Tests for ``flatten()``""" + + def test_basic_usage(self): + """ensure list of lists is flattened one level""" + f = [[0, 1, 2], [3, 4, 5]] + self.assertEqual(list(range(6)), list(mi.flatten(f))) + + def test_single_level(self): + """ensure list of lists is flattened only one level""" + f = [[0, [1, 2]], [[3, 4], 5]] + self.assertEqual([0, [1, 2], [3, 4], 5], list(mi.flatten(f))) + + +class RepeatfuncTests(TestCase): + """Tests for ``repeatfunc()``""" + + def test_simple_repeat(self): + """test simple repeated functions""" + r = mi.repeatfunc(lambda: 5) + self.assertEqual([5, 5, 5, 5, 5], [next(r) for _ in range(5)]) + + def test_finite_repeat(self): + """ensure limited repeat when times is provided""" + r = mi.repeatfunc(lambda: 5, times=5) + self.assertEqual([5, 5, 5, 5, 5], list(r)) + + def test_added_arguments(self): + """ensure arguments are applied to the function""" + r = mi.repeatfunc(lambda x: x, 2, 3) + self.assertEqual([3, 3], list(r)) + + def test_null_times(self): + """repeat 0 should return an empty iterator""" + r = mi.repeatfunc(range, 0, 3) + self.assertRaises(StopIteration, lambda: next(r)) + + +class PairwiseTests(TestCase): + """Tests for ``pairwise()``""" + + def test_base_case(self): + """ensure an iterable will return pairwise""" + p = mi.pairwise([1, 2, 3]) + self.assertEqual([(1, 2), (2, 3)], list(p)) + + def test_short_case(self): + """ensure an empty iterator if there aren't enough values to pair""" + p = mi.pairwise("a") + self.assertRaises(StopIteration, lambda: next(p)) + + +class GrouperTests(TestCase): + """Tests for ``grouper()``""" + + def test_even(self): + """Test when group size divides evenly into the length of + the iterable. + + """ + self.assertEqual( + list(mi.grouper(3, 'ABCDEF')), [('A', 'B', 'C'), ('D', 'E', 'F')] + ) + + def test_odd(self): + """Test when group size does not divide evenly into the length of the + iterable. 
+ + """ + self.assertEqual( + list(mi.grouper(3, 'ABCDE')), [('A', 'B', 'C'), ('D', 'E', None)] + ) + + def test_fill_value(self): + """Test that the fill value is used to pad the final group""" + self.assertEqual( + list(mi.grouper(3, 'ABCDE', 'x')), + [('A', 'B', 'C'), ('D', 'E', 'x')] + ) + + +class RoundrobinTests(TestCase): + """Tests for ``roundrobin()``""" + + def test_even_groups(self): + """Ensure ordered output from evenly populated iterables""" + self.assertEqual( + list(mi.roundrobin('ABC', [1, 2, 3], range(3))), + ['A', 1, 0, 'B', 2, 1, 'C', 3, 2] + ) + + def test_uneven_groups(self): + """Ensure ordered output from unevenly populated iterables""" + self.assertEqual( + list(mi.roundrobin('ABCD', [1, 2], range(0))), + ['A', 1, 'B', 2, 'C', 'D'] + ) + + +class PartitionTests(TestCase): + """Tests for ``partition()``""" + + def test_bool(self): + """Test when pred() returns a boolean""" + lesser, greater = mi.partition(lambda x: x > 5, range(10)) + self.assertEqual(list(lesser), [0, 1, 2, 3, 4, 5]) + self.assertEqual(list(greater), [6, 7, 8, 9]) + + def test_arbitrary(self): + """Test when pred() returns an integer""" + divisibles, remainders = mi.partition(lambda x: x % 3, range(10)) + self.assertEqual(list(divisibles), [0, 3, 6, 9]) + self.assertEqual(list(remainders), [1, 2, 4, 5, 7, 8]) + + +class PowersetTests(TestCase): + """Tests for ``powerset()``""" + + def test_combinatorics(self): + """Ensure a proper enumeration""" + p = mi.powerset([1, 2, 3]) + self.assertEqual( + list(p), + [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] + ) + + +class UniqueEverseenTests(TestCase): + """Tests for ``unique_everseen()``""" + + def test_everseen(self): + """ensure duplicate elements are ignored""" + u = mi.unique_everseen('AAAABBBBCCDAABBB') + self.assertEqual( + ['A', 'B', 'C', 'D'], + list(u) + ) + + def test_custom_key(self): + """ensure the custom key comparison works""" + u = mi.unique_everseen('aAbACCc', key=str.lower) + self.assertEqual(list('abC'), list(u)) + + def test_unhashable(self): + """ensure things work for unhashable items""" + iterable = ['a', [1, 2, 3], [1, 2, 3], 'a'] + u = mi.unique_everseen(iterable) + self.assertEqual(list(u), ['a', [1, 2, 3]]) + + def test_unhashable_key(self): + """ensure things work for unhashable items with a custom key""" + iterable = ['a', [1, 2, 3], [1, 2, 3], 'a'] + u = mi.unique_everseen(iterable, key=lambda x: x) + self.assertEqual(list(u), ['a', [1, 2, 3]]) + + +class UniqueJustseenTests(TestCase): + """Tests for ``unique_justseen()``""" + + def test_justseen(self): + """ensure only last item is remembered""" + u = mi.unique_justseen('AAAABBBCCDABB') + self.assertEqual(list('ABCDAB'), list(u)) + + def test_custom_key(self): + """ensure the custom key comparison works""" + u = mi.unique_justseen('AABCcAD', str.lower) + self.assertEqual(list('ABCAD'), list(u)) + + +class IterExceptTests(TestCase): + """Tests for ``iter_except()``""" + + def test_exact_exception(self): + """ensure the exact specified exception is caught""" + l = [1, 2, 3] + i = mi.iter_except(l.pop, IndexError) + self.assertEqual(list(i), [3, 2, 1]) + + def test_generic_exception(self): + """ensure the generic exception can be caught""" + l = [1, 2] + i = mi.iter_except(l.pop, Exception) + self.assertEqual(list(i), [2, 1]) + + def test_uncaught_exception_is_raised(self): + """ensure a non-specified exception is raised""" + l = [1, 2, 3] + i = mi.iter_except(l.pop, KeyError) + self.assertRaises(IndexError, lambda: list(i)) + + def test_first(self): + """ensure 
first is run before the function""" + l = [1, 2, 3] + f = lambda: 25 + i = mi.iter_except(l.pop, IndexError, f) + self.assertEqual(list(i), [25, 3, 2, 1]) + + +class FirstTrueTests(TestCase): + """Tests for ``first_true()``""" + + def test_something_true(self): + """Test with no keywords""" + self.assertEqual(mi.first_true(range(10)), 1) + + def test_nothing_true(self): + """Test default return value.""" + self.assertIsNone(mi.first_true([0, 0, 0])) + + def test_default(self): + """Test with a default keyword""" + self.assertEqual(mi.first_true([0, 0, 0], default='!'), '!') + + def test_pred(self): + """Test with a custom predicate""" + self.assertEqual( + mi.first_true([2, 4, 6], pred=lambda x: x % 3 == 0), 6 + ) + + +class RandomProductTests(TestCase): + """Tests for ``random_product()`` + + Since random.choice() has different results with the same seed across + python versions 2.x and 3.x, these tests use highly probable events to + create predictable outcomes across platforms. + """ + + def test_simple_lists(self): + """Ensure that one item is chosen from each list in each pair. + Also ensure that each item from each list eventually appears in + the chosen combinations. + + Odds are roughly 1 in 7.1 * 10e16 that one item from either list will + not be chosen after 100 samplings of one item from each list. Just to + be safe, better use a known random seed, too. + + """ + nums = [1, 2, 3] + lets = ['a', 'b', 'c'] + n, m = zip(*[mi.random_product(nums, lets) for _ in range(100)]) + n, m = set(n), set(m) + self.assertEqual(n, set(nums)) + self.assertEqual(m, set(lets)) + self.assertEqual(len(n), len(nums)) + self.assertEqual(len(m), len(lets)) + + def test_list_with_repeat(self): + """ensure multiple items are chosen, and that they appear to be chosen + from one list then the next, in proper order. + + """ + nums = [1, 2, 3] + lets = ['a', 'b', 'c'] + r = list(mi.random_product(nums, lets, repeat=100)) + self.assertEqual(2 * 100, len(r)) + n, m = set(r[::2]), set(r[1::2]) + self.assertEqual(n, set(nums)) + self.assertEqual(m, set(lets)) + self.assertEqual(len(n), len(nums)) + self.assertEqual(len(m), len(lets)) + + +class RandomPermutationTests(TestCase): + """Tests for ``random_permutation()``""" + + def test_full_permutation(self): + """ensure every item from the iterable is returned in a new ordering + + 15 elements have a 1 in 1.3 * 10e12 chance of appearing in sorted + order, so we fix a seed value just to be sure. + + """ + i = range(15) + r = mi.random_permutation(i) + self.assertEqual(set(i), set(r)) + if i == r: + raise AssertionError("Values were not permuted") + + def test_partial_permutation(self): + """ensure all returned items are from the iterable, that the returned + permutation is of the desired length, and that all items eventually + get returned. + + Sampling 100 permutations of length 5 from a set of 15 leaves a + (2/3)^100 chance that an item will not be chosen. Multiplied by 15 + items, there is a 1 in 2.6e16 chance that at least 1 item will not + show up in the resulting output. Using a random seed will fix that. 
+ + """ + items = range(15) + item_set = set(items) + all_items = set() + for _ in range(100): + permutation = mi.random_permutation(items, 5) + self.assertEqual(len(permutation), 5) + permutation_set = set(permutation) + self.assertLessEqual(permutation_set, item_set) + all_items |= permutation_set + self.assertEqual(all_items, item_set) + + +class RandomCombinationTests(TestCase): + """Tests for ``random_combination()``""" + + def test_pseudorandomness(self): + """ensure different subsets of the iterable get returned over many + samplings of random combinations""" + items = range(15) + all_items = set() + for _ in range(50): + combination = mi.random_combination(items, 5) + all_items |= set(combination) + self.assertEqual(all_items, set(items)) + + def test_no_replacement(self): + """ensure that elements are sampled without replacement""" + items = range(15) + for _ in range(50): + combination = mi.random_combination(items, len(items)) + self.assertEqual(len(combination), len(set(combination))) + self.assertRaises( + ValueError, lambda: mi.random_combination(items, len(items) + 1) + ) + + +class RandomCombinationWithReplacementTests(TestCase): + """Tests for ``random_combination_with_replacement()``""" + + def test_replacement(self): + """ensure that elements are sampled with replacement""" + items = range(5) + combo = mi.random_combination_with_replacement(items, len(items) * 2) + self.assertEqual(2 * len(items), len(combo)) + if len(set(combo)) == len(combo): + raise AssertionError("Combination contained no duplicates") + + def test_pseudorandomness(self): + """ensure different subsets of the iterable get returned over many + samplings of random combinations""" + items = range(15) + all_items = set() + for _ in range(50): + combination = mi.random_combination_with_replacement(items, 5) + all_items |= set(combination) + self.assertEqual(all_items, set(items)) + + +class NthCombinationTests(TestCase): + def test_basic(self): + iterable = 'abcdefg' + r = 4 + for index, expected in enumerate(combinations(iterable, r)): + actual = mi.nth_combination(iterable, r, index) + self.assertEqual(actual, expected) + + def test_long(self): + actual = mi.nth_combination(range(180), 4, 2000000) + expected = (2, 12, 35, 126) + self.assertEqual(actual, expected) + + def test_invalid_r(self): + for r in (-1, 3): + with self.assertRaises(ValueError): + mi.nth_combination([], r, 0) + + def test_invalid_index(self): + with self.assertRaises(IndexError): + mi.nth_combination('abcdefg', 3, -36) + + +class PrependTests(TestCase): + def test_basic(self): + value = 'a' + iterator = iter('bcdefg') + actual = list(mi.prepend(value, iterator)) + expected = list('abcdefg') + self.assertEqual(actual, expected) + + def test_multiple(self): + value = 'ab' + iterator = iter('cdefg') + actual = tuple(mi.prepend(value, iterator)) + expected = ('ab',) + tuple('cdefg') + self.assertEqual(actual, expected) diff --git a/pipenv/vendor/requirementslib/models/dependencies.py b/pipenv/vendor/requirementslib/models/dependencies.py index 2b42df8954..b8af28e905 100644 --- a/pipenv/vendor/requirementslib/models/dependencies.py +++ b/pipenv/vendor/requirementslib/models/dependencies.py @@ -11,7 +11,6 @@ import packaging.version import pip_shims.shims import requests -from first import first from packaging.utils import canonicalize_name from vistir.compat import JSONDecodeError, fs_str from vistir.contextmanagers import cd, temp_environ @@ -20,6 +19,7 @@ from ..environment import MYPY_RUNNING from ..utils import _ensure_dir, 
prepare_pip_source_args from .cache import CACHE_DIR, DependencyCache +from .setup_info import SetupInfo from .utils import ( clean_requires_python, fix_requires_python_marker, @@ -139,9 +139,9 @@ def compatible_versions(self, other): :rtype: set(str) """ - if len(self.candidates) == 1 and first(self.candidates).editable: + if len(self.candidates) == 1 and next(iter(self.candidates)).editable: return self - elif len(other.candidates) == 1 and first(other.candidates).editable: + elif len(other.candidates) == 1 and next(iter(other.candidates)).editable: return other return self.version_set & other.version_set @@ -158,9 +158,9 @@ def compatible_abstract_dep(self, other): from .requirements import Requirement - if len(self.candidates) == 1 and first(self.candidates).editable: + if len(self.candidates) == 1 and next(iter(self.candidates)).editable: return self - elif len(other.candidates) == 1 and first(other.candidates).editable: + elif len(other.candidates) == 1 and next(iter(other.candidates)).editable: return other new_specifiers = self.specifiers & other.specifiers markers = set(self.markers) if self.markers else set() @@ -475,90 +475,19 @@ def get_dependencies_from_index(dep, sources=None, pip_options=None, wheel_cache if not wheel_cache: wheel_cache = WHEEL_CACHE dep.is_direct = True - reqset = pip_shims.shims.RequirementSet() - reqset.add_requirement(dep) requirements = None setup_requires = {} - with temp_environ(), start_resolver( - finder=finder, session=session, wheel_cache=wheel_cache - ) as resolver: + with temp_environ(): os.environ["PIP_EXISTS_ACTION"] = "i" - dist = None if dep.editable and not dep.prepared and not dep.req: - with cd(dep.setup_py_dir): - from setuptools.dist import distutils - - try: - dist = distutils.core.run_setup(dep.setup_py) - except (ImportError, TypeError, AttributeError): - dist = None - else: - setup_requires[dist.get_name()] = dist.setup_requires - if not dist: - try: - dist = dep.get_dist() - except (TypeError, ValueError, AttributeError): - pass - else: - setup_requires[dist.get_name()] = dist.setup_requires - resolver.require_hashes = False - try: - results = resolver._resolve_one(reqset, dep) - except Exception: - # FIXME: Needs to bubble the exception somehow to the user. - results = [] - finally: - try: - wheel_cache.cleanup() - except AttributeError: - pass - resolver_requires_python = getattr(resolver, "requires_python", None) - requires_python = getattr(reqset, "requires_python", resolver_requires_python) - if requires_python: - add_marker = fix_requires_python_marker(requires_python) - reqset.remove(dep) - if dep.req.marker: - dep.req.marker._markers.extend(["and"].extend(add_marker._markers)) - else: - dep.req.marker = add_marker - reqset.add(dep) - requirements = set() - for r in results: - if requires_python: - if r.req.marker: - r.req.marker._markers.extend(["and"].extend(add_marker._markers)) - else: - r.req.marker = add_marker - requirements.add(format_requirement(r)) - for section in setup_requires: - python_version = section - not_python = not is_python(section) - - # This is for cleaning up :extras: formatted markers - # by adding them to the results of the resolver - # since any such extra would have been returned as a result anyway - for value in setup_requires[section]: - - # This is a marker. 
- if is_python(section): - python_version = value[1:-1] - else: - not_python = True - - if ":" not in value and not_python: - try: - requirement_str = "{0}{1}".format(value, python_version).replace( - ":", ";" - ) - requirements.add( - format_requirement( - make_install_requirement(requirement_str).ireq - ) - ) - # Anything could go wrong here -- can't be too careful. - except Exception: - pass - + setup_info = SetupInfo.from_ireq(dep) + results = setup_info.get_info() + setup_requires.update(results["setup_requires"]) + requirements = set(results["requires"].values()) + else: + results = pip_shims.shims.resolve(dep) + requirements = [v for v in results.values() if v.name != dep.name] + requirements = set([format_requirement(r) for r in requirements]) if not dep.editable and is_pinned_requirement(dep) and requirements is not None: DEPENDENCY_CACHE[dep] = list(requirements) return requirements @@ -685,10 +614,10 @@ def get_grouped_dependencies(constraints): # then we take the loose match (which _is_ flexible) and start moving backwards in # versions by popping them off of a stack and checking for the conflicting package for _, ireqs in full_groupby(constraints, key=key_from_ireq): - ireqs = list(ireqs) - editable_ireq = first(ireqs, key=lambda ireq: ireq.editable) + ireqs = sorted(ireqs, key=lambda ireq: ireq.editable) + editable_ireq = next(iter(ireq for ireq in ireqs if ireq.editable), None) if editable_ireq: - yield editable_ireq # ignore all the other specs: the editable one is the one that counts + yield editable_ireq # only the editable match matters, ignore all others continue ireqs = iter(ireqs) # deepcopy the accumulator so as to not modify the self.our_constraints invariant diff --git a/pipenv/vendor/requirementslib/models/markers.py b/pipenv/vendor/requirementslib/models/markers.py index c7649b77d6..6e46b518ec 100644 --- a/pipenv/vendor/requirementslib/models/markers.py +++ b/pipenv/vendor/requirementslib/models/markers.py @@ -11,9 +11,9 @@ from vistir.compat import Mapping, Set, lru_cache from vistir.misc import dedup -from .utils import filter_none, validate_markers from ..environment import MYPY_RUNNING from ..exceptions import RequirementError +from .utils import filter_none, validate_markers from six.moves import reduce # isort:skip @@ -24,7 +24,8 @@ STRING_TYPE = Union[str, bytes, Text] -MAX_VERSIONS = {2: 7, 3: 10} +MAX_VERSIONS = {2: 7, 3: 11, 4: 0} +DEPRECATED_VERSIONS = ["3.0", "3.1", "3.2", "3.3"] def is_instance(item, cls): @@ -147,9 +148,8 @@ def _format_pyspec(specifier): version = getattr(specifier, "version", specifier).rstrip() if version and version.endswith("*"): if version.endswith(".*"): - version = version.rstrip(".*") - else: - version = version.rstrip("*") + version = version[:-2] + version = version.rstrip("*") specifier = Specifier("{0}{1}".format(specifier.operator, version)) try: op = REPLACE_RANGES[specifier.operator] @@ -196,6 +196,7 @@ def _get_specs(specset): return sorted(result, key=operator.itemgetter(1)) +# TODO: Rename this to something meaningful def _group_by_op(specs): # type: (Union[Set[Specifier], SpecifierSet]) -> Iterator specs = [_get_specs(x) for x in list(specs)] @@ -205,6 +206,7 @@ def _group_by_op(specs): return grouping +# TODO: rename this to something meaningful def normalize_specifier_set(specs): # type: (Union[str, SpecifierSet]) -> Optional[Set[Specifier]] """Given a specifier set, a string, or an iterable, normalize the specifiers @@ -227,14 +229,16 @@ def normalize_specifier_set(specs): return {_format_pyspec(spec) for 
spec in specs} + spec_list = [] + for spec in specs.split(","): + spec = spec.strip() + if spec.endswith(".*"): - spec = spec.rstrip(".*") - elif spec.endswith("*"): - spec = spec.rstrip("*") + spec = spec[:-2] + spec = spec.rstrip("*") + spec_list.append(spec) + return normalize_specifier_set(SpecifierSet(",".join(spec_list))) + + +# TODO: Check if this is used by anything public otherwise make it private +# And rename it to something meaningful def get_sorted_version_string(version_set): # type: (Set[AnyStr]) -> AnyStr version_list = sorted( @@ -244,6 +248,9 @@ def get_sorted_version_string(version_set): return version + +# TODO: Rename this to something meaningful +# TODO: Add a deprecation decorator and deprecate this -- i'm sure it's used +# in other libraries @lru_cache(maxsize=1024) def cleanup_pyspecs(specs, joiner="or"): specs = normalize_specifier_set(specs) @@ -288,6 +295,7 @@ def cleanup_pyspecs(specs, joiner="or"): return sorted([(k, v) for k, v in results.items()], key=operator.itemgetter(1)) + +# TODO: Rename this to something meaningful @lru_cache(maxsize=1024) def fix_version_tuple(version_tuple): # type: (Tuple[AnyStr, AnyStr]) -> Tuple[AnyStr, AnyStr] @@ -302,6 +310,7 @@ def fix_version_tuple(version_tuple): return (op, version) + +# TODO: Rename this to something meaningful, deprecate it (See prior function) @lru_cache(maxsize=128) def get_versions(specset, group_by_operator=True): # type: (Union[Set[Specifier], SpecifierSet], bool) -> List[Tuple[STRING_TYPE, STRING_TYPE]] @@ -528,39 +537,69 @@ def contains_pyversion(marker): return _markers_contains_pyversion(marker._markers) + +def _split_specifierset_str(specset_str, prefix="=="): + # type: (str, str) -> Set[Specifier] + """ + Take a specifier set string and split it into a set of specifiers + + :param str specset_str: A string containing python versions, often comma separated + :param str prefix: A prefix to use when generating the specifier set + :return: A set of :class:`Specifier` instances generated with the provided prefix + :rtype: Set[Specifier] + """ + specifiers = set() + if "," not in specset_str and " " in specset_str: + values = [v.strip() for v in specset_str.split()] + else: + values = [v.strip() for v in specset_str.split(",")] + if prefix == "!=" and any(v in values for v in DEPRECATED_VERSIONS): + values = DEPRECATED_VERSIONS[:] + for value in sorted(values): + specifiers.add(Specifier("{0}{1}".format(prefix, value))) + return specifiers + + +def _get_specifiers_from_markers(marker_item): + """ + Given a marker item, get specifiers from the version marker + + :param :class:`~packaging.markers.Marker` marker_item: A marker describing a version constraint + :return: A set of specifiers corresponding to the marker constraint + :rtype: Set[Specifier] + """ + specifiers = set() + if isinstance(marker_item, tuple): + variable, op, value = marker_item + if variable.value != "python_version": + return specifiers + if op.value == "in": + specifiers.update(_split_specifierset_str(value.value, prefix="==")) + elif op.value == "not in": + specifiers.update(_split_specifierset_str(value.value, prefix="!=")) + else: + specifiers.add(Specifier("{0}{1}".format(op.value, value.value))) + elif isinstance(marker_item, list): + parts = get_specset(marker_item) + if parts: + specifiers.update(parts) + return specifiers + + def get_specset(marker_list): # type: (List) -> Optional[SpecifierSet] specset = set() _last_str = "and" for marker_parts in marker_list: - if isinstance(marker_parts, tuple): - variable, op, value = 
marker_parts - if variable.value != "python_version": - continue - if op.value == "in": - values = [v.strip() for v in value.value.split(",")] - specset.update(Specifier("=={0}".format(v)) for v in values) - elif op.value == "not in": - values = [v.strip() for v in value.value.split(",")] - bad_versions = ["3.0", "3.1", "3.2", "3.3"] - if len(values) >= 2 and any(v in values for v in bad_versions): - values = bad_versions - specset.update( - Specifier("!={0}".format(v.strip())) for v in sorted(bad_versions) - ) - else: - specset.add(Specifier("{0}{1}".format(op.value, value.value))) - elif isinstance(marker_parts, list): - parts = get_specset(marker_parts) - if parts: - specset.update(parts) - elif isinstance(marker_parts, str): - _last_str = marker_parts + if isinstance(marker_parts, str): + _last_str = marker_parts # noqa + else: + specset.update(_get_specifiers_from_markers(marker_parts)) specifiers = SpecifierSet() specifiers._specs = frozenset(specset) return specifiers +# TODO: Refactor this (reduce complexity) def parse_marker_dict(marker_dict): op = marker_dict["op"] lhs = marker_dict["lhs"] @@ -670,3 +709,16 @@ def marker_from_specifier(spec): marker_segments.append(format_pyversion(marker_segment)) marker_str = " and ".join(marker_segments).replace('"', "'") return Marker(marker_str) + + +def merge_markers(m1, m2): + # type: (Marker, Marker) -> Optional[Marker] + if not all((m1, m2)): + return next(iter(v for v in (m1, m2) if v), None) + m1 = _ensure_marker(m1) + m2 = _ensure_marker(m2) + _markers = [] # type: List[Marker] + for marker in (m1, m2): + _markers.append(str(marker)) + marker_str = " and ".join([normalize_marker_str(m) for m in _markers if m]) + return _ensure_marker(normalize_marker_str(marker_str)) diff --git a/pipenv/vendor/requirementslib/models/requirements.py b/pipenv/vendor/requirementslib/models/requirements.py index 0546e9ed57..0537ca08db 100644 --- a/pipenv/vendor/requirementslib/models/requirements.py +++ b/pipenv/vendor/requirementslib/models/requirements.py @@ -15,7 +15,6 @@ import six import vistir from cached_property import cached_property -from first import first from packaging.markers import Marker from packaging.requirements import Requirement as PackagingRequirement from packaging.specifiers import ( @@ -793,27 +792,25 @@ def wheel_kwargs(self): def get_setup_info(self): # type: () -> SetupInfo - setup_info = SetupInfo.from_ireq(self.ireq) - if not setup_info.name: - setup_info.get_info() + setup_info = None + with pip_shims.shims.global_tempdir_manager(): + setup_info = SetupInfo.from_ireq(self.ireq) + if not setup_info.name: + setup_info.get_info() return setup_info @property def setup_info(self): # type: () -> Optional[SetupInfo] - if self._setup_info is None and not self.is_named and not self.is_wheel: - if self._setup_info: - if not self._setup_info.name: - self._setup_info.get_info() - else: - # make two attempts at this before failing to allow for stale data + if not self._setup_info and not self.is_named and not self.is_wheel: + # make two attempts at this before failing to allow for stale data + try: + self.setup_info = self.get_setup_info() + except FileNotFoundError: try: self.setup_info = self.get_setup_info() except FileNotFoundError: - try: - self.setup_info = self.get_setup_info() - except FileNotFoundError: - raise + raise return self._setup_info @setup_info.setter @@ -863,12 +860,16 @@ def metadata(self): @cached_property def parsed_setup_cfg(self): # type: () -> Dict[Any, Any] - if self.is_local and self.path and 
is_installable_dir(self.path): - setup_content = read_source(self.setup_cfg) - base_dir = os.path.dirname(os.path.abspath(self.setup_cfg)) - if self.setup_cfg: - return parse_setup_cfg(setup_content, base_dir) - return {} + if not ( + self.is_local + and self.path + and is_installable_dir(self.path) + and self.setup_cfg + ): + return {} + base_dir = os.path.dirname(os.path.abspath(self.setup_cfg)) + setup_content = read_source(self.setup_cfg) + return parse_setup_cfg(setup_content, base_dir) @cached_property def parsed_setup_py(self): @@ -886,7 +887,7 @@ def vcsrepo(self, repo): wheel_kwargs = self.wheel_kwargs.copy() wheel_kwargs["src_dir"] = repo.checkout_directory ireq.ensure_has_source_dir(wheel_kwargs["src_dir"]) - with temp_path(): + with pip_shims.shims.global_tempdir_manager(), temp_path(): sys.path = [repo.checkout_directory, "", ".", get_python_lib(plat_specific=0)] setupinfo = SetupInfo.create( repo.checkout_directory, @@ -1061,7 +1062,7 @@ def parse_requirement(self): # type: () -> "Line" if self._name is None: self.parse_name() - if not self._name and not self.is_vcs and not self.is_named: + if not any([self._name, self.is_vcs, self.is_named]): if self.setup_info and self.setup_info.name: self._name = self.setup_info.name name, extras, url = self.requirement_info @@ -1558,16 +1559,18 @@ def setup_info(self): self._parsed_line._setup_info and not self._parsed_line._setup_info.name ): - self._parsed_line._setup_info.get_info() + with pip_shims.shims.global_tempdir_manager(): + self._parsed_line._setup_info.get_info() self._setup_info = self.parsed_line._setup_info elif self.parsed_line and ( self.parsed_line.ireq and not self.parsed_line.is_wheel ): - self._setup_info = SetupInfo.from_ireq(self.parsed_line.ireq) + with pip_shims.shims.global_tempdir_manager(): + self._setup_info = SetupInfo.from_ireq(self.parsed_line.ireq) else: if self.link and not self.link.is_wheel: self._setup_info = Line(self.line_part).setup_info - if self._setup_info: + with pip_shims.shims.global_tempdir_manager(): self._setup_info.get_info() return self._setup_info @@ -1954,20 +1957,23 @@ def vcs_uri(self): def setup_info(self): if self._parsed_line and self._parsed_line.setup_info: if not self._parsed_line.setup_info.name: - self._parsed_line._setup_info.get_info() + with pip_shims.shims.global_tempdir_manager(): + self._parsed_line._setup_info.get_info() return self._parsed_line.setup_info if self._repo: from .setup_info import SetupInfo - self._setup_info = SetupInfo.from_ireq( - Line(self._repo.checkout_directory).ireq - ) - self._setup_info.get_info() + with pip_shims.shims.global_tempdir_manager(): + self._setup_info = SetupInfo.from_ireq( + Line(self._repo.checkout_directory).ireq + ) + self._setup_info.get_info() return self._setup_info ireq = self.parsed_line.ireq from .setup_info import SetupInfo - self._setup_info = SetupInfo.from_ireq(ireq) + with pip_shims.shims.global_tempdir_manager(): + self._setup_info = SetupInfo.from_ireq(ireq) return self._setup_info @setup_info.setter @@ -2271,7 +2277,7 @@ def _choose_vcs_source(pipfile): alt_type = "" # type: Optional[STRING_TYPE] vcs_value = "" # type: STRING_TYPE if src_keys: - chosen_key = first(src_keys) + chosen_key = next(iter(src_keys)) vcs_type = pipfile.pop("vcs") if chosen_key in pipfile: vcs_value = pipfile[chosen_key] @@ -2561,7 +2567,8 @@ def build_backend(self): if self.req is not None and ( not isinstance(self.req, NamedRequirement) and self.req.is_local ): - setup_info = self.run_requires() + with 
pip_shims.shims.global_tempdir_manager(): + setup_info = self.run_requires() build_backend = setup_info.get("build_backend") return build_backend return "setuptools.build_meta" @@ -2673,7 +2680,7 @@ def from_pipfile(cls, name, pipfile): if hasattr(pipfile, "keys"): _pipfile = dict(pipfile).copy() _pipfile["version"] = get_version(pipfile) - vcs = first([vcs for vcs in VCS_LIST if vcs in _pipfile]) + vcs = next(iter([vcs for vcs in VCS_LIST if vcs in _pipfile]), None) if vcs: _pipfile["vcs"] = vcs r = VCSRequirement.from_pipfile(name, pipfile) @@ -2955,10 +2962,11 @@ def run_requires(self, sources=None, finder=None): from .dependencies import get_finder finder = get_finder(sources=sources) - info = SetupInfo.from_requirement(self, finder=finder) - if info is None: - return {} - info_dict = info.get_info() + with pip_shims.shims.global_tempdir_manager(): + info = SetupInfo.from_requirement(self, finder=finder) + if info is None: + return {} + info_dict = info.get_info() if self.req and not self.req.setup_info: self.req._setup_info = info if self.req._has_hashed_name and info_dict.get("name"): diff --git a/pipenv/vendor/requirementslib/models/setup_info.py b/pipenv/vendor/requirementslib/models/setup_info.py index 10bf3a18b4..91f28615af 100644 --- a/pipenv/vendor/requirementslib/models/setup_info.py +++ b/pipenv/vendor/requirementslib/models/setup_info.py @@ -5,12 +5,15 @@ import atexit import contextlib import importlib +import io +import operator import os import shutil import sys from functools import partial import attr +import chardet import packaging.specifiers import packaging.utils import packaging.version @@ -46,6 +49,10 @@ import distutils from distutils.core import Distribution +try: + from contextlib import ExitStack +except ImportError: + from contextlib2 import ExitStack try: from os import scandir @@ -294,7 +301,11 @@ def parse_setup_cfg(setup_cfg_contents, base_dir): }, } parser = configparser.ConfigParser(default_opts) - parser.read_string(setup_cfg_contents) + if six.PY2: + buff = io.BytesIO(setup_cfg_contents) + parser.readfp(buff) + else: + parser.read_string(setup_cfg_contents) results = {} package_dir = get_package_dir_from_setupcfg(parser, base_dir=base_dir) name, version = get_name_and_version_from_setupcfg(parser, package_dir) @@ -629,6 +640,20 @@ def get_metadata_from_dist(dist): class Analyzer(ast.NodeVisitor): + OP_MAP = { + ast.Add: operator.add, + ast.Sub: operator.sub, + ast.Mult: operator.mul, + ast.Div: operator.floordiv, + ast.Mod: operator.mod, + ast.Pow: operator.pow, + ast.LShift: operator.lshift, + ast.RShift: operator.rshift, + ast.BitAnd: operator.and_, + ast.BitOr: operator.or_, + ast.BitXor: operator.xor + } + def __init__(self): self.name_types = [] self.function_map = {} # type: Dict[Any, Any] @@ -654,8 +679,10 @@ def generic_visit(self, node): def visit_BinOp(self, node): left = ast_unparse(node.left, initial_mapping=True) right = ast_unparse(node.right, initial_mapping=True) + op = ast_unparse(node.op, initial_mapping=True) node.left = left node.right = right + node.op = op self.binOps.append(node) def unmap_binops(self): @@ -691,25 +718,35 @@ def ast_unparse(item, initial_mapping=False, analyzer=None, recurse=True): # no elif isinstance(item, ast.BinOp): if analyzer and item in analyzer.binOps_map: unparsed = analyzer.binOps_map[item] - elif isinstance(item.op, ast.Add): + else: + right_item = unparse(item.right) + left_item = unparse(item.left) + if type(item.op) in Analyzer.OP_MAP: + item.op = Analyzer.OP_MAP[type(item.op)] if not 
initial_mapping: - right_item = unparse(item.right) - left_item = unparse(item.left) if not all( isinstance(side, (six.string_types, int, float, list, tuple)) for side in (left_item, right_item) ): - item.left = left_item - item.right = right_item - unparsed = item + if type(item.op) in Analyzer.OP_MAP: + item = Analyzer.OP_MAP[type(item.op)](left_item, right_item) + else: + item.left = left_item + item.right = right_item + item.op = unparse(item.op) + try: + unparsed = item.op(left_item, right_item) + except Exception: + unparsed = item else: - unparsed = left_item + right_item + if type(item.op) in Analyzer.OP_MAP: + item.op = Analyzer.OP_MAP[type(item.op)] + try: + unparsed = item.op(left_item, right_item) + except Exception: + unparsed = item else: unparsed = item - elif isinstance(item.op, ast.Sub): - unparsed = unparse(item.left) - unparse(item.right) - else: - unparsed = item elif isinstance(item, ast.Name): if not initial_mapping: unparsed = item.id @@ -747,10 +784,13 @@ def ast_unparse(item, initial_mapping=False, analyzer=None, recurse=True): # no unparsed = name if not unparsed else unparsed elif isinstance(item, ast.Call): unparsed = {} - if isinstance(item.func, ast.Name): - func_name = unparse(item.func) - elif isinstance(item.func, ast.Attribute): + if isinstance(item.func, (ast.Name, ast.Attribute)): func_name = unparse(item.func) + else: + try: + func_name = unparse(item.func) + except Exception: + func_name = None if func_name: unparsed[func_name] = {} for keyword in item.keywords: @@ -809,7 +849,15 @@ def ast_parse_attribute_from_file(path, attribute): def ast_parse_file(path): # type: (S) -> Analyzer - tree = ast.parse(read_source(path)) + try: + tree = ast.parse(read_source(path)) + except SyntaxError: + # The source may be encoded strangely, e.g. 
azure-storage + # which has a setup.py encoded with utf-8-sig + with open(path, "rb") as fh: + contents = fh.read() + encoding = chardet.detect(contents)["encoding"] + tree = ast.parse(contents.decode(encoding)) ast_analyzer = Analyzer() ast_analyzer.visit(tree) return ast_analyzer @@ -1111,6 +1159,8 @@ def parse_setup_cfg(self): try: parsed = setuptools_parse_setup_cfg(self.setup_cfg.as_posix()) except Exception: + if six.PY2: + contents = self.setup_cfg.read_bytes() parsed = parse_setup_cfg(contents, base_dir) if not parsed: return {} diff --git a/pipenv/vendor/requirementslib/models/utils.py b/pipenv/vendor/requirementslib/models/utils.py index 5d4708647b..6c3b7de8a5 100644 --- a/pipenv/vendor/requirementslib/models/utils.py +++ b/pipenv/vendor/requirementslib/models/utils.py @@ -12,7 +12,6 @@ import six import tomlkit from attr import validators -from first import first from packaging.markers import InvalidMarker, Marker, Op, Value, Variable from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet from packaging.version import parse as parse_version @@ -103,6 +102,11 @@ def filter_none(k, v): return False +def filter_dict(dict_): + # type: (Dict[AnyStr, Any]) -> Dict[AnyStr, Any] + return {k: v for k, v in dict_.items() if filter_none(k, v)} + + def optional_instance_of(cls): # type: (Any) -> _ValidatorType[Optional[_T]] return validators.optional(validators.instance_of(cls)) @@ -548,8 +552,9 @@ def split_vcs_method_from_uri(uri): # type: (AnyStr) -> Tuple[Optional[STRING_TYPE], STRING_TYPE] """Split a vcs+uri formatted uri into (vcs, uri)""" vcs_start = "{0}+" - vcs = None # type: Optional[STRING_TYPE] - vcs = first([vcs for vcs in VCS_LIST if uri.startswith(vcs_start.format(vcs))]) + vcs = next( + iter([vcs for vcs in VCS_LIST if uri.startswith(vcs_start.format(vcs))]), None + ) if vcs: vcs, uri = uri.split("+", 1) return vcs, uri @@ -718,7 +723,7 @@ def get_pinned_version(ireq): except AttributeError: raise TypeError("Expected InstallRequirement, not {}".format(type(ireq).__name__)) - if ireq.editable: + if getattr(ireq, "editable", False): raise ValueError("InstallRequirement is editable") if not specifier: raise ValueError("InstallRequirement has no version specification") @@ -766,7 +771,7 @@ def as_tuple(ireq): raise TypeError("Expected a pinned InstallRequirement, got {}".format(ireq)) name = key_from_req(ireq.req) - version = first(ireq.specifier._specs)._spec[1] + version = next(iter(ireq.specifier._specs))._spec[1] extras = tuple(sorted(ireq.extras)) return name, version, extras @@ -906,7 +911,7 @@ def version_from_ireq(ireq): :rtype: str """ - return first(ireq.specifier._specs).version + return next(iter(ireq.specifier._specs)).version def _get_requires_python(candidate): diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index 566c0641ba..7ce6da7023 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -40,6 +40,8 @@ toml==0.10.0 cached-property==1.5.1 vistir==0.4.3 pip-shims==0.4.0 + contextlib2==0.6.0.post1 + funcsigs==1.0.2 enum34==1.1.6 # yaspin==0.15.0 yaspin==0.14.3 @@ -47,5 +49,8 @@ cerberus==1.3.2 resolvelib==0.2.2 backports.functools_lru_cache==1.5 pep517==0.8.1 + zipp==0.6.0 + importlib_metadata==1.3.0 + more-itertools==5.0.0 git+https://github.com/sarugaku/passa.git@master#egg=passa orderedmultidict==1.0.1 diff --git a/pipenv/vendor/zipp.LICENSE b/pipenv/vendor/zipp.LICENSE new file mode 100644 index 0000000000..5e795a61f3 --- /dev/null +++ b/pipenv/vendor/zipp.LICENSE @@ -0,0 +1,7 @@ +Copyright Jason R. 
Coombs + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/pipenv/vendor/zipp.py b/pipenv/vendor/zipp.py new file mode 100644 index 0000000000..8ab7d09908 --- /dev/null +++ b/pipenv/vendor/zipp.py @@ -0,0 +1,220 @@ +# coding: utf-8 + +from __future__ import division + +import io +import sys +import posixpath +import zipfile +import functools +import itertools + +import more_itertools + +__metaclass__ = type + + +def _parents(path): + """ + Given a path with elements separated by + posixpath.sep, generate all parents of that path. + + >>> list(_parents('b/d')) + ['b'] + >>> list(_parents('/b/d/')) + ['/b'] + >>> list(_parents('b/d/f/')) + ['b/d', 'b'] + >>> list(_parents('b')) + [] + >>> list(_parents('')) + [] + """ + return itertools.islice(_ancestry(path), 1, None) + + +def _ancestry(path): + """ + Given a path with elements separated by + posixpath.sep, generate all elements of that path + + >>> list(_ancestry('b/d')) + ['b/d', 'b'] + >>> list(_ancestry('/b/d/')) + ['/b/d', '/b'] + >>> list(_ancestry('b/d/f/')) + ['b/d/f', 'b/d', 'b'] + >>> list(_ancestry('b')) + ['b'] + >>> list(_ancestry('')) + [] + """ + path = path.rstrip(posixpath.sep) + while path and path != posixpath.sep: + yield path + path, tail = posixpath.split(path) + + +class Path: + """ + A pathlib-compatible interface for zip files. + + Consider a zip file with this structure:: + + . + ├── a.txt + └── b + ├── c.txt + └── d + └── e.txt + + >>> data = io.BytesIO() + >>> zf = zipfile.ZipFile(data, 'w') + >>> zf.writestr('a.txt', 'content of a') + >>> zf.writestr('b/c.txt', 'content of c') + >>> zf.writestr('b/d/e.txt', 'content of e') + >>> zf.filename = 'abcde.zip' + + Path accepts the zipfile object itself or a filename + + >>> root = Path(zf) + + From there, several path operations are available. 
+ + Directory iteration (including the zip file itself): + + >>> a, b = root.iterdir() + >>> a + Path('abcde.zip', 'a.txt') + >>> b + Path('abcde.zip', 'b/') + + name property: + + >>> b.name + 'b' + + join with divide operator: + + >>> c = b / 'c.txt' + >>> c + Path('abcde.zip', 'b/c.txt') + >>> c.name + 'c.txt' + + Read text: + + >>> c.read_text() + 'content of c' + + existence: + + >>> c.exists() + True + >>> (b / 'missing.txt').exists() + False + + Coercion to string: + + >>> str(c) + 'abcde.zip/b/c.txt' + """ + + __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" + + def __init__(self, root, at=""): + self.root = ( + root + if isinstance(root, zipfile.ZipFile) + else zipfile.ZipFile(self._pathlib_compat(root)) + ) + self.at = at + + @staticmethod + def _pathlib_compat(path): + """ + For path-like objects, convert to a filename for compatibility + on Python 3.6.1 and earlier. + """ + try: + return path.__fspath__() + except AttributeError: + return str(path) + + @property + def open(self): + return functools.partial(self.root.open, self.at) + + @property + def name(self): + return posixpath.basename(self.at.rstrip("/")) + + def read_text(self, *args, **kwargs): + with self.open() as strm: + return io.TextIOWrapper(strm, *args, **kwargs).read() + + def read_bytes(self): + with self.open() as strm: + return strm.read() + + def _is_child(self, path): + return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") + + def _next(self, at): + return Path(self.root, at) + + def is_dir(self): + return not self.at or self.at.endswith("/") + + def is_file(self): + return not self.is_dir() + + def exists(self): + return self.at in self._names() + + def iterdir(self): + if not self.is_dir(): + raise ValueError("Can't listdir a file") + subs = map(self._next, self._names()) + return filter(self._is_child, subs) + + def __str__(self): + return posixpath.join(self.root.filename, self.at) + + def __repr__(self): + return self.__repr.format(self=self) + + def joinpath(self, add): + add = self._pathlib_compat(add) + next = posixpath.join(self.at, add) + next_dir = posixpath.join(self.at, add, "") + names = self._names() + return self._next(next_dir if next not in names and next_dir in names else next) + + __truediv__ = joinpath + + @staticmethod + def _implied_dirs(names): + return more_itertools.unique_everseen( + parent + "/" + for name in names + for parent in _parents(name) + if parent + "/" not in names + ) + + @classmethod + def _add_implied_dirs(cls, names): + return names + list(cls._implied_dirs(names)) + + @property + def parent(self): + parent_at = posixpath.dirname(self.at.rstrip('/')) + if parent_at: + parent_at += '/' + return self._next(parent_at) + + def _names(self): + return self._add_implied_dirs(self.root.namelist()) + + if sys.version_info < (3,): + __div__ = __truediv__ diff --git a/tasks/vendoring/patches/patched/pip19.patch b/tasks/vendoring/patches/patched/pip19.patch index c40379f5bb..7d57cfd537 100644 --- a/tasks/vendoring/patches/patched/pip19.patch +++ b/tasks/vendoring/patches/patched/pip19.patch @@ -267,24 +267,6 @@ index c24158f4..37c3197f 100644 ) if not self.ignore_dependencies: -@@ -423,6 +443,17 @@ class Resolver(object): - for subreq in dist.requires(available_requested): - add_req(subreq, extras_requested=available_requested) - -+ # Hack for deep-resolving extras. 
-+ for available in available_requested: -+ if hasattr(dist, '_DistInfoDistribution__dep_map'): -+ for req in dist._DistInfoDistribution__dep_map[available]: -+ req = self._make_install_req( -+ req, -+ req_to_install -+ ) -+ -+ more_reqs.append(req) -+ - if not req_to_install.editable and not req_to_install.satisfied_by: - # XXX: --no-install leads this to report 'Successfully - # downloaded' for only non-editable reqs, even though we took diff --git a/pipenv/patched/pip/_internal/models/candidate.py b/pipenv/patched/pip/_internal/models/candidate.py index 4d49604d..cdfe65aa 100644 --- a/pipenv/patched/pip/_internal/models/candidate.py @@ -522,3 +504,90 @@ index 77d40be6..8a32cf2d 100644 - return path + return path \ No newline at end of file +diff --git a/pipenv/patched/notpip/_internal/commands/__init__.py b/pipenv/patched/notpip/_internal/commands/__init__.py +index abcafa55..ca155a94 100644 +--- a/pipenv/patched/notpip/_internal/commands/__init__.py ++++ b/pipenv/patched/notpip/_internal/commands/__init__.py +@@ -21,7 +21,7 @@ CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary') + + # The ordering matters for help display. + # Also, even though the module path starts with the same +-# "pip._internal.commands" prefix in each case, we include the full path ++# "pipenv.patched.notpip._internal.commands" prefix in each case, we include the full path + # because it makes testing easier (specifically when modifying commands_dict + # in test setup / teardown by adding info for a FakeCommand class defined + # in a test-related module). +@@ -29,59 +29,59 @@ CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary') + # so that the ordering won't be lost when using Python 2.7. + commands_dict = OrderedDict([ + ('install', CommandInfo( +- 'pip._internal.commands.install', 'InstallCommand', ++ 'pipenv.patched.notpip._internal.commands.install', 'InstallCommand', + 'Install packages.', + )), + ('download', CommandInfo( +- 'pip._internal.commands.download', 'DownloadCommand', ++ 'pipenv.patched.notpip._internal.commands.download', 'DownloadCommand', + 'Download packages.', + )), + ('uninstall', CommandInfo( +- 'pip._internal.commands.uninstall', 'UninstallCommand', ++ 'pipenv.patched.notpip._internal.commands.uninstall', 'UninstallCommand', + 'Uninstall packages.', + )), + ('freeze', CommandInfo( +- 'pip._internal.commands.freeze', 'FreezeCommand', ++ 'pipenv.patched.notpip._internal.commands.freeze', 'FreezeCommand', + 'Output installed packages in requirements format.', + )), + ('list', CommandInfo( +- 'pip._internal.commands.list', 'ListCommand', ++ 'pipenv.patched.notpip._internal.commands.list', 'ListCommand', + 'List installed packages.', + )), + ('show', CommandInfo( +- 'pip._internal.commands.show', 'ShowCommand', ++ 'pipenv.patched.notpip._internal.commands.show', 'ShowCommand', + 'Show information about installed packages.', + )), + ('check', CommandInfo( +- 'pip._internal.commands.check', 'CheckCommand', ++ 'pipenv.patched.notpip._internal.commands.check', 'CheckCommand', + 'Verify installed packages have compatible dependencies.', + )), + ('config', CommandInfo( +- 'pip._internal.commands.configuration', 'ConfigurationCommand', ++ 'pipenv.patched.notpip._internal.commands.configuration', 'ConfigurationCommand', + 'Manage local and global configuration.', + )), + ('search', CommandInfo( +- 'pip._internal.commands.search', 'SearchCommand', ++ 'pipenv.patched.notpip._internal.commands.search', 'SearchCommand', + 'Search PyPI for packages.', + )), + 
('wheel', CommandInfo( +- 'pip._internal.commands.wheel', 'WheelCommand', ++ 'pipenv.patched.notpip._internal.commands.wheel', 'WheelCommand', + 'Build wheels from your requirements.', + )), + ('hash', CommandInfo( +- 'pip._internal.commands.hash', 'HashCommand', ++ 'pipenv.patched.notpip._internal.commands.hash', 'HashCommand', + 'Compute hashes of package archives.', + )), + ('completion', CommandInfo( +- 'pip._internal.commands.completion', 'CompletionCommand', ++ 'pipenv.patched.notpip._internal.commands.completion', 'CompletionCommand', + 'A helper command used for command completion.', + )), + ('debug', CommandInfo( +- 'pip._internal.commands.debug', 'DebugCommand', ++ 'pipenv.patched.notpip._internal.commands.debug', 'DebugCommand', + 'Show information useful for debugging.', + )), + ('help', CommandInfo( +- 'pip._internal.commands.help', 'HelpCommand', ++ 'pipenv.patched.notpip._internal.commands.help', 'HelpCommand', + 'Show help for commands.', + )), + ]) # type: OrderedDict[str, CommandInfo] diff --git a/tasks/vendoring/patches/patched/piptools.patch b/tasks/vendoring/patches/patched/piptools.patch index fe8d8fe9c1..cb6ffab1de 100644 --- a/tasks/vendoring/patches/patched/piptools.patch +++ b/tasks/vendoring/patches/patched/piptools.patch @@ -155,7 +155,7 @@ index f389784..c1bcf9d 100644 else: return self.repository.find_best_match(ireq, prereleases) diff --git a/pipenv/patched/piptools/repositories/pypi.py b/pipenv/patched/piptools/repositories/pypi.py -index acbd680..c9a23ad 100644 +index acbd680..4bd3e22 100644 --- a/pipenv/patched/piptools/repositories/pypi.py +++ b/pipenv/patched/piptools/repositories/pypi.py @@ -2,21 +2,29 @@ @@ -400,7 +400,7 @@ index acbd680..c9a23ad 100644 preparer_kwargs = { "build_dir": self.build_dir, -@@ -186,9 +311,11 @@ class PyPIRepository(BaseRepository): +@@ -186,21 +311,24 @@ class PyPIRepository(BaseRepository): "upgrade_strategy": "to-satisfy-only", "force_reinstall": False, "ignore_dependencies": False, @@ -413,7 +413,21 @@ index acbd680..c9a23ad 100644 } make_install_req_kwargs = {"isolated": False, "wheel_cache": wheel_cache} -@@ -208,6 +335,7 @@ class PyPIRepository(BaseRepository): + if PIP_VERSION < (19, 3): + resolver_kwargs.update(**make_install_req_kwargs) + else: +- from pip._internal.req.constructors import install_req_from_req_string ++ from pipenv.vendor.pip_shims.shims import install_req_from_req_string + + make_install_req = partial( + install_req_from_req_string, **make_install_req_kwargs + ) + resolver_kwargs["make_install_req"] = make_install_req ++ del resolver_kwargs["use_pep517"] + + if PIP_VERSION >= (20,): + preparer_kwargs["session"] = self.session +@@ -208,6 +336,7 @@ class PyPIRepository(BaseRepository): resolver = None preparer = None @@ -421,7 +435,7 @@ index acbd680..c9a23ad 100644 with RequirementTracker() as req_tracker: # Pip 18 uses a requirement tracker to prevent fork bombs if req_tracker: -@@ -216,7 +344,6 @@ class PyPIRepository(BaseRepository): +@@ -216,7 +345,6 @@ class PyPIRepository(BaseRepository): resolver_kwargs["preparer"] = preparer reqset = RequirementSet() ireq.is_direct = True @@ -429,7 +443,7 @@ index acbd680..c9a23ad 100644 resolver = PipResolver(**resolver_kwargs) require_hashes = False -@@ -225,12 +352,16 @@ class PyPIRepository(BaseRepository): +@@ -225,12 +353,16 @@ class PyPIRepository(BaseRepository): results = resolver._resolve_one(reqset, ireq) else: results = resolver._resolve_one(reqset, ireq, require_hashes) @@ -441,14 +455,15 @@ index acbd680..c9a23ad 100644 - 
reqset.cleanup_files() + results = set(results) if results else set() - return set(results) +- return set(results) ++ return results, ireq - def get_dependencies(self, ireq): + def get_legacy_dependencies(self, ireq): """ Given a pinned, URL, or editable InstallRequirement, returns a set of dependencies (also InstallRequirements, but not necessarily pinned). -@@ -265,9 +396,8 @@ class PyPIRepository(BaseRepository): +@@ -265,9 +397,8 @@ class PyPIRepository(BaseRepository): wheel_cache = WheelCache(CACHE_DIR, self.options.format_control) prev_tracker = os.environ.get("PIP_REQ_TRACKER") try: @@ -460,7 +475,7 @@ index acbd680..c9a23ad 100644 finally: if "PIP_REQ_TRACKER" in os.environ: if prev_tracker: -@@ -313,12 +443,10 @@ class PyPIRepository(BaseRepository): +@@ -313,12 +444,10 @@ class PyPIRepository(BaseRepository): # We need to get all of the candidates that match our current version # pin, these will represent all of the files that could possibly # satisfy this constraint. @@ -476,7 +491,7 @@ index acbd680..c9a23ad 100644 log.debug(" {}".format(ireq.name)) -@@ -328,30 +456,11 @@ class PyPIRepository(BaseRepository): +@@ -328,30 +457,11 @@ class PyPIRepository(BaseRepository): return candidate.link return { @@ -571,7 +586,7 @@ index fc53f18..c056665 100644 ] return self.dependency_cache.reverse_dependencies(non_editable) diff --git a/pipenv/patched/piptools/utils.py b/pipenv/patched/piptools/utils.py -index 8727f1e..1f4c10a 100644 +index 8727f1e..c9f53f7 100644 --- a/pipenv/patched/piptools/utils.py +++ b/pipenv/patched/piptools/utils.py @@ -1,6 +1,7 @@ @@ -648,8 +663,8 @@ index 8727f1e..1f4c10a 100644 + if getattr(c, "requires_python", None): + # Old specifications had people setting this to single digits + # which is effectively the same as '>=digit,<digit+1' -+ if c.requires_python.isdigit(): -+ c.requires_python = '>={0},<{1}'.format(c.requires_python, int(c.requires_python) + 1) ++ if len(c.requires_python) == 1 and c.requires_python in ("2", "3"): ++ c.requires_python = '>={0},<{1!s}'.format(c.requires_python, int(c.requires_python) + 1) + try: + specifierset = SpecifierSet(c.requires_python) + except InvalidSpecifier: From e629c0ce0205f95c684cf2bf1ffbf9dfbb2a83b6 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Wed, 25 Mar 2020 17:22:48 -0400 Subject: [PATCH 03/49] Update vendor.txt and ecosystem libraries - Update `pip_shims`, `pythonfinder`, `requirementslib`, `vistir` - Add patches to update or fix import paths where necessary - Hardcode funcsigs license Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/vendor/pip_shims/__init__.py | 2 +- pipenv/vendor/pip_shims/compat.py | 1511 +++++++++++++++++ pipenv/vendor/pip_shims/environment.py | 5 + pipenv/vendor/pip_shims/models.py | 103 +- pipenv/vendor/pip_shims/shims.py | 3 + pipenv/vendor/pip_shims/utils.py | 3 + pipenv/vendor/pythonfinder/__init__.py | 2 +- pipenv/vendor/pythonfinder/cli.py | 22 +- pipenv/vendor/pythonfinder/compat.py | 42 + pipenv/vendor/pythonfinder/models/mixins.py | 4 +- pipenv/vendor/pythonfinder/models/path.py | 4 +- pipenv/vendor/pythonfinder/models/python.py | 2 +- pipenv/vendor/pythonfinder/pythonfinder.py | 30 +- pipenv/vendor/pythonfinder/utils.py | 66 +- pipenv/vendor/requirementslib/__init__.py | 2 +- .../requirementslib/models/dependencies.py | 33 +- .../vendor/requirementslib/models/markers.py | 23 - .../requirementslib/models/setup_info.py | 190 ++- pipenv/vendor/requirementslib/models/utils.py | 2 +- pipenv/vendor/requirementslib/models/vcs.py | 8 +- 
pipenv/vendor/vendor.txt | 35 +- pipenv/vendor/vistir/__init__.py | 2 +- pipenv/vendor/vistir/_winconsole.py | 2 +- pipenv/vendor/vistir/contextmanagers.py | 35 +- pipenv/vendor/vistir/path.py | 29 +- pipenv/vendor/yaspin/spinners.py | 5 +- tasks/vendoring/__init__.py | 3 +- .../vendor/pip_shims_module_names.patch | 24 +- .../vendor/pythonfinder-pathlib-import.patch | 13 + .../vendor/yaspin-signal-handling.patch | 16 + 30 files changed, 1996 insertions(+), 225 deletions(-) create mode 100644 pipenv/vendor/pip_shims/compat.py create mode 100644 pipenv/vendor/pythonfinder/compat.py create mode 100644 tasks/vendoring/patches/vendor/pythonfinder-pathlib-import.patch diff --git a/pipenv/vendor/pip_shims/__init__.py b/pipenv/vendor/pip_shims/__init__.py index 93f3a4721f..7feca147f7 100644 --- a/pipenv/vendor/pip_shims/__init__.py +++ b/pipenv/vendor/pip_shims/__init__.py @@ -5,7 +5,7 @@ from . import shims -__version__ = "0.4.0" +__version__ = "0.5.1" if "pip_shims" in sys.modules: diff --git a/pipenv/vendor/pip_shims/compat.py b/pipenv/vendor/pip_shims/compat.py new file mode 100644 index 0000000000..ed99d97080 --- /dev/null +++ b/pipenv/vendor/pip_shims/compat.py @@ -0,0 +1,1511 @@ +# -*- coding=utf-8 -*- +""" +Backports and helper functionality to support using new functionality. +""" +from __future__ import absolute_import, print_function + +import atexit +import contextlib +import functools +import inspect +import os +import re +import sys +import types + +import six +from packaging import specifiers + +from .environment import MYPY_RUNNING +from .utils import ( + call_function_with_correct_args, + get_method_args, + nullcontext, + suppress_setattr, +) + +if sys.version_info[:2] < (3, 5): + from pipenv.vendor.vistir.compat import TemporaryDirectory +else: + from tempfile import TemporaryDirectory + +if six.PY3: + from contextlib import ExitStack +else: + from pipenv.vendor.contextlib2 import ExitStack + + +if MYPY_RUNNING: + from optparse import Values + from requests import Session + from typing import ( + Any, + Callable, + Dict, + Generator, + Generic, + Iterable, + Iterator, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, + ) + from .utils import TShimmedPath, TShim, TShimmedFunc + + TFinder = TypeVar("TFinder") + TResolver = TypeVar("TResolver") + TReqTracker = TypeVar("TReqTracker") + TReqSet = TypeVar("TReqSet") + TLink = TypeVar("TLink") + TSession = TypeVar("TSession", bound=Session) + TCommand = TypeVar("TCommand", covariant=True) + TCommandInstance = TypeVar("TCommandInstance") + TCmdDict = Dict[str, Union[Tuple[str, str, str], TCommandInstance]] + TInstallRequirement = TypeVar("TInstallRequirement") + TShimmedCmdDict = Union[TShim, TCmdDict] + TWheelCache = TypeVar("TWheelCache") + TPreparer = TypeVar("TPreparer") + + +class SearchScope(object): + def __init__(self, find_links=None, index_urls=None): + self.index_urls = index_urls if index_urls else [] + self.find_links = find_links + + @classmethod + def create(cls, find_links=None, index_urls=None): + if not index_urls: + index_urls = ["https://pypi.org/simple"] + return cls(find_links=find_links, index_urls=index_urls) + + +class SelectionPreferences(object): + def __init__( + self, + allow_yanked=True, + allow_all_prereleases=False, + format_control=None, + prefer_binary=False, + ignore_requires_python=False, + ): + self.allow_yanked = allow_yanked + self.allow_all_prereleases = allow_all_prereleases + self.format_control = format_control + self.prefer_binary = prefer_binary + self.ignore_requires_python = 
ignore_requires_python + + +class TargetPython(object): + fallback_get_tags = None # type: Optional[TShimmedFunc] + + def __init__( + self, + platform=None, # type: Optional[str] + py_version_info=None, # type: Optional[Tuple[int, ...]] + abi=None, # type: Optional[str] + implementation=None, # type: Optional[str] + ): + # type: (...) -> None + self._given_py_version_info = py_version_info + if py_version_info is None: + py_version_info = sys.version_info[:3] + elif len(py_version_info) < 3: + py_version_info += (3 - len(py_version_info)) * (0,) + else: + py_version_info = py_version_info[:3] + py_version = ".".join(map(str, py_version_info[:2])) + self.abi = abi + self.implementation = implementation + self.platform = platform + self.py_version = py_version + self.py_version_info = py_version_info + self._valid_tags = None + + def get_tags(self): + if self._valid_tags is None and self.fallback_get_tags: + fallback_func = resolve_possible_shim(self.fallback_get_tags) + versions = None + if self._given_py_version_info: + versions = ["".join(map(str, self._given_py_version_info[:2]))] + self._valid_tags = fallback_func( + versions=versions, + platform=self.platform, + abi=self.abi, + impl=self.implementation, + ) + return self._valid_tags + + +class CandidatePreferences(object): + def __init__(self, prefer_binary=False, allow_all_prereleases=False): + self.prefer_binary = prefer_binary + self.allow_all_prereleases = allow_all_prereleases + + +class LinkCollector(object): + def __init__(self, session=None, search_scope=None): + self.session = session + self.search_scope = search_scope + + +class CandidateEvaluator(object): + @classmethod + def create( + cls, + project_name, # type: str + target_python=None, # type: Optional[TargetPython] + prefer_binary=False, # type: bool + allow_all_prereleases=False, # type: bool + specifier=None, # type: Optional[specifiers.BaseSpecifier] + hashes=None, # type: Optional[Any] + ): + if target_python is None: + target_python = TargetPython() + if specifier is None: + specifier = specifiers.SpecifierSet() + + supported_tags = target_python.get_tags() + + return cls( + project_name=project_name, + supported_tags=supported_tags, + specifier=specifier, + prefer_binary=prefer_binary, + allow_all_prereleases=allow_all_prereleases, + hashes=hashes, + ) + + def __init__( + self, + project_name, # type: str + supported_tags, # type: List[Any] + specifier, # type: specifiers.BaseSpecifier + prefer_binary=False, # type: bool + allow_all_prereleases=False, # type: bool + hashes=None, # type: Optional[Any] + ): + self._allow_all_prereleases = allow_all_prereleases + self._hashes = hashes + self._prefer_binary = prefer_binary + self._project_name = project_name + self._specifier = specifier + self._supported_tags = supported_tags + + +class LinkEvaluator(object): + def __init__( + self, + allow_yanked, + project_name, + canonical_name, + formats, + target_python, + ignore_requires_python=False, + ignore_compatibility=True, + ): + self._allow_yanked = allow_yanked + self._canonical_name = canonical_name + self._ignore_requires_python = ignore_requires_python + self._formats = formats + self._target_python = target_python + self._ignore_compatibility = ignore_compatibility + + self.project_name = project_name + + +class InvalidWheelFilename(Exception): + """Wheel Filename is Invalid""" + + +class Wheel(object): + wheel_file_re = re.compile( + r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?)) + ((-(?P<build>\d[^-]*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?) 
+ \.whl|\.dist-info)$""", + re.VERBOSE, + ) + + def __init__(self, filename): + # type: (str) -> None + wheel_info = self.wheel_file_re.match(filename) + if not wheel_info: + raise InvalidWheelFilename("%s is not a valid wheel filename." % filename) + self.filename = filename + self.name = wheel_info.group("name").replace("_", "-") + # we'll assume "_" means "-" due to wheel naming scheme + # (https://github.com/pypa/pip/issues/1150) + self.version = wheel_info.group("ver").replace("_", "-") + self.build_tag = wheel_info.group("build") + self.pyversions = wheel_info.group("pyver").split(".") + self.abis = wheel_info.group("abi").split(".") + self.plats = wheel_info.group("plat").split(".") + + # All the tag combinations from this file + self.file_tags = { + (x, y, z) for x in self.pyversions for y in self.abis for z in self.plats + } + + def get_formatted_file_tags(self): + # type: () -> List[str] + """ + Return the wheel's tags as a sorted list of strings. + """ + return sorted("-".join(tag) for tag in self.file_tags) + + def support_index_min(self, tags): + # type: (List[Any]) -> int + """ + Return the lowest index that one of the wheel's file_tag combinations + achieves in the given list of supported tags. + + For example, if there are 8 supported tags and one of the file tags + is first in the list, then return 0. + + :param tags: the PEP 425 tags to check the wheel against, in order + with most preferred first. + :raises ValueError: If none of the wheel's file tags match one of + the supported tags. + """ + return min(tags.index(tag) for tag in self.file_tags if tag in tags) + + def supported(self, tags): + # type: (List[Any]) -> bool + """ + Return whether the wheel is compatible with one of the given tags. + + :param tags: the PEP 425 tags to check the wheel against. + """ + return not self.file_tags.isdisjoint(tags) + + +def resolve_possible_shim(target): + # type: (TShimmedFunc) -> Optional[Union[Type, Callable]] + if target is None: + return target + if getattr(target, "shim", None) and isinstance( + target.shim, (types.MethodType, types.FunctionType) + ): + return target.shim() + return target + + +@contextlib.contextmanager +def temp_environ(): + """Allow the ability to set os.environ temporarily""" + environ = dict(os.environ) + try: + yield + finally: + os.environ.clear() + os.environ.update(environ) + + +@contextlib.contextmanager +def get_requirement_tracker(req_tracker_creator=None): + # type: (Optional[Callable]) -> Generator[Optional[TReqTracker], None, None] + root = os.environ.get("PIP_REQ_TRACKER") + if not req_tracker_creator: + yield None + else: + req_tracker_args = [] + _, required_args = get_method_args(req_tracker_creator.__init__) # type: ignore + with ExitStack() as ctx: + if root is None: + root = ctx.enter_context(TemporaryDirectory(prefix="req-tracker")) + if root: + root = str(root) + ctx.enter_context(temp_environ()) + os.environ["PIP_REQ_TRACKER"] = root + if required_args is not None and "root" in required_args: + req_tracker_args.append(root) + with req_tracker_creator(*req_tracker_args) as tracker: + yield tracker + + +@contextlib.contextmanager +def ensure_resolution_dirs(**kwargs): + # type: (Any) -> Iterator[Dict[str, Any]] + """ + Ensures that the proper directories are scaffolded and present in the provided kwargs + for performing dependency resolution via pip. + + :return: A new kwargs dictionary with scaffolded directories for **build_dir**, **src_dir**, + **download_dir**, and **wheel_download_dir** added to the key value pairs. 
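+        Existing non-``None`` entries are passed through unchanged; only the
+        missing directories are created, under a single temporary base directory.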
+ :rtype: Dict[str, Any] + """ + keys = ("build_dir", "src_dir", "download_dir", "wheel_download_dir") + if not any(kwargs.get(key) is None for key in keys): + yield kwargs + else: + with TemporaryDirectory(prefix="pip-shims-") as base_dir: + for key in keys: + if kwargs.get(key) is not None: + continue + target = os.path.join(base_dir, key) + os.makedirs(target) + kwargs[key] = target + yield kwargs + + +@contextlib.contextmanager +def wheel_cache( + wheel_cache_provider, # type: TShimmedFunc + tempdir_manager_provider, # type: TShimmedFunc + cache_dir, # type: str + format_control=None, # type: Any + format_control_provider=None, # type: Optional[TShimmedFunc] +): + tempdir_manager_provider = resolve_possible_shim(tempdir_manager_provider) + wheel_cache_provider = resolve_possible_shim(wheel_cache_provider) + format_control_provider = resolve_possible_shim(format_control_provider) + if not format_control and not format_control_provider: + raise TypeError("Format control or provider needed for wheel cache!") + if not format_control: + format_control = format_control_provider(None, None) + with ExitStack() as ctx: + ctx.enter_context(tempdir_manager_provider()) + wheel_cache = wheel_cache_provider(cache_dir, format_control) + yield wheel_cache + + +def partial_command(shimmed_path, cmd_mapping=None): + # type: (Type, Optional[TShimmedCmdDict]) -> Union[Type[TCommandInstance], functools.partial] + """ + Maps a default set of arguments across all members of a + :class:`~pip_shims.models.ShimmedPath` instance, specifically for + :class:`~pip._internal.command.Command` instances which need + `summary` and `name` arguments. + + :param :class:`~pip_shims.models.ShimmedPath` shimmed_path: A + :class:`~pip_shims.models.ShimmedCollection` instance + :param Any cmd_mapping: A reference to use for mapping against, e.g. an + import that depends on pip also + :return: A dictionary mapping new arguments to their default values + :rtype: Dict[str, str] + """ + basecls = shimmed_path.shim() + resolved_cmd_mapping = None # type: Optional[Dict[str, Any]] + cmd_mapping = resolve_possible_shim(cmd_mapping) + if cmd_mapping is not None and isinstance(cmd_mapping, dict): + resolved_cmd_mapping = cmd_mapping.copy() + base_args = [] # type: List[str] + for root_cls in basecls.mro(): + if root_cls.__name__ == "Command": + _, root_init_args = get_method_args(root_cls.__init__) + if root_init_args is not None: + base_args = root_init_args.args + needs_name_and_summary = any(arg in base_args for arg in ("name", "summary")) + if not needs_name_and_summary: + basecls.name = shimmed_path.name + return basecls + elif ( + not resolved_cmd_mapping + and needs_name_and_summary + and getattr(functools, "partialmethod", None) + ): + new_init = functools.partial( + basecls.__init__, name=shimmed_path.name, summary="Summary" + ) + basecls.__init__ = new_init + result = basecls + assert resolved_cmd_mapping is not None + for command_name, command_info in resolved_cmd_mapping.items(): + if getattr(command_info, "class_name", None) == shimmed_path.name: + summary = getattr(command_info, "summary", "Command summary") + result = functools.partial(basecls, command_name, summary) + break + return result + + +def get_session( + install_cmd_provider=None, # type: Optional[TShimmedFunc] + install_cmd=None, # type: TCommandInstance + options=None, # type: Optional[Values] +): + # type: (...) 
-> TSession
+    session = None  # type: Optional[TSession]
+    if install_cmd is None:
+        assert install_cmd_provider is not None
+        install_cmd_provider = resolve_possible_shim(install_cmd_provider)
+        assert isinstance(install_cmd_provider, (type, functools.partial))
+        install_cmd = install_cmd_provider()
+    if options is None:
+        options, _ = install_cmd.parser.parse_args([])  # type: ignore
+    session = install_cmd._build_session(options)  # type: ignore
+    assert session is not None
+    atexit.register(session.close)
+    return session
+
+
+def populate_options(
+    install_command=None,  # type: TCommandInstance
+    options=None,  # type: Optional[Values]
+    **kwargs  # type: Any
+):
+    # type: (...) -> Tuple[Dict[str, Any], Values]
+    results = {}
+    if install_command is None and options is None:
+        raise TypeError("Must pass either options or InstallCommand to populate options")
+    if options is None and install_command is not None:
+        options, _ = install_command.parser.parse_args([])  # type: ignore
+    options_dict = options.__dict__
+    for provided_key, provided_value in kwargs.items():
+        if provided_key == "isolated":
+            options_key = "isolated_mode"
+        elif provided_key == "source_dir":
+            options_key = "src_dir"
+        else:
+            options_key = provided_key
+        if provided_key in options_dict and provided_value is not None:
+            setattr(options, options_key, provided_value)
+            results[provided_key] = provided_value
+        elif getattr(options, options_key, None) is not None:
+            results[provided_key] = getattr(options, options_key)
+        else:
+            results[provided_key] = provided_value
+    return results, options
+
+
+def get_requirement_set(
+    install_command=None,  # type: Optional[TCommandInstance]
+    req_set_provider=None,  # type: Optional[TShimmedFunc]
+    build_dir=None,  # type: Optional[str]
+    src_dir=None,  # type: Optional[str]
+    download_dir=None,  # type: Optional[str]
+    wheel_download_dir=None,  # type: Optional[str]
+    session=None,  # type: Optional[TSession]
+    wheel_cache=None,  # type: Optional[TWheelCache]
+    upgrade=False,  # type: bool
+    upgrade_strategy=None,  # type: Optional[str]
+    ignore_installed=False,  # type: bool
+    ignore_dependencies=False,  # type: bool
+    force_reinstall=False,  # type: bool
+    use_user_site=False,  # type: bool
+    isolated=False,  # type: bool
+    ignore_requires_python=False,  # type: bool
+    require_hashes=None,  # type: bool
+    cache_dir=None,  # type: Optional[str]
+    options=None,  # type: Optional[Values]
+    install_cmd_provider=None,  # type: Optional[TShimmedFunc]
+):
+    # type: (...) -> TRequirementSet
+    """
+    Creates a requirement set from the supplied parameters.
+
+    Not all parameters are passed through for all pip versions, but any
+    invalid parameters will be ignored if they are not needed to generate a
+    requirement set on the current pip version.
+
+    :param install_command: A :class:`~pip._internal.commands.install.InstallCommand`
+        instance which is used to generate the finder.
+    :param :class:`~pip_shims.models.ShimmedPathCollection` req_set_provider: A provider
+        to build requirement set instances.
+    :param str build_dir: The directory to build requirements in. Removed in pip 10,
+        defaults to None
+    :param str src_dir: The directory to use for source requirements. Removed in
+        pip 10, defaults to None
+    :param str download_dir: The directory to download requirement artifacts to. Removed
+        in pip 10, defaults to None
+    :param str wheel_download_dir: The directory to download wheels to. Removed in pip
+        10, defaults to None
+    :param :class:`~requests.Session` session: The pip session to use. 
Removed in pip 10, + defaults to None + :param WheelCache wheel_cache: The pip WheelCache instance to use for caching wheels. + Removed in pip 10, defaults to None + :param bool upgrade: Whether to try to upgrade existing requirements. Removed in pip + 10, defaults to False. + :param str upgrade_strategy: The upgrade strategy to use, e.g. "only-if-needed". + Removed in pip 10, defaults to None. + :param bool ignore_installed: Whether to ignore installed packages when resolving. + Removed in pip 10, defaults to False. + :param bool ignore_dependencies: Whether to ignore dependencies of requirements + when resolving. Removed in pip 10, defaults to False. + :param bool force_reinstall: Whether to force reinstall of packages when resolving. + Removed in pip 10, defaults to False. + :param bool use_user_site: Whether to use user site packages when resolving. Removed + in pip 10, defaults to False. + :param bool isolated: Whether to resolve in isolation. Removed in pip 10, defaults + to False. + :param bool ignore_requires_python: Removed in pip 10, defaults to False. + :param bool require_hashes: Whether to require hashes when resolving. Defaults to + False. + :param Values options: An :class:`~optparse.Values` instance from an install cmd + :param install_cmd_provider: A shim for providing new install command instances. + :type install_cmd_provider: :class:`~pip_shims.models.ShimmedPathCollection` + :return: A new requirement set instance + :rtype: :class:`~pip._internal.req.req_set.RequirementSet` + """ + req_set_provider = resolve_possible_shim(req_set_provider) + if install_command is None: + install_cmd_provider = resolve_possible_shim(install_cmd_provider) + assert isinstance(install_cmd_provider, (type, functools.partial)) + install_command = install_cmd_provider() + required_args = inspect.getargs( + req_set_provider.__init__.__code__ + ).args # type: ignore + results, options = populate_options( + install_command, + options, + build_dir=build_dir, + src_dir=src_dir, + download_dir=download_dir, + upgrade=upgrade, + upgrade_strategy=upgrade_strategy, + ignore_installed=ignore_installed, + ignore_dependencies=ignore_dependencies, + force_reinstall=force_reinstall, + use_user_site=use_user_site, + isolated=isolated, + ignore_requires_python=ignore_requires_python, + require_hashes=require_hashes, + cache_dir=cache_dir, + ) + if session is None and "session" in required_args: + session = get_session(install_cmd=install_command, options=options) + results["wheel_cache"] = wheel_cache + results["session"] = session + results["wheel_download_dir"] = wheel_download_dir + return call_function_with_correct_args(req_set_provider, **results) + + +def get_package_finder( + install_cmd=None, # type: Optional[TCommand] + options=None, # type: Optional[Values] + session=None, # type: Optional[TSession] + platform=None, # type: Optional[str] + python_versions=None, # type: Optional[Tuple[str, ...]] + abi=None, # type: Optional[str] + implementation=None, # type: Optional[str] + target_python=None, # type: Optional[Any] + ignore_requires_python=None, # type: Optional[bool] + target_python_builder=None, # type: Optional[TShimmedFunc] + install_cmd_provider=None, # type: Optional[TShimmedFunc] +): + # type: (...) -> TFinder + """Shim for compatibility to generate package finders. 
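+    Arguments that the running pip version does not accept are translated to
+    their nearest equivalent or dropped, so callers can safely pass the union
+    of parameters accepted across pip versions.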
+ + Build and return a :class:`~pip._internal.index.package_finder.PackageFinder` + instance using the :class:`~pip._internal.commands.install.InstallCommand` helper + method to construct the finder, shimmed with backports as needed for compatibility. + + :param install_cmd_provider: A shim for providing new install command instances. + :type install_cmd_provider: :class:`~pip_shims.models.ShimmedPathCollection` + :param install_cmd: A :class:`~pip._internal.commands.install.InstallCommand` + instance which is used to generate the finder. + :param optparse.Values options: An optional :class:`optparse.Values` instance + generated by calling `install_cmd.parser.parse_args()` typically. + :param session: An optional session instance, can be created by the `install_cmd`. + :param Optional[str] platform: An optional platform string, e.g. linux_x86_64 + :param Optional[Tuple[str, ...]] python_versions: A tuple of 2-digit strings + representing python versions, e.g. ("27", "35", "36", "37"...) + :param Optional[str] abi: The target abi to support, e.g. "cp38" + :param Optional[str] implementation: An optional implementation string for limiting + searches to a specific implementation, e.g. "cp" or "py" + :param target_python: A :class:`~pip._internal.models.target_python.TargetPython` + instance (will be translated to alternate arguments if necessary on incompatible + pip versions). + :param Optional[bool] ignore_requires_python: Whether to ignore `requires_python` + on resulting candidates, only valid after pip version 19.3.1 + :param target_python_builder: A 'TargetPython' builder (e.g. the class itself, + uninstantiated) + :return: A :class:`pip._internal.index.package_finder.PackageFinder` instance + :rtype: :class:`pip._internal.index.package_finder.PackageFinder` + + :Example: + + >>> from pip_shims.shims import InstallCommand, get_package_finder + >>> install_cmd = InstallCommand() + >>> finder = get_package_finder( + ... install_cmd, python_versions=("27", "35", "36", "37", "38"), implementation=" + cp" + ... ) + >>> candidates = finder.find_all_candidates("requests") + >>> requests_222 = next(iter(c for c in candidates if c.version.public == "2.22.0")) + >>> requests_222 + <InstallationCandidate('requests', <Version('2.22.0')>, <Link https://files.pythonhos + ted.org/packages/51/bd/23c926cd341ea6b7dd0b2a00aba99ae0f828be89d72b2190f27c11d4b7fb/r + equests-2.22.0-py2.py3-none-any.whl#sha256=9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9 + a590f48c010551dc6c4b31 (from https://pypi.org/simple/requests/) (requires-python:>=2. 
+ 7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*)>)>
+    """
+    if install_cmd is None:
+        install_cmd_provider = resolve_possible_shim(install_cmd_provider)
+        assert isinstance(install_cmd_provider, (type, functools.partial))
+        install_cmd = install_cmd_provider()
+    if options is None:
+        options, _ = install_cmd.parser.parse_args([])  # type: ignore
+    if session is None:
+        session = get_session(install_cmd=install_cmd, options=options)  # type: ignore
+    builder_args = inspect.getargs(
+        install_cmd._build_package_finder.__code__
+    )  # type: ignore
+    build_kwargs = {"options": options, "session": session}
+    expects_targetpython = "target_python" in builder_args.args
+    received_python = any(arg for arg in [platform, python_versions, abi, implementation])
+    if expects_targetpython and received_python and not target_python:
+        if target_python_builder is None:
+            target_python_builder = TargetPython
+        py_version_info = None
+        if python_versions:
+            py_version_info_python = max(python_versions)
+            py_version_info = tuple([int(part) for part in py_version_info_python])
+        target_python = target_python_builder(
+            platform=platform,
+            abi=abi,
+            implementation=implementation,
+            py_version_info=py_version_info,
+        )
+        build_kwargs["target_python"] = target_python
+    elif any(
+        arg in builder_args.args
+        for arg in ["platform", "python_versions", "abi", "implementation"]
+    ):
+        if target_python and not received_python:
+            tags = target_python.get_tags()
+            version_impl = set([t[0] for t in tags])
+            # impls = set([v[:2] for v in version_impl])
+            # impls.remove("py")
+            # impl = next(iter(impls), "py") if not target_python
+            versions = set([v[2:] for v in version_impl])
+            build_kwargs.update(
+                {
+                    "platform": target_python.platform,
+                    "python_versions": versions,
+                    "abi": target_python.abi,
+                    "implementation": target_python.implementation,
+                }
+            )
+    if (
+        ignore_requires_python is not None
+        and "ignore_requires_python" in builder_args.args
+    ):
+        build_kwargs["ignore_requires_python"] = ignore_requires_python
+    return install_cmd._build_package_finder(**build_kwargs)  # type: ignore
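+
+
+# A minimal usage sketch for the `shim_unpack` helper defined below; this is an
+# illustration, not upstream pip_shims code. It assumes `unpack_url`, `Link`,
+# and `PipSession` are exported by `pip_shims.shims` (as in pip_shims 0.5.x);
+# the URL and paths here are placeholders.
+#
+#     from pip_shims.compat import shim_unpack
+#     from pip_shims.shims import Link, PipSession, unpack_url
+#
+#     session = PipSession()
+#     shim_unpack(
+#         unpack_url,                        # shimmed pip unpack implementation
+#         download_dir="/tmp/downloads",     # where downloaded archives land
+#         link=Link("https://example.com/pkg-1.0.tar.gz"),
+#         location="/tmp/build/pkg",         # unpack/source target directory
+#         session=session,
+#         progress_bar="off",
+#     )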
+def shim_unpack(
+    unpack_fn,  # type: TShimmedFunc
+    download_dir,  # type: str
+    ireq=None,  # type: Optional[Any]
+    link=None,  # type: Optional[Any]
+    location=None,  # type: Optional[str]
+    hashes=None,  # type: Optional[Any]
+    progress_bar="off",  # type: str
+    only_download=None,  # type: Optional[bool]
+    downloader_provider=None,  # type: Optional[TShimmedFunc]
+    session=None,  # type: Optional[Any]
+):
+    # type: (...) -> None
+    """
+    Accepts all parameters that have been valid to pass
+    to :func:`pip._internal.download.unpack_url` and selects or
+    drops parameters as needed before invoking the provided
+    callable.
+
+    :param unpack_fn: A callable or shim referring to the pip implementation
+    :type unpack_fn: Callable
+    :param str download_dir: The directory to download the file to
+    :param Optional[:class:`~pip._internal.req.req_install.InstallRequirement`] ireq:
+        an Install Requirement instance, defaults to None
+    :param Optional[:class:`~pip._internal.models.link.Link`] link: A Link instance,
+        defaults to None.
+    :param Optional[str] location: A location or source directory if the target is
+        a VCS url, defaults to None.
+    :param Optional[Any] hashes: A Hashes instance, defaults to None
+    :param str progress_bar: Indicates progress bar usage during download, defaults to
+        off.
+    :param Optional[bool] only_download: Whether to skip install, defaults to None.
+    :param Optional[ShimmedPathCollection] downloader_provider: A downloader class
+        to instantiate, if applicable.
+    :param Optional[`~requests.Session`] session: A PipSession instance, defaults to
+        None.
+    :return: The result of unpacking the url.
+    :rtype: None
+    """
+    unpack_fn = resolve_possible_shim(unpack_fn)
+    downloader_provider = resolve_possible_shim(downloader_provider)
+    required_args = inspect.getargs(unpack_fn.__code__).args  # type: ignore
+    unpack_kwargs = {"download_dir": download_dir}
+    if ireq:
+        if not link and ireq.link:
+            link = ireq.link
+        if only_download is None:
+            only_download = ireq.is_wheel
+        if hashes is None:
+            hashes = ireq.hashes(True)
+        if location is None and getattr(ireq, "source_dir", None):
+            location = ireq.source_dir
+    unpack_kwargs.update({"link": link, "location": location})
+    if hashes is not None and "hashes" in required_args:
+        unpack_kwargs["hashes"] = hashes
+    if "progress_bar" in required_args:
+        unpack_kwargs["progress_bar"] = progress_bar
+    if only_download is not None and "only_download" in required_args:
+        unpack_kwargs["only_download"] = only_download
+    if session is not None and "session" in required_args:
+        unpack_kwargs["session"] = session
+    if "downloader" in required_args and downloader_provider is not None:
+        assert session is not None
+        assert progress_bar is not None
+        unpack_kwargs["downloader"] = downloader_provider(session, progress_bar)
+    return unpack_fn(**unpack_kwargs)  # type: ignore
+
+
+def _ensure_finder(
+    finder=None,  # type: Optional[TFinder]
+    finder_provider=None,  # type: Optional[Callable]
+    install_cmd=None,  # type: Optional[TCommandInstance]
+    options=None,  # type: Optional[Values]
+    session=None,  # type: Optional[TSession]
+):
+    if not any([finder, finder_provider, install_cmd]):
+        raise TypeError(
+            "RequirementPreparer requires a package finder but no InstallCommand"
+            " was provided to build one and none was passed in."
+        )
+    if finder is not None:
+        return finder
+    else:
+        if session is None:
+            session = get_session(install_cmd=install_cmd, options=options)
+        if finder_provider is not None and options is not None:
+            finder = finder_provider(options=options, session=session)
+        else:
+            finder = get_package_finder(install_cmd, options=options, session=session)
+        return finder
+
+
+@contextlib.contextmanager
+def make_preparer(
+    preparer_fn,  # type: TShimmedFunc
+    req_tracker_fn=None,  # type: Optional[TShimmedFunc]
+    build_dir=None,  # type: Optional[str]
+    src_dir=None,  # type: Optional[str]
+    download_dir=None,  # type: Optional[str]
+    wheel_download_dir=None,  # type: Optional[str]
+    progress_bar="off",  # type: str
+    build_isolation=False,  # type: bool
+    session=None,  # type: Optional[TSession]
+    finder=None,  # type: Optional[TFinder]
+    options=None,  # type: Optional[Values]
+    require_hashes=None,  # type: Optional[bool]
+    use_user_site=None,  # type: Optional[bool]
+    req_tracker=None,  # type: Optional[Union[TReqTracker, TShimmedFunc]]
+    install_cmd_provider=None,  # type: Optional[TShimmedFunc]
+    downloader_provider=None,  # type: Optional[TShimmedFunc]
+    install_cmd=None,  # type: Optional[TCommandInstance]
+    finder_provider=None,  # type: Optional[TShimmedFunc]
+):
+    # type: (...) -> ContextManager
+    """
+    Creates a requirement preparer for preparing pip requirements.
+
+    Provides a compatibility shim that accepts all previously valid arguments and
+    discards any that are no longer used.
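+    The preparer is yielded from inside the requirement tracker context, so a
+    tracker created on the caller's behalf is cleaned up when the context exits.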
+
+    :raises TypeError: No requirement tracker provided and one cannot be generated
+    :raises TypeError: No valid session provided and one cannot be generated
+    :raises TypeError: No valid finder provided and one cannot be generated
+    :param TShimmedFunc preparer_fn: Callable or shim for generating preparers.
+    :param Optional[TShimmedFunc] req_tracker_fn: Callable or shim for generating
+        requirement trackers, defaults to None
+    :param Optional[str] build_dir: Directory for building packages and wheels,
+        defaults to None
+    :param Optional[str] src_dir: Directory to find or extract source files, defaults
+        to None
+    :param Optional[str] download_dir: Target directory to download files, defaults to
+        None
+    :param Optional[str] wheel_download_dir: Target directory to download wheels, defaults
+        to None
+    :param str progress_bar: The progress bar style to display, defaults to off
+    :param bool build_isolation: Whether to build requirements in isolation, defaults
+        to False
+    :param Optional[TSession] session: Existing session to use for getting requirements,
+        defaults to None
+    :param Optional[TFinder] finder: The package finder to use during resolution,
+        defaults to None
+    :param Optional[Values] options: Pip options to use if needed, defaults to None
+    :param Optional[bool] require_hashes: Whether to require hashes for preparation
+    :param Optional[bool] use_user_site: Whether to use the user site directory for
+        preparing requirements
+    :param Optional[Union[TReqTracker, TShimmedFunc]] req_tracker: The requirement
+        tracker to use for building packages, defaults to None
+    :param Optional[TShimmedFunc] downloader_provider: A downloader provider
+    :param Optional[TCommandInstance] install_cmd: The install command used to create
+        the finder, session, and options if needed, defaults to None
+    :param Optional[TShimmedFunc] finder_provider: A package finder provider
+    :yield: A new requirement preparer instance
+    :rtype: ContextManager[:class:`~pip._internal.operations.prepare.RequirementPreparer`]
+
+    :Example:
+
+    >>> from pip_shims.shims import (
+    ...     InstallCommand, get_package_finder, make_preparer, get_requirement_tracker
+    ... )
+    >>> install_cmd = InstallCommand()
+    >>> pip_options, _ = install_cmd.parser.parse_args([])
+    >>> session = install_cmd._build_session(pip_options)
+    >>> finder = get_package_finder(
+    ...     install_cmd, session=session, options=pip_options
+    ... )
+    >>> with make_preparer(
+    ...     options=pip_options, finder=finder, session=session, install_cmd=install_cmd
+    ... ) as preparer:
+    ...     
print(preparer) + <pip._internal.operations.prepare.RequirementPreparer object at 0x7f8a2734be80> + """ + preparer_fn = resolve_possible_shim(preparer_fn) + downloader_provider = resolve_possible_shim(downloader_provider) + finder_provider = resolve_possible_shim(finder_provider) + required_args = inspect.getargs(preparer_fn.__init__.__code__).args # type: ignore + if not req_tracker and not req_tracker_fn and "req_tracker" in required_args: + raise TypeError("No requirement tracker and no req tracker generator found!") + if "downloader" in required_args and not downloader_provider: + raise TypeError("no downloader provided, but one is required to continue!") + req_tracker_fn = resolve_possible_shim(req_tracker_fn) + pip_options_created = options is None + session_is_required = "session" in required_args + finder_is_required = "finder" in required_args + downloader_is_required = "downloader" in required_args + options_map = { + "src_dir": src_dir, + "download_dir": download_dir, + "wheel_download_dir": wheel_download_dir, + "build_dir": build_dir, + "progress_bar": progress_bar, + "build_isolation": build_isolation, + "require_hashes": require_hashes, + "use_user_site": use_user_site, + } + if install_cmd is None: + assert install_cmd_provider is not None + install_cmd_provider = resolve_possible_shim(install_cmd_provider) + assert isinstance(install_cmd_provider, (type, functools.partial)) + install_cmd = install_cmd_provider() + preparer_args, options = populate_options(install_cmd, options, **options_map) + if options is not None and pip_options_created: + for k, v in options_map.items(): + suppress_setattr(options, k, v, filter_none=True) + if all([session is None, install_cmd is None, session_is_required]): + raise TypeError( + "Preparer requires a session instance which was not supplied and cannot be " + "created without an InstallCommand." 
+        )
+    elif all([session is None, session_is_required]):
+        session = get_session(install_cmd=install_cmd, options=options)
+    preparer_args["session"] = session
+    if finder_is_required:
+        finder = _ensure_finder(
+            finder=finder,
+            finder_provider=finder_provider,
+            install_cmd=install_cmd,
+            options=options,
+            session=session,
+        )
+        preparer_args["finder"] = finder
+    if downloader_is_required:
+        preparer_args["downloader"] = downloader_provider(session, progress_bar)
+    req_tracker_fn = nullcontext if not req_tracker_fn else req_tracker_fn
+    with req_tracker_fn() as tracker_ctx:
+        if "req_tracker" in required_args:
+            req_tracker = tracker_ctx if req_tracker is None else req_tracker
+            preparer_args["req_tracker"] = req_tracker
+
+        result = call_function_with_correct_args(preparer_fn, **preparer_args)
+        yield result
+
+
+def get_resolver(
+    resolver_fn,  # type: TShimmedFunc
+    install_req_provider=None,  # type: Optional[TShimmedFunc]
+    format_control_provider=None,  # type: Optional[TShimmedFunc]
+    wheel_cache_provider=None,  # type: Optional[TShimmedFunc]
+    finder=None,  # type: Optional[TFinder]
+    upgrade_strategy="to-satisfy-only",  # type: str
+    force_reinstall=None,  # type: Optional[bool]
+    ignore_dependencies=None,  # type: Optional[bool]
+    ignore_requires_python=None,  # type: Optional[bool]
+    ignore_installed=True,  # type: bool
+    use_user_site=False,  # type: bool
+    isolated=None,  # type: Optional[bool]
+    wheel_cache=None,  # type: Optional[TWheelCache]
+    preparer=None,  # type: Optional[TPreparer]
+    session=None,  # type: Optional[TSession]
+    options=None,  # type: Optional[Values]
+    make_install_req=None,  # type: Optional[Callable]
+    install_cmd_provider=None,  # type: Optional[TShimmedFunc]
+    install_cmd=None,  # type: Optional[TCommandInstance]
+):
+    # type: (...) -> TResolver
+    """
+    A resolver creation compatibility shim for generating a resolver.
+
+    Consumes any argument that was previously used to instantiate a
+    resolver and discards anything that is no longer valid.
+
+    .. note:: This is only valid for **pip >= 10.0.0**
+
+    :raises ValueError: A session is required but not provided and one cannot be created
+    :raises ValueError: A finder is required but not provided and one cannot be created
+    :raises ValueError: An install requirement provider is required and has not been
+        provided
+    :param TShimmedFunc resolver_fn: The resolver function used to create new resolver
+        instances.
+    :param TShimmedFunc install_req_provider: The provider function to use to generate
+        install requirements if needed.
+    :param TShimmedFunc format_control_provider: The provider function to use to generate
+        a format_control instance if needed.
+    :param TShimmedFunc wheel_cache_provider: The provider function to use to generate
+        a wheel cache if needed.
+    :param Optional[TFinder] finder: The package finder to use during resolution,
+        defaults to None.
+    :param str upgrade_strategy: Upgrade strategy to use, defaults to ``to-satisfy-only``. 
+ :param Optional[bool] force_reinstall: Whether to simulate or assume package + reinstallation during resolution, defaults to None + :param Optional[bool] ignore_dependencies: Whether to ignore package dependencies, + defaults to None + :param Optional[bool] ignore_requires_python: Whether to ignore indicated + required_python versions on packages, defaults to None + :param bool ignore_installed: Whether to ignore installed packages during resolution, + defaults to True + :param bool use_user_site: Whether to use the user site location during resolution, + defaults to False + :param Optional[bool] isolated: Whether to isolate the resolution process, defaults + to None + :param Optional[TWheelCache] wheel_cache: The wheel cache to use, defaults to None + :param Optional[TPreparer] preparer: The requirement preparer to use, defaults to + None + :param Optional[TSession] session: Existing session to use for getting requirements, + defaults to None + :param Optional[Values] options: Pip options to use if needed, defaults to None + :param Optional[functools.partial] make_install_req: The partial function to pass in + to the resolver for actually generating install requirements, if necessary + :param Optional[TCommandInstance] install_cmd: The install command used to create + the finder, session, and options if needed, defaults to None. + :return: A new resolver instance. + :rtype: :class:`~pip._internal.legacy_resolve.Resolver` + + :Example: + + >>> import os + >>> from tempdir import TemporaryDirectory + >>> from pip_shims.shims import ( + ... InstallCommand, get_package_finder, make_preparer, get_requirement_tracker, + ... get_resolver, InstallRequirement, RequirementSet + ... ) + >>> install_cmd = InstallCommand() + >>> pip_options, _ = install_cmd.parser.parse_args([]) + >>> session = install_cmd._build_session(pip_options) + >>> finder = get_package_finder( + ... install_cmd, session=session, options=pip_options + ... ) + >>> wheel_cache = WheelCache(USER_CACHE_DIR, FormatControl(None, None)) + >>> with TemporaryDirectory() as temp_base: + ... reqset = RequirementSet() + ... ireq = InstallRequirement.from_line("requests") + ... ireq.is_direct = True + ... build_dir = os.path.join(temp_base, "build") + ... src_dir = os.path.join(temp_base, "src") + ... ireq.build_location(build_dir) + ... with make_preparer( + ... options=pip_options, finder=finder, session=session, + ... build_dir=build_dir, install_cmd=install_cmd, + ... ) as preparer: + ... resolver = get_resolver( + ... finder=finder, ignore_dependencies=False, ignore_requires_python=True, + ... preparer=preparer, session=session, options=pip_options, + ... install_cmd=install_cmd, wheel_cache=wheel_cache, + ... ) + ... resolver.require_hashes = False + ... reqset.add_requirement(ireq) + ... results = resolver.resolve(reqset) + ... #reqset.cleanup_files() + ... for result_req in reqset.requirements: + ... 
print(result_req) + requests + chardet + certifi + urllib3 + idna + """ + resolver_fn = resolve_possible_shim(resolver_fn) + install_req_provider = resolve_possible_shim(install_req_provider) + format_control_provider = resolve_possible_shim(format_control_provider) + wheel_cache_provider = resolve_possible_shim(wheel_cache_provider) + install_cmd_provider = resolve_possible_shim(install_cmd_provider) + required_args = inspect.getargs(resolver_fn.__init__.__code__).args # type: ignore + install_cmd_dependency_map = {"session": session, "finder": finder} + resolver_kwargs = {} # type: Dict[str, Any] + if install_cmd is None: + assert isinstance(install_cmd_provider, (type, functools.partial)) + install_cmd = install_cmd_provider() + if options is None and install_cmd is not None: + options, _ = install_cmd.parser.parse_args([]) # type: ignore + for arg, val in install_cmd_dependency_map.items(): + if arg not in required_args: + continue + elif val is None and install_cmd is None: + raise TypeError( + "Preparer requires a {0} but did not receive one " + "and cannot generate one".format(arg) + ) + elif arg == "session" and val is None: + val = get_session(install_cmd=install_cmd, options=options) + elif arg == "finder" and val is None: + val = get_package_finder(install_cmd, options=options, session=session) + resolver_kwargs[arg] = val + if "make_install_req" in required_args: + if make_install_req is None and install_req_provider is not None: + make_install_req = functools.partial( + install_req_provider, + isolated=isolated, + wheel_cache=wheel_cache, + # use_pep517=use_pep517, + ) + assert make_install_req is not None + resolver_kwargs["make_install_req"] = make_install_req + if "isolated" in required_args: + resolver_kwargs["isolated"] = isolated + if "wheel_cache" in required_args: + if wheel_cache is None and wheel_cache_provider is not None: + cache_dir = getattr(options, "cache_dir", None) + format_control = getattr( + options, + "format_control", + format_control_provider(None, None), # type: ignore + ) + wheel_cache = wheel_cache_provider(cache_dir, format_control) + resolver_kwargs["wheel_cache"] = wheel_cache + resolver_kwargs.update( + { + "upgrade_strategy": upgrade_strategy, + "force_reinstall": force_reinstall, + "ignore_dependencies": ignore_dependencies, + "ignore_requires_python": ignore_requires_python, + "ignore_installed": ignore_installed, + "use_user_site": use_user_site, + "preparer": preparer, + } + ) + return resolver_fn(**resolver_kwargs) # type: ignore + + +def resolve( # noqa:C901 + ireq, # type: TInstallRequirement + reqset_provider=None, # type: Optional[TShimmedFunc] + req_tracker_provider=None, # type: Optional[TShimmedFunc] + install_cmd_provider=None, # type: Optional[TShimmedFunc] + install_command=None, # type: Optional[TCommand] + finder_provider=None, # type: Optional[TShimmedFunc] + resolver_provider=None, # type: Optional[TShimmedFunc] + wheel_cache_provider=None, # type: Optional[TShimmedFunc] + format_control_provider=None, # type: Optional[TShimmedFunc] + make_preparer_provider=None, # type: Optional[TShimmedFunc] + tempdir_manager_provider=None, # type: Optional[TShimmedFunc] + options=None, # type: Optional[Values] + session=None, # type: Optional[TSession] + resolver=None, # type: Optional[TResolver] + finder=None, # type: Optional[TFinder] + upgrade_strategy="to-satisfy-only", # type: str + force_reinstall=None, # type: Optional[bool] + ignore_dependencies=None, # type: Optional[bool] + ignore_requires_python=None, # type: Optional[bool] + 
ignore_installed=True,  # type: bool
+    use_user_site=False,  # type: bool
+    isolated=None,  # type: Optional[bool]
+    build_dir=None,  # type: Optional[str]
+    source_dir=None,  # type: Optional[str]
+    download_dir=None,  # type: Optional[str]
+    cache_dir=None,  # type: Optional[str]
+    wheel_download_dir=None,  # type: Optional[str]
+    wheel_cache=None,  # type: Optional[TWheelCache]
+    require_hashes=None,  # type: bool
+    check_supported_wheels=True,  # type: bool
+):
+    # type: (...) -> Dict[str, TInstallRequirement]
+    """
+    Resolves the provided **InstallRequirement**, returning a dictionary that maps
+    package names to their corresponding ``InstallRequirement`` values.
+
+    :param :class:`~pip._internal.req.req_install.InstallRequirement` ireq: An
+        InstallRequirement to initiate the resolution process
+    :param :class:`~pip_shims.models.ShimmedPathCollection` reqset_provider: A provider
+        to build requirement set instances.
+    :param :class:`~pip_shims.models.ShimmedPathCollection` req_tracker_provider: A
+        provider to build requirement tracker instances
+    :param install_cmd_provider: A shim for providing new install command instances.
+    :type install_cmd_provider: :class:`~pip_shims.models.ShimmedPathCollection`
+    :param Optional[TCommandInstance] install_command: The install command used to
+        create the finder, session, and options if needed, defaults to None.
+    :param :class:`~pip_shims.models.ShimmedPathCollection` finder_provider: A provider
+        of package finder instances.
+    :param :class:`~pip_shims.models.ShimmedPathCollection` resolver_provider: A provider
+        to build resolver instances
+    :param TShimmedFunc wheel_cache_provider: The provider function to use to generate a
+        wheel cache if needed.
+    :param TShimmedFunc format_control_provider: The provider function to use to generate
+        a format_control instance if needed.
+    :param TShimmedFunc make_preparer_provider: Callable or shim for generating preparers.
+    :param Optional[TShimmedFunc] tempdir_manager_provider: Shim for generating tempdir
+        manager for pip temporary directories
+    :param Optional[Values] options: Pip options to use if needed, defaults to None
+    :param Optional[TSession] session: Existing session to use for getting requirements,
+        defaults to None
+    :param :class:`~pip._internal.legacy_resolve.Resolver` resolver: A pre-existing
+        resolver instance to use for resolution
+    :param Optional[TFinder] finder: The package finder to use during resolution,
+        defaults to None.
+    :param str upgrade_strategy: Upgrade strategy to use, defaults to ``to-satisfy-only``.
+    :param Optional[bool] force_reinstall: Whether to simulate or assume package
+        reinstallation during resolution, defaults to None
+    :param Optional[bool] ignore_dependencies: Whether to ignore package dependencies,
+        defaults to None
+    :param Optional[bool] ignore_requires_python: Whether to ignore indicated
+        required_python versions on packages, defaults to None
+    :param bool ignore_installed: Whether to ignore installed packages during
+        resolution, defaults to True
+    :param bool use_user_site: Whether to use the user site location during resolution,
+        defaults to False
+    :param Optional[bool] isolated: Whether to isolate the resolution process, defaults
+        to None
+    :param Optional[str] build_dir: Directory for building packages and wheels, defaults
+        to None
+    :param str source_dir: The directory to use for source requirements. 
Removed in pip + 10, defaults to None + :param Optional[str] download_dir: Target directory to download files, defaults to + None + :param str cache_dir: The cache directory to use for caching artifacts during + resolution + :param Optional[str] wheel_download_dir: Target directoryto download wheels, defaults + to None + :param Optional[TWheelCache] wheel_cache: The wheel cache to use, defaults to None + :param bool require_hashes: Whether to require hashes when resolving. Defaults to + False. + :param bool check_supported_wheels: Whether to check support of wheels before including + them in resolution. + :return: A dictionary mapping requirements to corresponding + :class:`~pip._internal.req.req_install.InstallRequirement`s + :rtype: :class:`~pip._internal.req.req_install.InstallRequirement` + + :Example: + + >>> from pip_shims.shims import resolve, InstallRequirement + >>> ireq = InstallRequirement.from_line("requests>=2.20") + >>> results = resolve(ireq) + >>> for k, v in results.items(): + ... print("{0}: {1!r}".format(k, v)) + requests: <InstallRequirement object: requests>=2.20 from https://files.pythonhosted. + org/packages/51/bd/23c926cd341ea6b7dd0b2a00aba99ae0f828be89d72b2190f27c11d4b7fb/reque + sts-2.22.0-py2.py3-none-any.whl#sha256=9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590 + f48c010551dc6c4b31 editable=False> + idna: <InstallRequirement object: idna<2.9,>=2.5 from https://files.pythonhosted.org/ + packages/14/2c/cd551d81dbe15200be1cf41cd03869a46fe7226e7450af7a6545bfc474c9/idna-2.8- + py2.py3-none-any.whl#sha256=ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432 + f7e4a3c (from requests>=2.20) editable=False> + urllib3: <InstallRequirement object: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 from htt + ps://files.pythonhosted.org/packages/b4/40/a9837291310ee1ccc242ceb6ebfd9eb21539649f19 + 3a7c8c86ba15b98539/urllib3-1.25.7-py2.py3-none-any.whl#sha256=a8a318824cc77d1fd4b2bec + 2ded92646630d7fe8619497b142c84a9e6f5a7293 (from requests>=2.20) editable=False> + chardet: <InstallRequirement object: chardet<3.1.0,>=3.0.2 from https://files.pythonh + osted.org/packages/bc/a9/01ffebfb562e4274b6487b4bb1ddec7ca55ec7510b22e4c51f14098443b8 + /chardet-3.0.4-py2.py3-none-any.whl#sha256=fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed + 4531e3e15460124c106691 (from requests>=2.20) editable=False> + certifi: <InstallRequirement object: certifi>=2017.4.17 from https://files.pythonhost + ed.org/packages/18/b0/8146a4f8dd402f60744fa380bc73ca47303cccf8b9190fd16a827281eac2/ce + rtifi-2019.9.11-py2.py3-none-any.whl#sha256=fd7c7c74727ddcf00e9acd26bba8da604ffec95bf + 1c2144e67aff7a8b50e6cef (from requests>=2.20) editable=False> + """ + reqset_provider = resolve_possible_shim(reqset_provider) + finder_provider = resolve_possible_shim(finder_provider) + resolver_provider = resolve_possible_shim(resolver_provider) + wheel_cache_provider = resolve_possible_shim(wheel_cache_provider) + format_control_provider = resolve_possible_shim(format_control_provider) + make_preparer_provider = resolve_possible_shim(make_preparer_provider) + req_tracker_provider = resolve_possible_shim(req_tracker_provider) + install_cmd_provider = resolve_possible_shim(install_cmd_provider) + tempdir_manager_provider = resolve_possible_shim(tempdir_manager_provider) + if install_command is None: + assert isinstance(install_cmd_provider, (type, functools.partial)) + install_command = install_cmd_provider() + kwarg_map = { + "upgrade_strategy": upgrade_strategy, + "force_reinstall": force_reinstall, + "ignore_dependencies": 
+        "ignore_requires_python": ignore_requires_python,
+        "ignore_installed": ignore_installed,
+        "use_user_site": use_user_site,
+        "isolated": isolated,
+        "build_dir": build_dir,
+        "src_dir": source_dir,
+        "download_dir": download_dir,
+        "require_hashes": require_hashes,
+        "cache_dir": cache_dir,
+    }
+    kwargs, options = populate_options(install_command, options, **kwarg_map)
+    with ExitStack() as ctx:
+        ctx.enter_context(tempdir_manager_provider())
+        kwargs = ctx.enter_context(
+            ensure_resolution_dirs(wheel_download_dir=wheel_download_dir, **kwargs)
+        )
+        wheel_download_dir = kwargs.pop("wheel_download_dir")
+        if session is None:
+            session = get_session(install_cmd=install_command, options=options)
+        if finder is None:
+            finder = finder_provider(install_command, options=options, session=session)  # type: ignore
+        format_control = getattr(options, "format_control", None)
+        if not format_control:
+            format_control = format_control_provider(None, None)  # type: ignore
+        wheel_cache = wheel_cache_provider(
+            kwargs["cache_dir"], format_control
+        )  # type: ignore
+        ireq.is_direct = True  # type: ignore
+        build_location_kwargs = {"build_dir": kwargs["build_dir"], "autodelete": True}
+        call_function_with_correct_args(ireq.build_location, **build_location_kwargs)
+        if reqset_provider is None:
+            raise TypeError(
+                "cannot resolve without a requirement set provider... failed!"
+            )
+        reqset = reqset_provider(install_command, options=options, session=session, wheel_download_dir=wheel_download_dir, **kwargs)  # type: ignore
+        if getattr(reqset, "prepare_files", None):
+            reqset.add_requirement(ireq)
+            results = reqset.prepare_files(finder)
+            result = reqset.requirements
+            reqset.cleanup_files()
+            return result
+        if make_preparer_provider is None:
+            raise TypeError("Cannot create requirement preparer, cannot resolve!")
+
+        preparer_args = {
+            "build_dir": kwargs["build_dir"],
+            "src_dir": kwargs["src_dir"],
+            "download_dir": kwargs["download_dir"],
+            "wheel_download_dir": wheel_download_dir,
+            "build_isolation": kwargs["isolated"],
+            "install_cmd": install_command,
+            "options": options,
+            "finder": finder,
+            "session": session,
+            "use_user_site": use_user_site,
+            "require_hashes": require_hashes,
+        }
+        if isinstance(req_tracker_provider, (types.FunctionType, functools.partial)):
+            preparer_args["req_tracker"] = ctx.enter_context(req_tracker_provider())
+        resolver_keys = [
+            "upgrade_strategy",
+            "force_reinstall",
+            "ignore_dependencies",
+            "ignore_installed",
+            "use_user_site",
+            "isolated",
+        ]
+        resolver_args = {key: kwargs[key] for key in resolver_keys if key in kwargs}
+        if resolver_provider is None:
+            raise TypeError("Cannot resolve without a resolver provider... failed!")
+        preparer = ctx.enter_context(make_preparer_provider(**preparer_args))
+        resolver = resolver_provider(
+            finder=finder,
+            preparer=preparer,
+            session=session,
+            options=options,
+            install_cmd=install_command,
+            wheel_cache=wheel_cache,
+            **resolver_args
+        )  # type: ignore
+        resolver.require_hashes = kwargs.get("require_hashes", False)  # type: ignore
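+        # NOTE: pip's Resolver.resolve() signature varies across versions --
+        # older releases accept a RequirementSet, newer ones accept root_reqs
+        # (and possibly check_supported_wheels) -- so the bound method is
+        # inspected before its argument list is assembled.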
failed!") + preparer = ctx.enter_context(make_preparer_provider(**preparer_args)) + resolver = resolver_provider( + finder=finder, + preparer=preparer, + session=session, + options=options, + install_cmd=install_command, + wheel_cache=wheel_cache, + **resolver_args + ) # type: ignore + resolver.require_hashes = kwargs.get("require_hashes", False) # type: ignore + _, required_resolver_args = get_method_args(resolver.resolve) + resolver_args = [] + if "requirement_set" in required_resolver_args.args: + reqset.add_requirement(ireq) + resolver_args.append(reqset) + elif "root_reqs" in required_resolver_args.args: + resolver_args.append([ireq]) + if "check_supported_wheels" in required_resolver_args.args: + resolver_args.append(check_supported_wheels) + result_reqset = resolver.resolve(*resolver_args) # type: ignore + if result_reqset is None: + result_reqset = reqset + results = result_reqset.requirements + cleanup_fn = getattr(reqset, "cleanup_files", None) + if cleanup_fn is not None: + cleanup_fn() + return results + + +def build_wheel( + req=None, # type: Optional[TInstallRequirement] + reqset=None, # type: Optional[Union[TReqSet, Iterable[TInstallRequirement]]] + output_dir=None, # type: Optional[str] + preparer=None, # type: Optional[TPreparer] + wheel_cache=None, # type: Optional[TWheelCache] + build_options=None, # type: Optional[List[str]] + global_options=None, # type: Optional[List[str]] + check_binary_allowed=None, # type: Optional[Callable[TInstallRequirement, bool]] + no_clean=False, # type: bool + session=None, # type: Optional[TSession] + finder=None, # type: Optional[TFinder] + install_command=None, # type: Optional[TCommand] + req_tracker=None, # type: Optional[TReqTracker] + build_dir=None, # type: Optional[str] + src_dir=None, # type: Optional[str] + download_dir=None, # type: Optional[str] + wheel_download_dir=None, # type: Optional[str] + cache_dir=None, # type: Optional[str] + use_user_site=False, # type: bool + use_pep517=None, # type: Optional[bool] + format_control_provider=None, # type: Optional[TShimmedFunc] + wheel_cache_provider=None, # type: Optional[TShimmedFunc] + preparer_provider=None, # type: Optional[TShimmedFunc] + wheel_builder_provider=None, # type: Optional[TShimmedFunc] + build_one_provider=None, # type: Optional[TShimmedFunc] + build_one_inside_env_provider=None, # type: Optional[TShimmedFunc] + build_many_provider=None, # type: Optional[TShimmedFunc] + install_command_provider=None, # type: Optional[TShimmedFunc] + finder_provider=None, # type: Optional[TShimmedFunc] +): + # type: (...) 
+    # type: (...) -> Optional[Union[str, Tuple[List[TInstallRequirement], List[TInstallRequirement]]]]
+    """
+    Build a wheel or a set of wheels
+
+    :raises TypeError: Raised when no requirements are provided
+    :param Optional[TInstallRequirement] req: An `InstallRequirement` to build
+    :param Optional[TReqSet] reqset: A `RequirementSet` instance (`pip<10`) or an
+        iterable of `InstallRequirement` instances (`pip>=10`) to build
+    :param Optional[str] output_dir: Target output directory, only useful when building
+        one wheel using pip>=20.0
+    :param Optional[TPreparer] preparer: A preparer instance, defaults to None
+    :param Optional[TWheelCache] wheel_cache: A wheel cache instance, defaults to None
+    :param Optional[List[str]] build_options: A list of build options to pass in
+    :param Optional[List[str]] global_options: A list of global options to pass in
+    :param Optional[Callable[[TInstallRequirement], bool]] check_binary_allowed: A
+        callable to check whether we are allowed to build and cache wheels for an ireq
+    :param bool no_clean: Whether to avoid cleaning up wheels
+    :param Optional[TSession] session: A `PipSession` instance to pass to create a
+        `finder` if necessary
+    :param Optional[TFinder] finder: A `PackageFinder` instance to use for generating a
+        `WheelBuilder` instance on `pip<20`
+    :param Optional[TCommandInstance] install_command: The install command used to
+        create the finder, session, and options if needed, defaults to None.
+    :param Optional[TReqTracker] req_tracker: An optional requirement tracker instance,
+        if one already exists
+    :param Optional[str] build_dir: Passthrough parameter for building preparer
+    :param Optional[str] src_dir: Passthrough parameter for building preparer
+    :param Optional[str] download_dir: Passthrough parameter for building preparer
+    :param Optional[str] wheel_download_dir: Passthrough parameter for building preparer
+    :param Optional[str] cache_dir: Passthrough cache directory for wheel cache options
+    :param bool use_user_site: Whether to use the user site directory when preparing
+        install requirements on `pip<20`
+    :param Optional[bool] use_pep517: When set to *True* or *False*, prefers building
+        with or without pep517 as specified, otherwise uses requirement preference.
+        Only works for single requirements.
+    :param Optional[TShimmedFunc] format_control_provider: A provider for the
+        `FormatControl` class
+    :param Optional[TShimmedFunc] wheel_cache_provider: A provider for the `WheelCache`
+        class
+    :param Optional[TShimmedFunc] preparer_provider: A provider for the
+        `RequirementPreparer` class
+    :param Optional[TShimmedFunc] wheel_builder_provider: A provider for the
+        `WheelBuilder` class, if it exists
+    :param Optional[TShimmedFunc] build_one_provider: A provider for the `_build_one`
+        function, if it exists
+    :param Optional[TShimmedFunc] build_one_inside_env_provider: A provider for the
+        `_build_one_inside_env` function, if it exists
+    :param Optional[TShimmedFunc] build_many_provider: A provider for the `build`
+        function, if it exists
+    :param Optional[TShimmedFunc] install_command_provider: A shim for providing new
+        install command instances
+    :param TShimmedFunc finder_provider: A provider to build package finder instances
+    :return: A tuple of successful and failed install requirements or else a path to
+        a wheel
+    :rtype: Optional[Union[str, Tuple[List[TInstallRequirement], List[TInstallRequirement]]]]
+    """
+    wheel_cache_provider = resolve_possible_shim(wheel_cache_provider)
+    preparer = resolve_possible_shim(preparer)
+    wheel_builder_provider = resolve_possible_shim(wheel_builder_provider)
+    build_one_provider = resolve_possible_shim(build_one_provider)
+    build_one_inside_env_provider = resolve_possible_shim(build_one_inside_env_provider)
+    build_many_provider = resolve_possible_shim(build_many_provider)
+    install_cmd_provider = resolve_possible_shim(install_command_provider)
+    format_control_provider = resolve_possible_shim(format_control_provider)
+    finder_provider = resolve_possible_shim(finder_provider)
+    global_options = [] if global_options is None else global_options
+    build_options = [] if build_options is None else build_options
+    options = None
+    kwarg_map = {
+        "cache_dir": cache_dir,
+        "src_dir": src_dir,
+        "download_dir": download_dir,
+        "wheel_download_dir": wheel_download_dir,
+        "build_dir": build_dir,
+        "use_user_site": use_user_site,
+    }
+    if not req and not reqset:
+        raise TypeError("Must provide either a requirement or requirement set to build")
+    if wheel_cache is None and (reqset is not None or output_dir is None):
+        if install_command is None:
+            assert isinstance(install_cmd_provider, (type, functools.partial))
+            install_command = install_cmd_provider()
+        kwargs, options = populate_options(install_command, options, **kwarg_map)
+        format_control = getattr(options, "format_control", None)
+        if not format_control:
+            format_control = format_control_provider(None, None)  # type: ignore
+        wheel_cache = wheel_cache_provider(options.cache_dir, format_control)
+    if req and not reqset and not output_dir:
+        output_dir = wheel_cache.get_path_for_link(req.link)
+    if not reqset and build_one_provider:
+        yield build_one_provider(req, output_dir, build_options, global_options)
+    elif build_many_provider:
+        yield build_many_provider(
+            reqset, wheel_cache, build_options, global_options, check_binary_allowed
+        )
+    else:
+        with ExitStack() as ctx:
+            if session is None and finder is None:
+                session = get_session(install_cmd=install_command, options=options)
+                finder = finder_provider(
+                    install_command, options=options, session=session
+                )
+            if preparer is None:
+                preparer_kwargs = {
+                    "build_dir": kwargs["build_dir"],
+                    "src_dir": kwargs["src_dir"],
+                    "download_dir": kwargs["download_dir"],
+                    "wheel_download_dir": kwargs["wheel_download_dir"],
+                    "finder": finder,
+                    "session":
session + if session + else get_session(install_cmd=install_command, options=options), + "install_cmd": install_command, + "options": options, + "use_user_site": use_user_site, + "req_tracker": req_tracker, + } + preparer = ctx.enter_context(preparer_provider(**preparer_kwargs)) + check_bin = check_binary_allowed if check_binary_allowed else lambda x: True + builder_kwargs = { + "requirement_set": reqset, + "finder": finder, + "preparer": preparer, + "wheel_cache": wheel_cache, + "no_clean": no_clean, + "build_options": build_options, + "global_options": global_options, + "check_binary_allowed": check_bin, + } + builder = call_function_with_correct_args( + wheel_builder_provider, **builder_kwargs + ) + if req and not reqset: + if not output_dir: + output_dir = wheel_cache.get_path_for_link(req.link) + if use_pep517 is not None: + req.use_pep517 = use_pep517 + yield builder._build_one(req, output_dir) + else: + yield builder.build(reqset) diff --git a/pipenv/vendor/pip_shims/environment.py b/pipenv/vendor/pip_shims/environment.py index 1fa7df4537..2268ea26f5 100644 --- a/pipenv/vendor/pip_shims/environment.py +++ b/pipenv/vendor/pip_shims/environment.py @@ -1,4 +1,9 @@ # -*- coding=utf-8 -*- +""" +Module with functionality to learn about the environment. +""" +from __future__ import absolute_import + import importlib import os diff --git a/pipenv/vendor/pip_shims/models.py b/pipenv/vendor/pip_shims/models.py index f3fc06426d..d641c6dd44 100644 --- a/pipenv/vendor/pip_shims/models.py +++ b/pipenv/vendor/pip_shims/models.py @@ -1,4 +1,7 @@ # -*- coding: utf-8 -*- +""" +Helper module for shimming functionality across pip versions. +""" from __future__ import absolute_import, print_function import collections @@ -12,7 +15,7 @@ import six -from . import backports +from . 
import backports +from . import compat from .environment import BASE_IMPORT_PATH, MYPY_RUNNING, get_pip_version from .utils import ( add_mixin_to_class, @@ -801,7 +804,7 @@ def import_pip(): InstallCommand = ShimmedPathCollection("InstallCommand", ImportTypes.CLASS) InstallCommand.pre_shim( - functools.partial(backports.partial_command, cmd_mapping=commands_dict) + functools.partial(compat.partial_command, cmd_mapping=commands_dict) ) InstallCommand.create_path("commands.install.InstallCommand", "7.0.0", "9999") @@ -911,12 +914,19 @@ def import_pip(): is_file_url.set_default(fallback_is_file_url) is_file_url.create_path("download.is_file_url", "7.0.0", "19.2.3") +Downloader = ShimmedPathCollection("Downloader", ImportTypes.CLASS) +Downloader.create_path("network.download.Downloader", "19.3.9", "9999") + unpack_url = ShimmedPathCollection("unpack_url", ImportTypes.FUNCTION) unpack_url.create_path("download.unpack_url", "7.0.0", "19.3.9") unpack_url.create_path("operations.prepare.unpack_url", "20.0", "9999") shim_unpack = ShimmedPathCollection("shim_unpack", ImportTypes.FUNCTION) -shim_unpack.set_default(functools.partial(backports.shim_unpack, unpack_fn=unpack_url)) +shim_unpack.set_default( + functools.partial( + compat.shim_unpack, unpack_fn=unpack_url, downloader_provider=Downloader + ) +) is_installable_dir = ShimmedPathCollection("is_installable_dir", ImportTypes.FUNCTION) is_installable_dir.create_path("utils.misc.is_installable_dir", "10.0.0", "9999") @@ -930,7 +940,10 @@ def import_pip(): make_abstract_dist = ShimmedPathCollection("make_abstract_dist", ImportTypes.FUNCTION) make_abstract_dist.create_path( - "distributions.make_distribution_for_install_requirement", "19.1.2", "9999" + "distributions.make_distribution_for_install_requirement", "20.0.0", "9999" +) +make_abstract_dist.create_path( + "distributions.make_distribution_for_install_requirement", "19.1.2", "19.3.9" ) make_abstract_dist.create_path( "operations.prepare.make_abstract_dist", "10.0.0", "19.1.1" @@ -938,10 +951,13 @@ def import_pip(): make_abstract_dist.create_path("req.req_set.make_abstract_dist", "7.0.0", "9.0.3") make_distribution_for_install_requirement = ShimmedPathCollection( - "make_distribution_for_install_requirement", ImportTypes.CLASS + "make_distribution_for_install_requirement", ImportTypes.FUNCTION ) make_distribution_for_install_requirement.create_path( - "distributions.make_distribution_for_install_requirement", "19.1.2", "9999" + "distributions.make_distribution_for_install_requirement", "20.0.0", "9999" +) +make_distribution_for_install_requirement.create_path( + "distributions.make_distribution_for_install_requirement", "19.1.2", "19.9.9" ) make_option_group = ShimmedPathCollection("make_option_group", ImportTypes.FUNCTION) @@ -953,38 +969,38 @@ def import_pip(): PackageFinder.create_path("index.package_finder.PackageFinder", "20.0", "9999") CandidateEvaluator = ShimmedPathCollection("CandidateEvaluator", ImportTypes.CLASS) -CandidateEvaluator.set_default(backports.CandidateEvaluator) +CandidateEvaluator.set_default(compat.CandidateEvaluator) CandidateEvaluator.create_path("index.CandidateEvaluator", "19.1.0", "19.3.9") CandidateEvaluator.create_path("index.package_finder.CandidateEvaluator", "20.0", "9999") CandidatePreferences = ShimmedPathCollection("CandidatePreferences", ImportTypes.CLASS) -CandidatePreferences.set_default(backports.CandidatePreferences) +CandidatePreferences.set_default(compat.CandidatePreferences) CandidatePreferences.create_path("index.CandidatePreferences", "19.2.0", "19.9")
CandidatePreferences.create_path( "index.package_finder.CandidatePreferences", "20.0", "9999" ) LinkCollector = ShimmedPathCollection("LinkCollector", ImportTypes.CLASS) -LinkCollector.set_default(backports.LinkCollector) +LinkCollector.set_default(compat.LinkCollector) LinkCollector.create_path("collector.LinkCollector", "19.3.0", "19.9") LinkCollector.create_path("index.collector.LinkCollector", "20.0", "9999") LinkEvaluator = ShimmedPathCollection("LinkEvaluator", ImportTypes.CLASS) -LinkEvaluator.set_default(backports.LinkEvaluator) +LinkEvaluator.set_default(compat.LinkEvaluator) LinkEvaluator.create_path("index.LinkEvaluator", "19.2.0", "19.9") LinkEvaluator.create_path("index.package_finder.LinkEvaluator", "20.0", "9999") TargetPython = ShimmedPathCollection("TargetPython", ImportTypes.CLASS) -backports.TargetPython.fallback_get_tags = get_tags -TargetPython.set_default(backports.TargetPython) +compat.TargetPython.fallback_get_tags = get_tags +TargetPython.set_default(compat.TargetPython) TargetPython.create_path("models.target_python.TargetPython", "19.2.0", "9999") SearchScope = ShimmedPathCollection("SearchScope", ImportTypes.CLASS) -SearchScope.set_default(backports.SearchScope) +SearchScope.set_default(compat.SearchScope) SearchScope.create_path("models.search_scope.SearchScope", "19.2.0", "9999") SelectionPreferences = ShimmedPathCollection("SelectionPreferences", ImportTypes.CLASS) -SelectionPreferences.set_default(backports.SelectionPreferences) +SelectionPreferences.set_default(compat.SelectionPreferences) SelectionPreferences.create_path( "models.selection_prefs.SelectionPreferences", "19.2.0", "9999" ) @@ -1013,11 +1029,18 @@ def import_pip(): TempDirectory = ShimmedPathCollection("TempDirectory", ImportTypes.CLASS) TempDirectory.create_path("utils.temp_dir.TempDirectory", "7.0.0", "9999") +global_tempdir_manager = ShimmedPathCollection( + "global_tempdir_manager", ImportTypes.CONTEXTMANAGER +) +global_tempdir_manager.create_path( + "utils.temp_dir.global_tempdir_manager", "7.0.0", "9999" +) + get_requirement_tracker = ShimmedPathCollection( "get_requirement_tracker", ImportTypes.CONTEXTMANAGER ) get_requirement_tracker.set_default( - functools.partial(backports.get_requirement_tracker, RequirementTracker.shim()) + functools.partial(compat.get_requirement_tracker, RequirementTracker.shim()) ) get_requirement_tracker.create_path( "req.req_tracker.get_requirement_tracker", "7.0.0", "9999" @@ -1025,7 +1048,8 @@ def import_pip(): Resolver = ShimmedPathCollection("Resolver", ImportTypes.CLASS) Resolver.create_path("resolve.Resolver", "7.0.0", "19.1.1") -Resolver.create_path("legacy_resolve.Resolver", "19.1.2", "9999") +Resolver.create_path("legacy_resolve.Resolver", "19.1.2", "20.0.89999") +Resolver.create_path("resolution.legacy.resolver.Resolver", "20.0.99999", "99999") SafeFileCache = ShimmedPathCollection("SafeFileCache", ImportTypes.CLASS) SafeFileCache.create_path("network.cache.SafeFileCache", "19.3.0", "9999") @@ -1046,7 +1070,8 @@ def import_pip(): VcsSupport.create_path("vcs.versioncontrol.VcsSupport", "19.2", "9999") Wheel = ShimmedPathCollection("Wheel", ImportTypes.CLASS) -Wheel.create_path("wheel.Wheel", "7.0.0", "9999") +Wheel.create_path("wheel.Wheel", "7.0.0", "19.3.9") +Wheel.set_default(compat.Wheel) WheelCache = ShimmedPathCollection("WheelCache", ImportTypes.CLASS) WheelCache.create_path("cache.WheelCache", "10.0.0", "9999") @@ -1054,7 +1079,15 @@ def import_pip(): WheelBuilder = ShimmedPathCollection("WheelBuilder", ImportTypes.CLASS) 
WheelBuilder.create_path("wheel.WheelBuilder", "7.0.0", "19.9") -WheelBuilder.create_path("wheel_builder.WheelBuilder", "20.0", "9999") + +build = ShimmedPathCollection("build", ImportTypes.FUNCTION) +build.create_path("wheel_builder.build", "19.9", "9999") + +build_one = ShimmedPathCollection("build_one", ImportTypes.FUNCTION) +build_one.create_path("wheel_builder._build_one", "19.9", "9999") + +build_one_inside_env = ShimmedPathCollection("build_one_inside_env", ImportTypes.FUNCTION) +build_one_inside_env.create_path("wheel_builder._build_one_inside_env", "19.9", "9999") AbstractDistribution = ShimmedPathCollection("AbstractDistribution", ImportTypes.CLASS) AbstractDistribution.create_path( @@ -1075,11 +1108,14 @@ def import_pip(): SourceDistribution.create_path( "distributions.source.legacy.SourceDistribution", "19.3.0", "19.9" ) -SourceDistribution.create_path("distributions.source.SourceDistribution", "20.0", "9999") +SourceDistribution.create_path("distributions.sdist.SourceDistribution", "20.0", "9999") WheelDistribution = ShimmedPathCollection("WheelDistribution", ImportTypes.CLASS) WheelDistribution.create_path("distributions.wheel.WheelDistribution", "19.1.2", "9999") +Downloader = ShimmedPathCollection("Downloader", ImportTypes.CLASS) +Downloader.create_path("network.download.Downloader", "20.0.0", "9999") + PyPI = ShimmedPathCollection("PyPI", ImportTypes.ATTRIBUTE) PyPI.create_path("models.index.PyPI", "7.0.0", "9999") @@ -1095,7 +1131,7 @@ def import_pip(): get_package_finder = ShimmedPathCollection("get_package_finder", ImportTypes.FUNCTION) get_package_finder.set_default( functools.partial( - backports.get_package_finder, + compat.get_package_finder, install_cmd_provider=InstallCommand, target_python_builder=TargetPython.shim(), ) @@ -1105,10 +1141,12 @@ def import_pip(): make_preparer = ShimmedPathCollection("make_preparer", ImportTypes.FUNCTION) make_preparer.set_default( functools.partial( - backports.make_preparer, + compat.make_preparer, install_cmd_provider=InstallCommand, preparer_fn=RequirementPreparer, + downloader_provider=Downloader, req_tracker_fn=get_requirement_tracker, + finder_provider=get_package_finder, ) ) @@ -1116,7 +1154,7 @@ def import_pip(): get_resolver = ShimmedPathCollection("get_resolver", ImportTypes.FUNCTION) get_resolver.set_default( functools.partial( - backports.get_resolver, + compat.get_resolver, install_cmd_provider=InstallCommand, resolver_fn=Resolver, install_req_provider=install_req_from_req_string, @@ -1129,7 +1167,7 @@ def import_pip(): get_requirement_set = ShimmedPathCollection("get_requirement_set", ImportTypes.FUNCTION) get_requirement_set.set_default( functools.partial( - backports.get_requirement_set, + compat.get_requirement_set, install_cmd_provider=InstallCommand, req_set_provider=RequirementSet, ) @@ -1139,7 +1177,7 @@ def import_pip(): resolve = ShimmedPathCollection("resolve", ImportTypes.FUNCTION) resolve.set_default( functools.partial( - backports.resolve, + compat.resolve, install_cmd_provider=InstallCommand, reqset_provider=get_requirement_set, finder_provider=get_package_finder, @@ -1148,5 +1186,22 @@ def import_pip(): format_control_provider=FormatControl, make_preparer_provider=make_preparer, req_tracker_provider=get_requirement_tracker, + tempdir_manager_provider=global_tempdir_manager, + ) +) + + +build_wheel = ShimmedPathCollection("build_wheel", ImportTypes.FUNCTION) +build_wheel.set_default( + functools.partial( + compat.build_wheel, + install_command_provider=InstallCommand, + wheel_cache_provider=WheelCache, 
+ wheel_builder_provider=WheelBuilder, + build_one_provider=build_one, + build_one_inside_env_provider=build_one_inside_env, + build_many_provider=build, + preparer_provider=make_preparer, + format_control_provider=FormatControl, ) ) diff --git a/pipenv/vendor/pip_shims/shims.py b/pipenv/vendor/pip_shims/shims.py index d5a11f0137..fb02937835 100644 --- a/pipenv/vendor/pip_shims/shims.py +++ b/pipenv/vendor/pip_shims/shims.py @@ -1,4 +1,7 @@ # -*- coding=utf-8 -*- +""" +Main module with magic self-replacement mechanisms to handle import speedups. +""" from __future__ import absolute_import import sys diff --git a/pipenv/vendor/pip_shims/utils.py b/pipenv/vendor/pip_shims/utils.py index 931ff137c6..76661eccb9 100644 --- a/pipenv/vendor/pip_shims/utils.py +++ b/pipenv/vendor/pip_shims/utils.py @@ -1,4 +1,7 @@ # -*- coding=utf-8 -*- +""" +Shared utility functions which are not specific to any particular module. +""" from __future__ import absolute_import import contextlib diff --git a/pipenv/vendor/pythonfinder/__init__.py b/pipenv/vendor/pythonfinder/__init__.py index 428599bdcd..6ba8a67de9 100644 --- a/pipenv/vendor/pythonfinder/__init__.py +++ b/pipenv/vendor/pythonfinder/__init__.py @@ -10,7 +10,7 @@ from .models import SystemPath, WindowsFinder from .pythonfinder import Finder -__version__ = "1.2.2.dev0" +__version__ = "1.2.2" logger = logging.getLogger(__name__) diff --git a/pipenv/vendor/pythonfinder/cli.py b/pipenv/vendor/pythonfinder/cli.py index eb2e603a50..fc86abd0b2 100644 --- a/pipenv/vendor/pythonfinder/cli.py +++ b/pipenv/vendor/pythonfinder/cli.py @@ -1,10 +1,7 @@ # -*- coding=utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals -import sys - import click -import crayons from . import __version__ from .pythonfinder import Finder @@ -32,10 +29,11 @@ def cli( if version: click.echo( "{0} version {1}".format( - crayons.white("PythonFinder", bold=True), crayons.yellow(__version__) + click.style("PythonFinder", fg="white", bold=True), + click.style(str(__version__), fg="yellow") ) ) - sys.exit(0) + ctx.exit() finder = Finder(ignore_unsupported=ignore_unsupported) if findall: versions = [v for v in finder.find_all_python_versions()] @@ -54,7 +52,7 @@ def cli( ), fg="yellow", ) - sys.exit(0) + ctx.exit() else: click.secho( "ERROR: No valid python versions found! 
Check your path and try again.", @@ -78,22 +76,22 @@ def cli( ), fg="yellow", ) - sys.exit(0) + ctx.exit() else: click.secho("Failed to find matching executable...", fg="yellow") - sys.exit(1) + ctx.exit(1) elif which: found = finder.system_path.which(which.strip()) if found: click.secho("Found Executable: {0}".format(found), fg="white") - sys.exit(0) + ctx.exit() else: click.secho("Failed to find matching executable...", fg="yellow") - sys.exit(1) + ctx.exit(1) else: click.echo("Please provide a command", color="red") - sys.exit(1) - sys.exit() + ctx.exit(1) + ctx.exit() if __name__ == "__main__": diff --git a/pipenv/vendor/pythonfinder/compat.py b/pipenv/vendor/pythonfinder/compat.py new file mode 100644 index 0000000000..d76c4efc70 --- /dev/null +++ b/pipenv/vendor/pythonfinder/compat.py @@ -0,0 +1,42 @@ +# -*- coding=utf-8 -*- +import sys + +import six + +if sys.version_info[:2] <= (3, 4): + from pipenv.vendor.pathlib2 import Path # type: ignore # noqa +else: + from pathlib import Path + +if six.PY3: + from functools import lru_cache + from builtins import TimeoutError +else: + from backports.functools_lru_cache import lru_cache # type: ignore # noqa + + class TimeoutError(OSError): + pass + + +def getpreferredencoding(): + import locale + # Borrowed from Invoke + # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881) + _encoding = locale.getpreferredencoding(False) + if six.PY2 and not sys.platform == "win32": + _default_encoding = locale.getdefaultlocale()[1] + if _default_encoding is not None: + _encoding = _default_encoding + return _encoding + + +DEFAULT_ENCODING = getpreferredencoding() + + +def fs_str(string): + """Encodes a string into the proper filesystem encoding""" + + if isinstance(string, str): + return string + assert not isinstance(string, bytes) + return string.encode(DEFAULT_ENCODING) diff --git a/pipenv/vendor/pythonfinder/models/mixins.py b/pipenv/vendor/pythonfinder/models/mixins.py index 4c473b1817..763271153b 100644 --- a/pipenv/vendor/pythonfinder/models/mixins.py +++ b/pipenv/vendor/pythonfinder/models/mixins.py @@ -7,8 +7,8 @@ import attr import six -from vistir.compat import fs_str +from ..compat import fs_str from ..environment import MYPY_RUNNING from ..exceptions import InvalidPythonVersion from ..utils import ( @@ -35,7 +35,7 @@ TypeVar, Type, ) - from vistir.compat import Path + from ..compat import Path # noqa BaseFinderType = TypeVar("BaseFinderType") diff --git a/pipenv/vendor/pythonfinder/models/path.py b/pipenv/vendor/pythonfinder/models/path.py index f46677e98c..b855a05da0 100644 --- a/pipenv/vendor/pythonfinder/models/path.py +++ b/pipenv/vendor/pythonfinder/models/path.py @@ -10,8 +10,7 @@ import attr import six from cached_property import cached_property -from vistir.compat import Path, fs_str -from vistir.misc import dedup +from ..compat import Path, fs_str from ..environment import ( ASDF_DATA_DIR, @@ -26,6 +25,7 @@ from ..utils import ( Iterable, Sequence, + dedup, ensure_path, expand_paths, filter_pythons, diff --git a/pipenv/vendor/pythonfinder/models/python.py b/pipenv/vendor/pythonfinder/models/python.py index 3c859980c7..619e77612d 100644 --- a/pipenv/vendor/pythonfinder/models/python.py +++ b/pipenv/vendor/pythonfinder/models/python.py @@ -10,8 +10,8 @@ import attr import six from packaging.version import Version -from vistir.compat import Path, lru_cache +from ..compat import Path, lru_cache from ..environment import ASDF_DATA_DIR, MYPY_RUNNING, PYENV_ROOT, SYSTEM_ARCH from ..exceptions import 
InvalidPythonVersion from ..utils import ( diff --git a/pipenv/vendor/pythonfinder/pythonfinder.py b/pipenv/vendor/pythonfinder/pythonfinder.py index 400a317005..96737355e8 100644 --- a/pipenv/vendor/pythonfinder/pythonfinder.py +++ b/pipenv/vendor/pythonfinder/pythonfinder.py @@ -7,9 +7,9 @@ import six from click import secho -from vistir.compat import lru_cache from . import environment +from .compat import lru_cache from .exceptions import InvalidPythonVersion from .utils import Iterable, filter_pythons, version_re @@ -51,7 +51,8 @@ def __init__( :param system: bool, optional :param global_search: Whether to search the global path from os.environ, defaults to True :param global_search: bool, optional - :param ignore_unsupported: Whether to ignore unsupported python versions, if False, an error is raised, defaults to True + :param ignore_unsupported: Whether to ignore unsupported python versions, if False, an + error is raised, defaults to True :param ignore_unsupported: bool, optional :param bool sort_by_path: Whether to always sort by path :returns: a :class:`~pythonfinder.pythonfinder.Finder` object. @@ -133,8 +134,16 @@ def which(self, exe): return self.system_path.which(exe) @classmethod - def parse_major(cls, major, minor=None, patch=None, pre=None, dev=None, arch=None): - # type: (Optional[str], Optional[int], Optional[int], Optional[bool], Optional[bool], Optional[str]) -> Dict[str, Union[int, str, bool, None]] + def parse_major( + cls, + major, # type: Optional[str] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + ): + # type: (...) -> Dict[str, Union[int, str, bool, None]] from .models import PythonVersion major_is_str = major and isinstance(major, six.string_types) @@ -289,11 +298,18 @@ def find_python_version( @lru_cache(maxsize=1024) def find_all_python_versions( - self, major=None, minor=None, patch=None, pre=None, dev=None, arch=None, name=None + self, + major=None, # type: Optional[Union[str, int]] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + name=None, # type: Optional[str] ): - # type: (Optional[Union[str, int]], Optional[int], Optional[int], Optional[bool], Optional[bool], Optional[str], Optional[str]) -> List[PathEntry] + # type: (...) 
-> List[PathEntry] version_sort = operator.attrgetter("as_python.version_sort") - python_version_dict = getattr(self.system_path, "python_version_dict") + python_version_dict = getattr(self.system_path, "python_version_dict", {}) if python_version_dict: paths = ( path diff --git a/pipenv/vendor/pythonfinder/utils.py b/pipenv/vendor/pythonfinder/utils.py index 1c190b0148..8150545cb9 100644 --- a/pipenv/vendor/pythonfinder/utils.py +++ b/pipenv/vendor/pythonfinder/utils.py @@ -5,14 +5,16 @@ import itertools import os import re +import subprocess +from collections import OrderedDict from fnmatch import fnmatch from threading import Timer import attr import six -import vistir from packaging.version import LegacyVersion, Version +from .compat import Path, lru_cache, TimeoutError # noqa from .environment import MYPY_RUNNING, PYENV_ROOT, SUBPROCESS_TIMEOUT from .exceptions import InvalidPythonVersion @@ -27,11 +29,6 @@ from six.moves import Sequence # type: ignore # noqa # isort:skip # fmt: on -try: - from functools import lru_cache -except ImportError: - from backports.functools_lru_cache import lru_cache # type: ignore # noqa - if MYPY_RUNNING: from typing import Any, Union, List, Callable, Set, Tuple, Dict, Optional, Iterator from attr.validators import _OptionalValidator # type: ignore @@ -98,21 +95,26 @@ def get_python_version(path): "-c", "import sys; print('.'.join([str(i) for i in sys.version_info[:3]]))", ] + subprocess_kwargs = { + "env": os.environ.copy(), + "universal_newlines": True, + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE, + "shell": False, + } + c = subprocess.Popen(version_cmd, **subprocess_kwargs) + timer = Timer(SUBPROCESS_TIMEOUT, c.kill) try: - c = vistir.misc.run( - version_cmd, - block=True, - nospin=True, - return_object=True, - combine_stderr=False, - write_to_stdout=False, - ) - timer = Timer(SUBPROCESS_TIMEOUT, c.kill) + out, _ = c.communicate() + except (SystemExit, KeyboardInterrupt, TimeoutError): + c.terminate() + out, _ = c.communicate() + raise except OSError: raise InvalidPythonVersion("%s is not a valid python path" % path) - if not c.out: + if not out: raise InvalidPythonVersion("%s is not a valid python path" % path) - return c.out.strip() + return out.strip() @lru_cache(maxsize=1024) @@ -190,13 +192,13 @@ def path_is_executable(path): @lru_cache(maxsize=1024) def path_is_known_executable(path): - # type: (vistir.compat.Path) -> bool + # type: (Path) -> bool """ Returns whether a given path is a known executable from known executable extensions or has the executable bit toggled. :param path: The path to the target executable. - :type path: :class:`~vistir.compat.Path` + :type path: :class:`~Path` :return: True if the path has chmod +x, or is a readable, known executable extension. :rtype: bool """ @@ -229,12 +231,12 @@ def looks_like_python(name): @lru_cache(maxsize=1024) def path_is_python(path): - # type: (vistir.compat.Path) -> bool + # type: (Path) -> bool """ Determine whether the supplied path is executable and looks like a possible path to python. :param path: The path to an executable. - :type path: :class:`~vistir.compat.Path` + :type path: :class:`~Path` :return: Whether the provided path is an executable path to python. :rtype: bool """ @@ -278,7 +280,7 @@ def path_is_pythoncore(path): @lru_cache(maxsize=1024) def ensure_path(path): - # type: (Union[vistir.compat.Path, str]) -> vistir.compat.Path + # type: (Union[Path, str]) -> Path """ Given a path (either a string or a Path object), expand variables and return a Path object. 
@@ -288,9 +290,9 @@ def ensure_path(path): :rtype: :class:`~pathlib.Path` """ - if isinstance(path, vistir.compat.Path): + if isinstance(path, Path): return path - path = vistir.compat.Path(os.path.expandvars(path)) + path = Path(os.path.expandvars(path)) return path.absolute() @@ -313,10 +315,10 @@ def normalize_path(path): @lru_cache(maxsize=1024) def filter_pythons(path): - # type: (Union[str, vistir.compat.Path]) -> Iterable + # type: (Union[str, Path]) -> Iterable """Return all valid pythons in a given path""" - if not isinstance(path, vistir.compat.Path): - path = vistir.compat.Path(str(path)) + if not isinstance(path, Path): + path = Path(str(path)) if not path.is_dir(): return path if path_is_python(path) else None return filter(path_is_python, path.iterdir()) @@ -377,7 +379,7 @@ def split_version_and_name( patch=None, # type: Optional[Union[str, int]] name=None, # type: Optional[str] ): - # type: (...) -> Tuple[Optional[Union[str, int]], Optional[Union[str, int]], Optional[Union[str, int]], Optional[str]] + # type: (...) -> Tuple[Optional[Union[str, int]], Optional[Union[str, int]], Optional[Union[str, int]], Optional[str]] # noqa if isinstance(major, six.string_types) and not minor and not patch: # Only proceed if this is in the format "x.y.z" or similar if major.isdigit() or (major.count(".") > 0 and major[0].isdigit()): @@ -437,3 +439,11 @@ def expand_paths(path, only_python=True): else: if path is not None and path.is_python and path.as_python is not None: yield path + + +def dedup(iterable): + # type: (Iterable) -> Iterable + """Deduplicate an iterable object like iter(set(iterable)) but + order-preserved. + """ + return iter(OrderedDict.fromkeys(iterable)) diff --git a/pipenv/vendor/requirementslib/__init__.py b/pipenv/vendor/requirementslib/__init__.py index 8182d2ead1..488da8493e 100644 --- a/pipenv/vendor/requirementslib/__init__.py +++ b/pipenv/vendor/requirementslib/__init__.py @@ -10,7 +10,7 @@ from .models.pipfile import Pipfile from .models.requirements import Requirement -__version__ = "1.5.4.dev0" +__version__ = "1.5.4" logger = logging.getLogger(__name__) diff --git a/pipenv/vendor/requirementslib/models/dependencies.py b/pipenv/vendor/requirementslib/models/dependencies.py index b8af28e905..78c78ace2b 100644 --- a/pipenv/vendor/requirementslib/models/dependencies.py +++ b/pipenv/vendor/requirementslib/models/dependencies.py @@ -32,6 +32,11 @@ version_from_ireq, ) +try: + from contextlib import ExitStack +except ImportError: + from contextlib2 import ExitStack + if MYPY_RUNNING: from typing import ( Any, @@ -575,18 +580,22 @@ def start_resolver(finder=None, session=None, wheel_cache=None): _build_dir = create_tracked_tempdir(fs_str("build")) _source_dir = create_tracked_tempdir(fs_str("source")) try: - with pip_shims.shims.make_preparer( - options=pip_options, - finder=finder, - session=session, - build_dir=_build_dir, - src_dir=_source_dir, - download_dir=download_dir, - wheel_download_dir=WHEEL_DOWNLOAD_DIR, - progress_bar="off", - build_isolation=False, - install_cmd=pip_command, - ) as preparer: + with ExitStack() as ctx: + ctx.enter_context(pip_shims.shims.global_tempdir_manager()) + preparer = ctx.enter_context( + pip_shims.shims.make_preparer( + options=pip_options, + finder=finder, + session=session, + build_dir=_build_dir, + src_dir=_source_dir, + download_dir=download_dir, + wheel_download_dir=WHEEL_DOWNLOAD_DIR, + progress_bar="off", + build_isolation=False, + install_cmd=pip_command, + ) + ) resolver = pip_shims.shims.get_resolver( finder=finder,
ignore_dependencies=False, diff --git a/pipenv/vendor/requirementslib/models/markers.py b/pipenv/vendor/requirementslib/models/markers.py index 6e46b518ec..8dda049518 100644 --- a/pipenv/vendor/requirementslib/models/markers.py +++ b/pipenv/vendor/requirementslib/models/markers.py @@ -196,7 +196,6 @@ def _get_specs(specset): return sorted(result, key=operator.itemgetter(1)) -# TODO: Rename this to something meaningful def _group_by_op(specs): # type: (Union[Set[Specifier], SpecifierSet]) -> Iterator specs = [_get_specs(x) for x in list(specs)] @@ -206,7 +205,6 @@ def _group_by_op(specs): return grouping -# TODO: rename this to something meaningful def normalize_specifier_set(specs): # type: (Union[str, SpecifierSet]) -> Optional[Set[Specifier]] """Given a specifier set, a string, or an iterable, normalize the specifiers @@ -237,8 +235,6 @@ def normalize_specifier_set(specs): return normalize_specifier_set(SpecifierSet(",".join(spec_list))) -# TODO: Check if this is used by anything public otherwise make it private -# And rename it to something meaningful def get_sorted_version_string(version_set): # type: (Set[AnyStr]) -> AnyStr version_list = sorted( @@ -248,9 +244,6 @@ def get_sorted_version_string(version_set): return version -# TODO: Rename this to something meaningful -# TODO: Add a deprecation decorator and deprecate this -- i'm sure it's used -# in other libraries @lru_cache(maxsize=1024) def cleanup_pyspecs(specs, joiner="or"): specs = normalize_specifier_set(specs) @@ -295,7 +288,6 @@ def cleanup_pyspecs(specs, joiner="or"): return sorted([(k, v) for k, v in results.items()], key=operator.itemgetter(1)) -# TODO: Rename this to something meaningful @lru_cache(maxsize=1024) def fix_version_tuple(version_tuple): # type: (Tuple[AnyStr, AnyStr]) -> Tuple[AnyStr, AnyStr] @@ -310,7 +302,6 @@ def fix_version_tuple(version_tuple): return (op, version) -# TODO: Rename this to something meaningful, deprecate it (See prior function) @lru_cache(maxsize=128) def get_versions(specset, group_by_operator=True): # type: (Union[Set[Specifier], SpecifierSet], bool) -> List[Tuple[STRING_TYPE, STRING_TYPE]] @@ -599,7 +590,6 @@ def get_specset(marker_list): return specifiers -# TODO: Refactor this (reduce complexity) def parse_marker_dict(marker_dict): op = marker_dict["op"] lhs = marker_dict["lhs"] @@ -709,16 +699,3 @@ def marker_from_specifier(spec): marker_segments.append(format_pyversion(marker_segment)) marker_str = " and ".join(marker_segments).replace('"', "'") return Marker(marker_str) - - -def merge_markers(m1, m2): - # type: (Marker, Marker) -> Optional[Marker] - if not all((m1, m2)): - return next(iter(v for v in (m1, m2) if v), None) - m1 = _ensure_marker(m1) - m2 = _ensure_marker(m2) - _markers = [] # type: List[Marker] - for marker in (m1, m2): - _markers.append(str(marker)) - marker_str = " and ".join([normalize_marker_str(m) for m in _markers if m]) - return _ensure_marker(normalize_marker_str(marker_str)) diff --git a/pipenv/vendor/requirementslib/models/setup_info.py b/pipenv/vendor/requirementslib/models/setup_info.py index 91f28615af..38fffd4f55 100644 --- a/pipenv/vendor/requirementslib/models/setup_info.py +++ b/pipenv/vendor/requirementslib/models/setup_info.py @@ -19,7 +19,6 @@ import packaging.version import pep517.envbuild import pep517.wrappers -import pkg_resources.extern.packaging.requirements as pkg_resources_requirements import six from appdirs import user_cache_dir from distlib.wheel import Wheel @@ -43,6 +42,11 @@ strip_extras_markers_from_requirement, ) +try: + import 
pkg_resources.extern.packaging.requirements as pkg_resources_requirements +except ImportError: + pkg_resources_requirements = None + try: from setuptools.dist import distutils, Distribution except ImportError: @@ -76,6 +80,7 @@ AnyStr, Sequence, ) + import requests from pip_shims.shims import InstallRequirement, PackageFinder from pkg_resources import ( PathMetadata, @@ -200,7 +205,9 @@ def make_base_requirements(reqs): for req in reqs: if isinstance(req, BaseRequirement): requirements.add(req) - elif isinstance(req, pkg_resources_requirements.Requirement): + elif pkg_resources_requirements is not None and isinstance( + req, pkg_resources_requirements.Requirement + ): requirements.add(BaseRequirement.from_req(req)) elif req and isinstance(req, six.string_types) and not req.startswith("#"): requirements.add(BaseRequirement.from_string(req)) @@ -287,8 +294,11 @@ def get_extras_from_setupcfg(parser): return extras -def parse_setup_cfg(setup_cfg_contents, base_dir): - # type: (S, S) -> Dict[S, Union[S, None, Set[BaseRequirement], List[S], Dict[STRING_TYPE, Tuple[BaseRequirement]]]] +def parse_setup_cfg( + setup_cfg_contents, # type: S + base_dir, # type: S +): + # type: (...) -> Dict[S, Union[S, None, Set[BaseRequirement], List[S], Dict[STRING_TYPE, Tuple[BaseRequirement]]]] default_opts = { "metadata": {"name": "", "version": ""}, "options": { @@ -639,21 +649,42 @@ def get_metadata_from_dist(dist): } -class Analyzer(ast.NodeVisitor): - OP_MAP = { - ast.Add: operator.add, - ast.Sub: operator.sub, - ast.Mult: operator.mul, - ast.Div: operator.floordiv, - ast.Mod: operator.mod, - ast.Pow: operator.pow, - ast.LShift: operator.lshift, - ast.RShift: operator.rshift, - ast.BitAnd: operator.and_, - ast.BitOr: operator.or_, - ast.BitXor: operator.xor - } +AST_BINOP_MAP = dict( + ( + (ast.Add, operator.add), + (ast.Sub, operator.sub), + (ast.Mult, operator.mul), + (ast.Div, operator.floordiv), + (ast.Mod, operator.mod), + (ast.Pow, operator.pow), + (ast.LShift, operator.lshift), + (ast.RShift, operator.rshift), + (ast.BitAnd, operator.and_), + (ast.BitOr, operator.or_), + (ast.BitXor, operator.xor), + ) +) + +AST_COMPARATORS = dict( + ( + (ast.Lt, operator.lt), + (ast.LtE, operator.le), + (ast.Eq, operator.eq), + (ast.Gt, operator.gt), + (ast.GtE, operator.ge), + (ast.NotEq, operator.ne), + (ast.Is, operator.is_), + (ast.IsNot, operator.is_not), + (ast.And, operator.and_), + (ast.Or, operator.or_), + (ast.Not, operator.not_), + (ast.In, operator.contains), + ) +) + + +class Analyzer(ast.NodeVisitor): def __init__(self): self.name_types = [] self.function_map = {} # type: Dict[Any, Any] @@ -677,12 +708,7 @@ def generic_visit(self, node): super(Analyzer, self).generic_visit(node) def visit_BinOp(self, node): - left = ast_unparse(node.left, initial_mapping=True) - right = ast_unparse(node.right, initial_mapping=True) - op = ast_unparse(node.op, initial_mapping=True) - node.left = left - node.right = right - node.op = op + node = ast_unparse(node, initial_mapping=True) self.binOps.append(node) def unmap_binops(self): @@ -705,6 +731,11 @@ def ast_unparse(item, initial_mapping=False, analyzer=None, recurse=True): # no unparse = partial( ast_unparse, initial_mapping=initial_mapping, analyzer=analyzer, recurse=recurse ) + if getattr(ast, "Constant", None): + constant = (ast.Constant, ast.Ellipsis) + else: + constant = ast.Ellipsis + unparsed = item if isinstance(item, ast.Dict): unparsed = dict(zip(unparse(item.keys), unparse(item.values))) elif isinstance(item, ast.List): @@ -715,37 +746,27 @@ def 
ast_unparse(item, initial_mapping=False, analyzer=None, recurse=True): # no unparsed = item.s elif isinstance(item, ast.Subscript): unparsed = unparse(item.value) + elif any(isinstance(item, k) for k in AST_BINOP_MAP.keys()): + unparsed = AST_BINOP_MAP[type(item)] + elif isinstance(item, ast.Num): + unparsed = item.n elif isinstance(item, ast.BinOp): if analyzer and item in analyzer.binOps_map: unparsed = analyzer.binOps_map[item] else: right_item = unparse(item.right) left_item = unparse(item.left) - if type(item.op) in Analyzer.OP_MAP: - item.op = Analyzer.OP_MAP[type(item.op)] + op = getattr(item, "op", None) + op_func = unparse(op) if op is not None else op if not initial_mapping: - if not all( - isinstance(side, (six.string_types, int, float, list, tuple)) - for side in (left_item, right_item) - ): - if type(item.op) in Analyzer.OP_MAP: - item = Analyzer.OP_MAP[type(item.op)](left_item, right_item) - else: - item.left = left_item - item.right = right_item - item.op = unparse(item.op) - try: - unparsed = item.op(left_item, right_item) - except Exception: - unparsed = item - else: - if type(item.op) in Analyzer.OP_MAP: - item.op = Analyzer.OP_MAP[type(item.op)] - try: - unparsed = item.op(left_item, right_item) - except Exception: - unparsed = item + try: + unparsed = op_func(left_item, right_item) + except Exception: + unparsed = (left_item, op_func, right_item) else: + item.left = left_item + item.right = right_item + item.op = op_func unparsed = item elif isinstance(item, ast.Name): if not initial_mapping: @@ -763,6 +784,48 @@ def ast_unparse(item, initial_mapping=False, analyzer=None, recurse=True): # no unparsed = item elif six.PY3 and isinstance(item, ast.NameConstant): unparsed = item.value + elif any(isinstance(item, k) for k in AST_COMPARATORS.keys()): + unparsed = AST_COMPARATORS[type(item)] + elif isinstance(item, constant): + unparsed = item.value + elif isinstance(item, ast.Compare): + if isinstance(item.left, ast.Attribute): + import importlib + + left = unparse(item.left) + if "." in left: + name, _, val = left.rpartition(".") + left = getattr(importlib.import_module(name), val, left) + comparators = [] + for comparator in item.comparators: + right = unparse(comparator) + if isinstance(comparator, ast.Attribute) and "." 
in right: + name, _, val = right.rpartition(".") + right = getattr(importlib.import_module(name), val, right) + comparators.append(right) + unparsed = (left, unparse(item.ops), comparators) + elif isinstance(item, ast.IfExp): + if initial_mapping: + unparsed = item + else: + ops, truth_vals = [], [] + if isinstance(item.test, ast.Compare): + left, ops, right = unparse(item.test) + else: + result = ast_unparse(item.test) + if isinstance(result, dict): + k, v = result.popitem() + if not v: + truth_vals = [False] + for i, op in enumerate(ops): + if i == 0: + truth_vals.append(op(left, right[i])) + else: + truth_vals.append(op(right[i - 1], right[i])) + if all(truth_vals): + unparsed = unparse(item.body) + else: + unparsed = unparse(item.orelse) elif isinstance(item, ast.Attribute): attr_name = getattr(item, "value", None) attr_attr = getattr(item, "attr", None) @@ -791,9 +854,14 @@ def ast_unparse(item, initial_mapping=False, analyzer=None, recurse=True): # no func_name = unparse(item.func) except Exception: func_name = None - if func_name: + if isinstance(func_name, dict): + unparsed.update(func_name) + func_name = next(iter(func_name.keys())) + for keyword in getattr(item, "keywords", []): + unparsed[func_name].update(unparse(keyword)) + elif func_name: unparsed[func_name] = {} - for keyword in item.keywords: + for keyword in getattr(item, "keywords", []): unparsed[func_name].update(unparse(keyword)) elif isinstance(item, ast.keyword): unparsed = {unparse(item.arg): unparse(item.value)} @@ -822,8 +890,6 @@ def ast_unparse(item, initial_mapping=False, analyzer=None, recurse=True): # no unparsed = type(item)([unparse(el) for el in item]) elif isinstance(item, six.string_types): unparsed = item - else: - return item return unparsed @@ -1532,17 +1598,17 @@ def from_ireq(cls, ireq, subdir=None, finder=None, session=None): build_location_func(kwargs["build_dir"]) ireq.ensure_has_source_dir(kwargs["src_dir"]) src_dir = ireq.source_dir - - ireq.populate_link(finder, False, False) - pip_shims.shims.shim_unpack( - link=ireq.link, - location=kwargs["src_dir"], - download_dir=download_dir, - only_download=only_download, - session=session, - hashes=ireq.hashes(False), - progress_bar="off", - ) + with pip_shims.shims.global_tempdir_manager(): + ireq.populate_link(finder, False, False) + pip_shims.shims.shim_unpack( + link=ireq.link, + location=kwargs["src_dir"], + download_dir=download_dir, + only_download=only_download, + session=session, + hashes=ireq.hashes(False), + progress_bar="off", + ) created = cls.create(src_dir, subdirectory=subdir, ireq=ireq, kwargs=kwargs) return created diff --git a/pipenv/vendor/requirementslib/models/utils.py b/pipenv/vendor/requirementslib/models/utils.py index 6c3b7de8a5..9b6beb64ae 100644 --- a/pipenv/vendor/requirementslib/models/utils.py +++ b/pipenv/vendor/requirementslib/models/utils.py @@ -723,7 +723,7 @@ def get_pinned_version(ireq): except AttributeError: raise TypeError("Expected InstallRequirement, not {}".format(type(ireq).__name__)) - if getattr(ireq, "editable", False): + if ireq.editable: raise ValueError("InstallRequirement is editable") if not specifier: raise ValueError("InstallRequirement has no version specification") diff --git a/pipenv/vendor/requirementslib/models/vcs.py b/pipenv/vendor/requirementslib/models/vcs.py index 447e54e475..2cd62249fc 100644 --- a/pipenv/vendor/requirementslib/models/vcs.py +++ b/pipenv/vendor/requirementslib/models/vcs.py @@ -61,15 +61,17 @@ def is_local(self): def obtain(self): # type: () -> None - lte_pip_19 = ( - 
pip_shims.parsed_pip_version.parsed_version < pip_shims.parse_version("19.0") + lt_pip_19_2 = ( + pip_shims.parsed_pip_version.parsed_version < pip_shims.parse_version("19.2") ) + if lt_pip_19_2: + self.repo_backend = self.repo_backend(self.url) if os.path.exists( self.checkout_directory ) and not self.repo_backend.is_repository_directory(self.checkout_directory): self.repo_backend.unpack(self.checkout_directory) elif not os.path.exists(self.checkout_directory): - if lte_pip_19: + if lt_pip_19_2: self.repo_backend.obtain(self.checkout_directory) else: self.repo_backend.obtain(self.checkout_directory, self.parsed_url) diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index 7ce6da7023..27caa4d30d 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -1,45 +1,45 @@ appdirs==1.4.3 backports.shutil_get_terminal_size==1.0.0 backports.weakref==1.0.post1 -click==7.0 +click==7.1.1 click-completion==0.5.2 click-didyoumean==0.0.3 -colorama==0.4.1 +colorama==0.4.3 delegator.py==0.1.1 - pexpect==4.7.0 + pexpect==4.8.0 ptyprocess==0.6.0 python-dotenv==0.10.3 first==2.0.1 iso8601==0.1.12 -jinja2==2.10.3 +jinja2==2.11.1 markupsafe==1.1.1 -parse==1.13.0 +parse==1.15.0 pathlib2==2.3.5 scandir==1.10 pipdeptree==0.13.2 pipreqs==0.4.10 docopt==0.6.2 yarg==0.1.9 -pythonfinder==1.2.1 -requests==2.22.0 +pythonfinder==1.2.2 +requests==2.23.0 chardet==3.0.4 - idna==2.8 - urllib3==1.25.7 + idna==2.9 + urllib3==1.25.8 certifi==2019.11.28 -requirementslib==1.5.3 +requirementslib==1.5.4 attrs==19.3.0 distlib==0.3.0 - packaging==19.2 - pyparsing==2.4.5 + packaging==20.3 + pyparsing==2.4.6 git+https://github.com/sarugaku/plette.git@master#egg=plette tomlkit==0.5.8 -shellingham==1.3.1 -six==1.13.0 +shellingham==1.3.2 +six==1.14.0 semver==2.9.0 toml==0.10.0 cached-property==1.5.1 -vistir==0.4.3 -pip-shims==0.4.0 +vistir==0.5.0 +pip-shims==0.5.1 contextlib2==0.6.0.post1 funcsigs==1.0.2 enum34==1.1.6 @@ -50,7 +50,8 @@ resolvelib==0.2.2 backports.functools_lru_cache==1.5 pep517==0.8.1 zipp==0.6.0 - importlib_metadata==1.3.0 + importlib_metadata==1.5.1 + importlib-resources==1.4.0 more-itertools==5.0.0 git+https://github.com/sarugaku/passa.git@master#egg=passa orderedmultidict==1.0.1 diff --git a/pipenv/vendor/vistir/__init__.py b/pipenv/vendor/vistir/__init__.py index 09af185425..53c1dc4363 100644 --- a/pipenv/vendor/vistir/__init__.py +++ b/pipenv/vendor/vistir/__init__.py @@ -36,7 +36,7 @@ from .path import create_tracked_tempdir, create_tracked_tempfile, mkdir_p, rmtree from .spin import create_spinner -__version__ = "0.4.3" +__version__ = "0.5.1" __all__ = [ diff --git a/pipenv/vendor/vistir/_winconsole.py b/pipenv/vendor/vistir/_winconsole.py index a29c22d8fa..a8be4772ae 100644 --- a/pipenv/vendor/vistir/_winconsole.py +++ b/pipenv/vendor/vistir/_winconsole.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# This Module is taken in full from the click project +# This Module is taken in part from the click project and expanded # see https://github.com/pallets/click/blob/6cafd32/click/_winconsole.py # Copyright © 2014 by the Pallets team. 
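For orientation, a minimal usage sketch of the ``build_wheel`` compat function wired up earlier in this patch; the requirement line is illustrative only, and it assumes resolution has populated the requirement's link (as ``resolve()`` does):

```python
from pip_shims.shims import InstallRequirement, build_wheel, resolve

# Illustrative requirement; any pinned, resolvable line works the same way.
ireq = InstallRequirement.from_line("six==1.14.0")
resolve(ireq)  # populates ireq.link as a side effect of resolution

# build_wheel() is written as a generator: for a single requirement it yields
# the path of the built wheel, for a requirement set a (built, failed) tuple.
wheel_path = next(iter(build_wheel(req=ireq)))
print(wheel_path)
```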
diff --git a/pipenv/vendor/vistir/contextmanagers.py b/pipenv/vendor/vistir/contextmanagers.py index 49ec964fe1..66fde577cd 100644 --- a/pipenv/vendor/vistir/contextmanagers.py +++ b/pipenv/vendor/vistir/contextmanagers.py @@ -5,7 +5,7 @@ import os import stat import sys -from contextlib import contextmanager +from contextlib import closing, contextmanager import six @@ -285,20 +285,27 @@ def open_file(link, session=None, stream=True): # Remote URL headers = {"Accept-Encoding": "identity"} if not session: - from requests import Session - - session = Session() - with session.get(link, headers=headers, stream=stream) as resp: try: - raw = getattr(resp, "raw", None) - result = raw if raw else resp - yield result - finally: - if raw: - conn = getattr(raw, "_connection") - if conn is not None: - conn.close() - result.close() + from requests import Session + except ImportError: + session = None + else: + session = Session() + if session is None: + with closing(six.moves.urllib.request.urlopen(link)) as f: + yield f + else: + with session.get(link, headers=headers, stream=stream) as resp: + try: + raw = getattr(resp, "raw", None) + result = raw if raw else resp + yield result + finally: + if raw: + conn = getattr(raw, "_connection") + if conn is not None: + conn.close() + result.close() @contextmanager diff --git a/pipenv/vendor/vistir/path.py b/pipenv/vendor/vistir/path.py index 2d6e80c6a8..25d29eb9ca 100644 --- a/pipenv/vendor/vistir/path.py +++ b/pipenv/vendor/vistir/path.py @@ -30,6 +30,14 @@ fs_encode, ) +# fmt: off +if six.PY3: + from urllib.parse import quote_from_bytes as quote +else: + from urllib import quote +# fmt: on + + if IS_TYPE_CHECKING: from typing import Optional, Callable, Text, ByteString, AnyStr @@ -158,13 +166,23 @@ def path_to_url(path): >>> path_to_url("/home/user/code/myrepo/myfile.zip") 'file:///home/user/code/myrepo/myfile.zip' """ - from .misc import to_text, to_bytes + from .misc import to_bytes if not path: return path - path = to_bytes(path, encoding="utf-8") - normalized_path = to_text(normalize_drive(os.path.abspath(path)), encoding="utf-8") - return to_text(Path(normalized_path).as_uri(), encoding="utf-8") + normalized_path = Path(normalize_drive(os.path.abspath(path))).as_posix() + if os.name == "nt" and normalized_path[1] == ":": + drive, _, path = normalized_path.partition(":") + # XXX: This enables us to handle half-surrogates that were never + # XXX: actually part of a surrogate pair, but were just incidentally + # XXX: passed in as a piece of a filename + quoted_path = quote(fs_encode(path)) + return fs_decode("file:///{0}:{1}".format(drive, quoted_path)) + # XXX: This is also here to help deal with incidental dangling surrogates + # XXX: on linux, by making sure they are preserved during encoding so that + # XXX: we can urlencode the backslash correctly + bytes_path = to_bytes(normalized_path, errors="backslashreplace") + return fs_decode("file://{0}".format(quote(bytes_path))) def url_to_path(url): @@ -174,7 +192,6 @@ def url_to_path(url): Follows logic taken from pip's equivalent function """ - from .misc import to_bytes assert is_file_url(url), "Only file: urls can be converted to local paths" _, netloc, path, _, _ = urllib_parse.urlsplit(url) @@ -183,7 +200,7 @@ def url_to_path(url): netloc = "\\\\" + netloc path = urllib_request.url2pathname(netloc + path) - return to_bytes(path, encoding="utf-8") + return urllib_parse.unquote(path) def is_valid_url(url): diff --git a/pipenv/vendor/yaspin/spinners.py b/pipenv/vendor/yaspin/spinners.py index 
diff --git a/pipenv/vendor/yaspin/spinners.py b/pipenv/vendor/yaspin/spinners.py
index 9c3fa7b84b..60822a2c67 100644
--- a/pipenv/vendor/yaspin/spinners.py
+++ b/pipenv/vendor/yaspin/spinners.py
@@ -11,10 +11,7 @@
 import os
 from collections import namedtuple
 
-try:
-    import simplejson as json
-except ImportError:
-    import json
+import json
 
 
 THIS_DIR = os.path.dirname(os.path.realpath(__file__))
diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py
index 925d48a9be..6c66d00850 100644
--- a/tasks/vendoring/__init__.py
+++ b/tasks/vendoring/__init__.py
@@ -61,7 +61,8 @@
     'distlib': 'https://github.com/vsajip/distlib/raw/master/LICENSE.txt',
     'pythonfinder': 'https://raw.githubusercontent.com/techalchemy/pythonfinder/master/LICENSE.txt',
     'pyparsing': 'https://raw.githubusercontent.com/pyparsing/pyparsing/master/LICENSE',
-    'resolvelib': 'https://raw.githubusercontent.com/sarugaku/resolvelib/master/LICENSE'
+    'resolvelib': 'https://raw.githubusercontent.com/sarugaku/resolvelib/master/LICENSE',
+    'funcsigs': 'https://raw.githubusercontent.com/aliles/funcsigs/master/LICENSE'
 }
 
 FILE_WHITE_LIST = (
diff --git a/tasks/vendoring/patches/vendor/pip_shims_module_names.patch b/tasks/vendoring/patches/vendor/pip_shims_module_names.patch
index dfddacca6a..f783c96e46 100644
--- a/tasks/vendoring/patches/vendor/pip_shims_module_names.patch
+++ b/tasks/vendoring/patches/vendor/pip_shims_module_names.patch
@@ -2,7 +2,7 @@ diff --git a/pipenv/vendor/pip_shims/__init__.py b/pipenv/vendor/pip_shims/__ini
 index 2af4166e..598b9ad8 100644
 --- a/pipenv/vendor/pip_shims/__init__.py
 +++ b/pipenv/vendor/pip_shims/__init__.py
-@@ -11,10 +11,13 @@ __version__ = "0.4.1.dev0"
+@@ -11,10 +11,13 @@ __version__ = "0.5.1"
  if "pip_shims" in sys.modules:
      # mainly to keep a reference to the old module on hand so it doesn't get
      # weakref'd away
@@ -18,3 +18,25 @@ index 2af4166e..598b9ad8 100644
  module.shims = shims
  module.__dict__.update(
      {
+diff --git a/pipenv/vendor/pip_shims/compat.py b/../pip-shims/src/pip_shims/compat.py
+index ed99d970..63061a6a 100644
+--- a/pipenv/vendor/pip_shims/compat.py
++++ b/../pip-shims/src/pip_shims/compat.py
+@@ -25,14 +25,14 @@ from .utils import (
+ )
+ 
+ if sys.version_info[:2] < (3, 5):
+-    from backports.tempfile import TemporaryDirectory
++    from pipenv.vendor.vistir.compat import TemporaryDirectory
+ else:
+     from tempfile import TemporaryDirectory
+ 
+ if six.PY3:
+     from contextlib import ExitStack
+ else:
+-    from contextlib2 import ExitStack
++    from pipenv.vendor.contextlib2 import ExitStack
+ 
+ 
+ if MYPY_RUNNING:
+
diff --git a/tasks/vendoring/patches/vendor/pythonfinder-pathlib-import.patch b/tasks/vendoring/patches/vendor/pythonfinder-pathlib-import.patch
new file mode 100644
index 0000000000..89cad03792
--- /dev/null
+++ b/tasks/vendoring/patches/vendor/pythonfinder-pathlib-import.patch
@@ -0,0 +1,13 @@
+diff --git a/pipenv/vendor/pythonfinder/compat.py b/pipenv/vendor/pythonfinder/compat.py
+index 6fb4542f..d76c4efc 100644
+--- a/pipenv/vendor/pythonfinder/compat.py
++++ b/pipenv/vendor/pythonfinder/compat.py
+@@ -4,7 +4,7 @@ import sys
+ import six
+ 
+ if sys.version_info[:2] <= (3, 4):
+-    from pathlib2 import Path  # type: ignore  # noqa
++    from pipenv.vendor.pathlib2 import Path  # type: ignore  # noqa
+ else:
+     from pathlib import Path
+ 
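The new ``pythonfinder-pathlib-import.patch`` above follows the convention the other vendoring patches enforce: a vendored library must not import a dependency from site-packages, it has to use pipenv's bundled copy. A hypothetical standalone illustration of the rewritten fallback import, under the assumption that the vendored ``pathlib2`` is always present::

    import sys

    if sys.version_info[:2] <= (3, 4):
        # Use the copy shipped inside pipenv, never the system pathlib2
        from pipenv.vendor.pathlib2 import Path
    else:
        from pathlib import Path  # stdlib on Python 3.5+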
diff --git a/tasks/vendoring/patches/vendor/yaspin-signal-handling.patch b/tasks/vendoring/patches/vendor/yaspin-signal-handling.patch
index 511e782ae5..c6144f4073 100644
--- a/tasks/vendoring/patches/vendor/yaspin-signal-handling.patch
+++ b/tasks/vendoring/patches/vendor/yaspin-signal-handling.patch
@@ -60,3 +60,19 @@ index d01fb98e..06b8b621 100644
  def _clear_line():
 -    sys.stdout.write("\033[K")
 +    sys.stdout.write(chr(27) + "[K")
+diff --git a/pipenv/vendor/yaspin/spinners.py b/pipenv/vendor/yaspin/spinners.py
+index 9c3fa7b8..60822a2c 100644
+--- a/pipenv/vendor/yaspin/spinners.py
++++ b/pipenv/vendor/yaspin/spinners.py
+@@ -11,10 +11,7 @@ import codecs
+ import os
+ from collections import namedtuple
+ 
+-try:
+-    import simplejson as json
+-except ImportError:
+-    import json
++import json
+ 
+ 
+ THIS_DIR = os.path.dirname(os.path.realpath(__file__))

From 14566756a0eb73b9ad47d438582d2549023bad03 Mon Sep 17 00:00:00 2001
From: Dan Ryan <dan.ryan@canonical.com>
Date: Wed, 25 Mar 2020 17:25:18 -0400
Subject: [PATCH 04/49] Fix outline table conversion for new tomlkit release

- Fix conversion of outline tables with new tomlkit release
- Fix pip and pip-shims patches

Signed-off-by: Dan Ryan <dan.ryan@canonical.com>
---
 pipenv/utils.py                                             | 6 +++++-
 tasks/vendoring/patches/patched/pip19.patch                 | 6 +++---
 tasks/vendoring/patches/vendor/pip_shims_module_names.patch | 4 ++--
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/pipenv/utils.py b/pipenv/utils.py
index 55cb7a9653..680b7e218c 100644
--- a/pipenv/utils.py
+++ b/pipenv/utils.py
@@ -89,7 +89,11 @@ def cleanup_toml(tml):
 def convert_toml_outline_tables(parsed):
     """Converts all outline tables to inline tables."""
     def convert_tomlkit_table(section):
-        for key, value in section._body:
+        if isinstance(section, tomlkit.items.Table):
+            body = section.value._body
+        else:
+            body = section._body
+        for key, value in body:
             if not key:
                 continue
             if hasattr(value, "keys") and not isinstance(value, tomlkit.items.InlineTable):
diff --git a/tasks/vendoring/patches/patched/pip19.patch b/tasks/vendoring/patches/patched/pip19.patch
index 7d57cfd537..fa0a7dea39 100644
--- a/tasks/vendoring/patches/patched/pip19.patch
+++ b/tasks/vendoring/patches/patched/pip19.patch
@@ -504,10 +504,10 @@ index 77d40be6..8a32cf2d 100644
 -        return path
 +        return path
 \ No newline at end of file
-diff --git a/pipenv/patched/notpip/_internal/commands/__init__.py b/pipenv/patched/notpip/_internal/commands/__init__.py
+diff --git a/pipenv/patched/pip/_internal/commands/__init__.py b/pipenv/patched/pip/_internal/commands/__init__.py
 index abcafa55..ca155a94 100644
---- a/pipenv/patched/pip/_internal/commands/__init__.py
-+++ b/pipenv/patched/pip/_internal/commands/__init__.py
 @@ -21,7 +21,7 @@ CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary')
 
  # The ordering matters for help display.
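The ``pipenv/utils.py`` hunk in this patch works around an API change in newer tomlkit releases: a ``Table``'s key/value pairs moved onto an inner container reachable through ``.value``, while older releases exposed ``._body`` directly on the table object. A small sketch of the compatibility shim (a hypothetical standalone helper, not the shipped function)::

    import tomlkit

    def table_body(section):
        """Return the (key, value) body list across tomlkit versions."""
        if isinstance(section, tomlkit.items.Table):
            return section.value._body  # newer tomlkit: body lives on .value
        return section._body            # older tomlkit and plain containers

Iterating that body and skipping entries with a falsy key mirrors what ``convert_toml_outline_tables`` does before inlining each sub-table.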
diff --git a/tasks/vendoring/patches/vendor/pip_shims_module_names.patch b/tasks/vendoring/patches/vendor/pip_shims_module_names.patch
index f783c96e46..7c49f82a96 100644
--- a/tasks/vendoring/patches/vendor/pip_shims_module_names.patch
+++ b/tasks/vendoring/patches/vendor/pip_shims_module_names.patch
@@ -18,10 +18,10 @@ index 2af4166e..598b9ad8 100644
  module.shims = shims
  module.__dict__.update(
      {
-diff --git a/pipenv/vendor/pip_shims/compat.py b/../pip-shims/src/pip_shims/compat.py
+diff --git a/pipenv/vendor/pip_shims/compat.py b/pipenv/vendor/pip_shims/compat.py
 index ed99d970..63061a6a 100644
 --- a/pipenv/vendor/pip_shims/compat.py
-+++ b/../pip-shims/src/pip_shims/compat.py
++++ b/pipenv/vendor/pip_shims/compat.py
 @@ -25,14 +25,14 @@ from .utils import (
  )
 

From e7fc6e9425e9698e7e0650fc2a873e6a5de42f68 Mon Sep 17 00:00:00 2001
From: Dan Ryan <dan.ryan@canonical.com>
Date: Wed, 25 Mar 2020 19:30:16 -0400
Subject: [PATCH 05/49] Re-vendor dependencies

- Update tomlkit => 0.5.11

Signed-off-by: Dan Ryan <dan.ryan@canonical.com>
---
 Pipfile.lock                                       |  549 +++++---
 pipenv/patched/piptools/repositories/pypi.py       |    1 -
 pipenv/patched/safety/__init__.py                  |    2 +-
 pipenv/patched/safety/safety.py                    |    2 +-
 pipenv/vendor/click/LICENSE.rst                    |   47 +-
 pipenv/vendor/click/__init__.py                    |  150 +-
 pipenv/vendor/click/_bashcomplete.py               |  214 ++-
 pipenv/vendor/click/_compat.py                     |  443 +++---
 pipenv/vendor/click/_termui_impl.py                |  272 ++--
 pipenv/vendor/click/_textwrap.py                   |    3 +-
 pipenv/vendor/click/_unicodefun.py                 |  110 +-
 pipenv/vendor/click/_winconsole.py                 |  181 ++-
 pipenv/vendor/click/core.py                        |  652 +++++---
 pipenv/vendor/click/decorators.py                  |  140 +-
 pipenv/vendor/click/exceptions.py                  |   82 +-
 pipenv/vendor/click/formatting.py                  |  125 +-
 pipenv/vendor/click/globals.py                     |    9 +-
 pipenv/vendor/click/parser.py                      |   93 +-
 pipenv/vendor/click/termui.py                      |  251 ++-
 pipenv/vendor/click/testing.py                     |  144 +-
 pipenv/vendor/click/types.py                       |  356 +++--
 pipenv/vendor/click/utils.py                       |  107 +-
 pipenv/vendor/colorama/__init__.py                 |    2 +-
 pipenv/vendor/funcsigs/LICENSE                     |   13 +
 pipenv/vendor/idna/LICENSE.rst                     |   52 +-
 pipenv/vendor/idna/core.py                         |    4 +-
 pipenv/vendor/idna/idnadata.py                     |   56 +-
 pipenv/vendor/idna/package_data.py                 |    2 +-
 pipenv/vendor/idna/uts46data.py                    |  724 ++++++----
 pipenv/vendor/importlib_metadata/__init__.py       |  111 +-
 pipenv/vendor/importlib_metadata/_compat.py        |   11 +-
 .../importlib_metadata/docs/changelog.rst          |   27 +
 pipenv/vendor/importlib_metadata/docs/conf.py      |    3 +
 .../vendor/importlib_metadata/docs/index.rst       |   12 +-
 .../vendor/importlib_metadata/docs/using.rst       |   37 +-
 .../importlib_metadata/tests/fixtures.py           |   36 +-
 .../tests/test_integration.py                      |   22 +-
 .../importlib_metadata/tests/test_main.py          |   28 +
 pipenv/vendor/jinja2/__init__.py                   |  123 +-
 pipenv/vendor/jinja2/_compat.py                    |   61 +-
 pipenv/vendor/jinja2/_identifier.py                |    6 +-
 pipenv/vendor/jinja2/asyncfilters.py               |   69 +-
 pipenv/vendor/jinja2/asyncsupport.py               |  186 +--
 pipenv/vendor/jinja2/bccache.py                    |  137 +-
 pipenv/vendor/jinja2/compiler.py                   | 1242 +++++++++--------
 pipenv/vendor/jinja2/constants.py                  |   15 +-
 pipenv/vendor/jinja2/debug.py                      |  531 +++----
 pipenv/vendor/jinja2/defaults.py                   |   68 +-
 pipenv/vendor/jinja2/environment.py                |  592 ++++----
 pipenv/vendor/jinja2/exceptions.py                 |   73 +-
 pipenv/vendor/jinja2/ext.py                        |  387 +++--
 pipenv/vendor/jinja2/filters.py                    |  706 ++++++----
 pipenv/vendor/jinja2/idtracking.py                 |   58 +-
 pipenv/vendor/jinja2/lexer.py                      |  768 +++++-----
 pipenv/vendor/jinja2/loaders.py                    |  277 ++--
 pipenv/vendor/jinja2/meta.py                       |   33 +-
 pipenv/vendor/jinja2/nativetypes.py                |  275 ++--
pipenv/vendor/jinja2/nodes.py | 427 +++--- pipenv/vendor/jinja2/optimizer.py | 60 +- pipenv/vendor/jinja2/parser.py | 568 ++++---- pipenv/vendor/jinja2/runtime.py | 693 +++++---- pipenv/vendor/jinja2/sandbox.py | 198 +-- pipenv/vendor/jinja2/tests.py | 139 +- pipenv/vendor/jinja2/utils.py | 369 +++-- pipenv/vendor/jinja2/visitor.py | 14 +- pipenv/vendor/packaging/LICENSE.APACHE | 2 +- pipenv/vendor/packaging/__about__.py | 2 +- pipenv/vendor/packaging/_compat.py | 9 +- pipenv/vendor/packaging/_structures.py | 26 +- pipenv/vendor/packaging/_typing.py | 39 + pipenv/vendor/packaging/markers.py | 54 +- pipenv/vendor/packaging/py.typed | 0 pipenv/vendor/packaging/requirements.py | 9 +- pipenv/vendor/packaging/specifiers.py | 168 ++- pipenv/vendor/packaging/tags.py | 557 ++++++-- pipenv/vendor/packaging/utils.py | 13 +- pipenv/vendor/packaging/version.py | 151 +- pipenv/vendor/parse.py | 55 +- pipenv/vendor/pexpect/__init__.py | 2 +- pipenv/vendor/pexpect/_async.py | 6 +- pipenv/vendor/pexpect/expect.py | 133 +- pipenv/vendor/pexpect/pty_spawn.py | 17 +- pipenv/vendor/pexpect/run.py | 2 +- pipenv/vendor/pexpect/screen.py | 2 +- pipenv/vendor/pexpect/spawnbase.py | 3 + pipenv/vendor/pip_shims/backports.py | 1183 ---------------- pipenv/vendor/pyparsing.py | 225 ++- pipenv/vendor/requests/LICENSE | 2 +- pipenv/vendor/requests/__init__.py | 12 +- pipenv/vendor/requests/__version__.py | 8 +- pipenv/vendor/requests/api.py | 7 +- pipenv/vendor/requests/auth.py | 4 +- pipenv/vendor/requests/compat.py | 2 + pipenv/vendor/requests/models.py | 7 +- pipenv/vendor/requests/sessions.py | 23 +- pipenv/vendor/requests/status_codes.py | 15 +- pipenv/vendor/requests/structures.py | 4 +- pipenv/vendor/requests/utils.py | 11 +- pipenv/vendor/requirementslib/LICENSE | 2 +- pipenv/vendor/requirementslib/utils.py | 2 +- pipenv/vendor/shellingham/__init__.py | 2 +- pipenv/vendor/shellingham/posix/__init__.py | 60 +- pipenv/vendor/six.LICENSE | 2 +- pipenv/vendor/six.py | 57 +- pipenv/vendor/tomlkit/__init__.py | 2 +- pipenv/vendor/tomlkit/container.py | 134 +- pipenv/vendor/tomlkit/items.py | 1 + pipenv/vendor/tomlkit/parser.py | 78 +- pipenv/vendor/tomlkit/toml_char.py | 2 +- pipenv/vendor/urllib3/__init__.py | 2 +- pipenv/vendor/urllib3/connection.py | 34 - pipenv/vendor/urllib3/connectionpool.py | 4 +- .../urllib3/contrib/_appengine_environ.py | 14 +- pipenv/vendor/urllib3/response.py | 2 +- pipenv/vendor/urllib3/util/ssl_.py | 2 +- pipenv/vendor/urllib3/util/url.py | 18 +- pipenv/vendor/vistir/__init__.py | 2 +- .../vendoring/patches/patched/piptools.patch | 2 +- 118 files changed, 9115 insertions(+), 7208 deletions(-) create mode 100644 pipenv/vendor/funcsigs/LICENSE create mode 100644 pipenv/vendor/packaging/_typing.py create mode 100644 pipenv/vendor/packaging/py.typed delete mode 100644 pipenv/vendor/pip_shims/backports.py diff --git a/Pipfile.lock b/Pipfile.lock index 924c988ad7..69e4612846 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -5,11 +5,13 @@ }, "pipfile-spec": 6, "requires": {}, - "sources": [{ + "sources": [ + { "name": "pypi", "url": "https://pypi.org/simple", "verify_ssl": true - }] + } + ] }, "default": {}, "develop": { @@ -37,10 +39,10 @@ }, "arpeggio": { "hashes": [ - "sha256:a5258b84f76661d558492fa87e42db634df143685a0e51802d59cae7daad8732", - "sha256:dc5c0541e7cc2c6033dc0338133436abfac53655624784736e9bc8bd35e56583" + "sha256:948ce06163a48a72c97f4fe79ad3d1c1330b6fec4f22ece182fb60ef60bd022b", + "sha256:b9178917594bb9758002faed31e1e1c968b5ea7f2a8f78fd4a5b8fecaccfcfcd" ], - "version": "==1.9.0" 
+ "version": "==1.9.2" }, "atomicwrites": { "hashes": [ @@ -51,51 +53,51 @@ }, "attrs": { "hashes": [ - "sha256:69c0dbf2ed392de1cb5ec704444b08a5ef81680a61cb899dc08127123af36a79", - "sha256:f0b870f674851ecbfbbbd364d6b5cbdff9dcedbc7f3f5e18a6891057f21fe399" + "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", + "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==19.1.0" + "version": "==19.3.0" }, "babel": { "hashes": [ - "sha256:af92e6106cb7c55286b25b38ad7695f8b4efb36a90ba483d7f7a6628c46158ab", - "sha256:e86135ae101e31e2c8ec20a4e0c5220f4eed12487d5cf3f78be7e98d3a57fc28" + "sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38", + "sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.7.0" + "version": "==2.8.0" }, "backports.functools-lru-cache": { "hashes": [ - "sha256:9d98697f088eb1b0fa451391f91afb5e3ebde16bbdb272819fd091151fda4f1a", - "sha256:f0b0e4eba956de51238e17573b7087e852dfe9854afd2e9c873f73fc0ca0a6dd" + "sha256:0bada4c2f8a43d533e4ecb7a12214d9420e66eb206d54bf2d682581ca4b80848", + "sha256:8fde5f188da2d593bd5bc0be98d9abc46c95bb8a9dde93429570192ee6cc2d4a" ], - "markers": "python_version < '3'", - "version": "==1.5" + "markers": "python_version < '3.2'", + "version": "==1.6.1" }, "beautifulsoup4": { "hashes": [ - "sha256:034740f6cb549b4e932ae1ab975581e6103ac8f942200a0e9759065984391858", - "sha256:945065979fb8529dd2f37dbb58f00b661bdbcbebf954f93b32fdf5263ef35348", - "sha256:ba6d5c59906a85ac23dadfe5c88deaf3e179ef565f4898671253e50a78680718" + "sha256:05fd825eb01c290877657a56df4c6e4c311b3965bda790c613a3d6fb01a5462a", + "sha256:9fbb4d6e48ecd30bcacc5b63b94088192dcda178513b2ae3c394229f8911b887", + "sha256:e1505eeed31b0f4ce2dbb3bc8eb256c04cc2b3b72af7d551a4ab6efd5cbe5dae" ], - "version": "==4.7.1" + "version": "==4.8.2" }, "black": { "hashes": [ - "sha256:09a9dcb7c46ed496a9850b76e4e825d6049ecd38b611f1224857a79bd985a8cf", - "sha256:68950ffd4d9169716bcb8719a56c07a2f4485354fec061cdd5910aa07369731c" + "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b", + "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539" ], "markers": "python_version >= '3.6'", - "version": "==19.3b0" + "version": "==19.10b0" }, "bleach": { "hashes": [ - "sha256:213336e49e102af26d9cde77dd2d0397afabc5a6bf2fed985dc35b5d1e285a16", - "sha256:3fdf7f77adcf649c9911387df51254b813185e32b2c6619f690b593a617e19fa" + "sha256:cc8da25076a1fe56c3ac63671e2194458e0c4d9c7becfd52ca251650d517903c", + "sha256:e78e426105ac07026ba098f04de8abe9b6e3e98b5befbf89b51a5ef0a4292b03" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==3.1.0" + "version": "==3.1.4" }, "bs4": { "hashes": [ @@ -105,10 +107,43 @@ }, "certifi": { "hashes": [ - "sha256:046832c04d4e752f37383b628bc601a7ea7211496b4638f6514d0e5b9acc4939", - "sha256:945e3ba63a0b9f577b1395204e13c3a231f9bc0223888be653286534e5873695" - ], - "version": "==2019.6.16" + "sha256:017c25db2a153ce562900032d5bc68e9f191e44e9a0f762f373977de9df1fbb3", + "sha256:25b64c7da4cd7479594d035c08c2d809eb4aab3a26e5a990ea98cc450c320f1f" + ], + "version": "==2019.11.28" + }, + "cffi": { + "hashes": [ + "sha256:001bf3242a1bb04d985d63e138230802c6c8d4db3668fb545fb5005ddf5bb5ff", + "sha256:00789914be39dffba161cfc5be31b55775de5ba2235fe49aa28c148236c4e06b", + 
"sha256:028a579fc9aed3af38f4892bdcc7390508adabc30c6af4a6e4f611b0c680e6ac", + "sha256:14491a910663bf9f13ddf2bc8f60562d6bc5315c1f09c704937ef17293fb85b0", + "sha256:1cae98a7054b5c9391eb3249b86e0e99ab1e02bb0cc0575da191aedadbdf4384", + "sha256:2089ed025da3919d2e75a4d963d008330c96751127dd6f73c8dc0c65041b4c26", + "sha256:2d384f4a127a15ba701207f7639d94106693b6cd64173d6c8988e2c25f3ac2b6", + "sha256:337d448e5a725bba2d8293c48d9353fc68d0e9e4088d62a9571def317797522b", + "sha256:399aed636c7d3749bbed55bc907c3288cb43c65c4389964ad5ff849b6370603e", + "sha256:3b911c2dbd4f423b4c4fcca138cadde747abdb20d196c4a48708b8a2d32b16dd", + "sha256:3d311bcc4a41408cf5854f06ef2c5cab88f9fded37a3b95936c9879c1640d4c2", + "sha256:62ae9af2d069ea2698bf536dcfe1e4eed9090211dbaafeeedf5cb6c41b352f66", + "sha256:66e41db66b47d0d8672d8ed2708ba91b2f2524ece3dee48b5dfb36be8c2f21dc", + "sha256:675686925a9fb403edba0114db74e741d8181683dcf216be697d208857e04ca8", + "sha256:7e63cbcf2429a8dbfe48dcc2322d5f2220b77b2e17b7ba023d6166d84655da55", + "sha256:8a6c688fefb4e1cd56feb6c511984a6c4f7ec7d2a1ff31a10254f3c817054ae4", + "sha256:8c0ffc886aea5df6a1762d0019e9cb05f825d0eec1f520c51be9d198701daee5", + "sha256:95cd16d3dee553f882540c1ffe331d085c9e629499ceadfbda4d4fde635f4b7d", + "sha256:99f748a7e71ff382613b4e1acc0ac83bf7ad167fb3802e35e90d9763daba4d78", + "sha256:b8c78301cefcf5fd914aad35d3c04c2b21ce8629b5e4f4e45ae6812e461910fa", + "sha256:c420917b188a5582a56d8b93bdd8e0f6eca08c84ff623a4c16e809152cd35793", + "sha256:c43866529f2f06fe0edc6246eb4faa34f03fe88b64a0a9a942561c8e22f4b71f", + "sha256:cab50b8c2250b46fe738c77dbd25ce017d5e6fb35d3407606e7a4180656a5a6a", + "sha256:cef128cb4d5e0b3493f058f10ce32365972c554572ff821e175dbc6f8ff6924f", + "sha256:cf16e3cf6c0a5fdd9bc10c21687e19d29ad1fe863372b5543deaec1039581a30", + "sha256:e56c744aa6ff427a607763346e4170629caf7e48ead6921745986db3692f987f", + "sha256:e577934fc5f8779c554639376beeaa5657d54349096ef24abe8c74c5d9c117c3", + "sha256:f2b0fa0c01d8a0c7483afd9f31d7ecf2d71760ca24499c8697aeb5ca37dc090c" + ], + "version": "==1.14.0" }, "chardet": { "hashes": [ @@ -127,34 +162,61 @@ }, "click": { "hashes": [ - "sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13", - "sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7" + "sha256:8a18b4ea89d8820c5d0c7da8a64b2c324b4dabb695804dbfea19b9be9d88c0cc", + "sha256:e345d143d80bf5ee7534056164e5e112ea5e22716bbb1ce727941f4c8b471b9a" ], "index": "pypi", - "version": "==7.0" + "version": "==7.1.1" }, "colorama": { "hashes": [ - "sha256:05eed71e2e327246ad6b38c540c4a3117230b19679b875190486ddd2d721422d", - "sha256:f8ac84de7840f5b9c4e3347b3c1eaa50f7e49c2b07596221daec5edaabbd7c48" + "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff", + "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1" ], - "version": "==0.4.1" + "version": "==0.4.3" }, "configparser": { "hashes": [ - "sha256:8be81d89d6e7b4c0d4e44bcc525845f6da25821de80cb5e06e7e0238a2899e32", - "sha256:da60d0014fd8c55eb48c1c5354352e363e2d30bbf7057e5e171a468390184c75" + "sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c", + "sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df" ], - "markers": "python_version < '3'", - "version": "==3.7.4" + "markers": "python_version < '3.2'", + "version": "==4.0.2" }, "contextlib2": { "hashes": [ - "sha256:509f9419ee91cdd00ba34443217d5ca51f5a364a404e1dce9e8979cea969ca48", - "sha256:f5260a6e679d2ff42ec91ec5252f4eeffdcf21053db9113bd0a8e4d953769c00" + 
"sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e", + "sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b" ], "markers": "python_version < '3'", - "version": "==0.5.5" + "version": "==0.6.0.post1" + }, + "cryptography": { + "hashes": [ + "sha256:02079a6addc7b5140ba0825f542c0869ff4df9a69c360e339ecead5baefa843c", + "sha256:1df22371fbf2004c6f64e927668734070a8953362cd8370ddd336774d6743595", + "sha256:369d2346db5934345787451504853ad9d342d7f721ae82d098083e1f49a582ad", + "sha256:3cda1f0ed8747339bbdf71b9f38ca74c7b592f24f65cdb3ab3765e4b02871651", + "sha256:44ff04138935882fef7c686878e1c8fd80a723161ad6a98da31e14b7553170c2", + "sha256:4b1030728872c59687badcca1e225a9103440e467c17d6d1730ab3d2d64bfeff", + "sha256:58363dbd966afb4f89b3b11dfb8ff200058fbc3b947507675c19ceb46104b48d", + "sha256:6ec280fb24d27e3d97aa731e16207d58bd8ae94ef6eab97249a2afe4ba643d42", + "sha256:7270a6c29199adc1297776937a05b59720e8a782531f1f122f2eb8467f9aab4d", + "sha256:73fd30c57fa2d0a1d7a49c561c40c2f79c7d6c374cc7750e9ac7c99176f6428e", + "sha256:7f09806ed4fbea8f51585231ba742b58cbcfbfe823ea197d8c89a5e433c7e912", + "sha256:90df0cc93e1f8d2fba8365fb59a858f51a11a394d64dbf3ef844f783844cc793", + "sha256:971221ed40f058f5662a604bd1ae6e4521d84e6cad0b7b170564cc34169c8f13", + "sha256:a518c153a2b5ed6b8cc03f7ae79d5ffad7315ad4569b2d5333a13c38d64bd8d7", + "sha256:b0de590a8b0979649ebeef8bb9f54394d3a41f66c5584fff4220901739b6b2f0", + "sha256:b43f53f29816ba1db8525f006fa6f49292e9b029554b3eb56a189a70f2a40879", + "sha256:d31402aad60ed889c7e57934a03477b572a03af7794fa8fb1780f21ea8f6551f", + "sha256:de96157ec73458a7f14e3d26f17f8128c959084931e8997b9e655a39c8fde9f9", + "sha256:df6b4dca2e11865e6cfbfb708e800efb18370f5a46fd601d3755bc7f85b3a8a2", + "sha256:ecadccc7ba52193963c0475ac9f6fa28ac01e01349a2ca48509667ef41ffd2cf", + "sha256:fb81c17e0ebe3358486cd8cc3ad78adbae58af12fc2bf2bc0bb84e8090fa5ce8" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.8" }, "decorator": { "hashes": [ @@ -163,13 +225,19 @@ ], "version": "==4.4.0" }, + "distlib": { + "hashes": [ + "sha256:2e166e231a26b36d6dfe35a48c4464346620f8645ed0ace01ee31822b288de21" + ], + "version": "==0.3.0" + }, "docutils": { "hashes": [ - "sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6", - "sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274", - "sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6" + "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", + "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], - "version": "==0.14" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==0.16" }, "entrypoints": { "hashes": [ @@ -181,51 +249,58 @@ }, "enum34": { "hashes": [ - "sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850", - "sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a", - "sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79", - "sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1" + "sha256:a98a201d6de3f2ab3db284e70a33b0f896fbf35f8086594e8c9e74b909058d53", + "sha256:c3858660960c984d6ab0ebad691265180da2b43f07e061c0f8dca9ef3cffd328", + "sha256:cce6a7477ed816bd2542d03d53db9f0db935dd013b70f336a95c73979289f248" ], - "markers": "python_version < '3'", - "version": "==1.1.6" + "markers": "python_version < '3.0'", + "version": "==1.1.10" }, "execnet": { "hashes": [ - 
"sha256:027ee5d961afa01e97b90d6ccc34b4ed976702bc58e7f092b3c513ea288cb6d2", - "sha256:752a3786f17416d491f833a29217dda3ea4a471fc5269c492eebcee8cc4772d3" + "sha256:cacb9df31c9680ec5f95553976c4da484d407e85e41c83cb812aa014f0eddc50", + "sha256:d4efd397930c46415f62f8a31388d6be4f27a91d7550eb79bc64a756e0056547" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.6.0" + "version": "==1.7.1" + }, + "filelock": { + "hashes": [ + "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59", + "sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836" + ], + "version": "==3.0.12" }, "flake8": { "hashes": [ - "sha256:859996073f341f2670741b51ec1e67a01da142831aa1fdc6242dbf88dffbe661", - "sha256:a796a115208f5c03b18f332f7c11729812c8c3ded6c46319c59b53efd3819da8" + "sha256:45681a117ecc81e870cbf1262835ae4af5e7a8b08e40b944a8a6e6b895914cfb", + "sha256:49356e766643ad15072a789a20915d3c91dc89fd313ccd71802303fd67e4deca" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==3.7.7" + "version": "==3.7.9" }, "flaky": { "hashes": [ - "sha256:12bd5e41f372b2190e8d754b6e5829c2f11dbc764e10b30f57e59f829c9ca1da", - "sha256:a94931c46a33469ec26f09b652bc88f55a8f5cc77807b90ca7bbafef1108fd7d" + "sha256:5471615b32b0f8086573de924475b1f0d31e0e8655a089eb9c38a0fbff3f11aa", + "sha256:8cd5455bb00c677f787da424eaf8c4a58a922d0e97126d3085db5b279a98b698" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==3.5.3" + "version": "==3.6.1" }, "flask": { "hashes": [ - "sha256:ad7c6d841e64296b962296c2c2dabc6543752985727af86a975072dea984b6f3", - "sha256:e7d32475d1de5facaa55e3958bc4ec66d3762076b074296aa50ef8fdc5b9df61" + "sha256:13f9f196f330c7c2c5d7a5cf91af894110ca0215ac051b5844701f2bfd934d52", + "sha256:45eb5a6fd193d6cf7e0cf5d8a5b31f83d5faae0293695626f539a823e93b13f6" ], - "version": "==1.0.3" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.1.1" }, "funcsigs": { "hashes": [ "sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca", "sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50" ], - "markers": "python_version < '3.0'", + "markers": "python_version < '3.3'", "version": "==1.0.2" }, "functools32": { @@ -245,34 +320,43 @@ }, "futures": { "hashes": [ - "sha256:9ec02aa7d674acb8618afb127e27fde7fc68994c0437ad759fa094a574adb265", - "sha256:ec0a6cb848cc212002b9828c3e34c675e0c9ff6741dc445cab6fdd4e1085d1f1" + "sha256:49b3f5b064b6e3afc3316421a3f25f66c137ae88f068abbf72830170033c5e16", + "sha256:7e033af76a5e35f58e56da7a91e687706faf4e7bdfb2cbc3f2cca6b9bcda9794" ], "markers": "python_version < '3.2'", - "version": "==3.2.0" + "version": "==3.3.0" }, "idna": { "hashes": [ - "sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407", - "sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c" + "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb", + "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa" ], - "version": "==2.8" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.9" }, "imagesize": { "hashes": [ - "sha256:3f349de3eb99145973fefb7dbe38554414e5c30abd0c8e4b970a7c9d09f3a1d8", - "sha256:f3832918bc3c66617f92e35f5d70729187676313caa60c187eb0f28b8fe5e3b5" + "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", + 
"sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.1.0" + "version": "==1.2.0" }, "importlib-metadata": { "hashes": [ - "sha256:6dfd58dfe281e8d240937776065dd3624ad5469c835248219bd16cf2e12dbeb7", - "sha256:cb6ee23b46173539939964df59d3d72c3e0c1b5d54b84f1d8a7e912fe43612db" + "sha256:298a914c82144c6b3b06c568a8973b89ad2176685f43cd1ea9ba968307300fa9", + "sha256:dfc83688553a91a786c6c91eeb5f3b1d31f24d71877bbd94ecbf5484e57690a2" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.18" + "markers": "python_version < '3.8'", + "version": "==1.5.2" + }, + "importlib-resources": { + "hashes": [ + "sha256:4019b6a9082d8ada9def02bece4a76b131518866790d58fdda0b5f8c603b36c2", + "sha256:dd98ceeef3f5ad2ef4cc287b8586da4ebad15877f351e9688987ad663a0a29b8" + ], + "markers": "python_version < '3.7'", + "version": "==1.4.0" }, "incremental": { "hashes": [ @@ -283,19 +367,19 @@ }, "invoke": { "hashes": [ - "sha256:4f4de934b15c2276caa4fbc5a3b8a61c0eb0b234f2be1780d2b793321995c2d6", - "sha256:dc492f8f17a0746e92081aec3f86ae0b4750bf41607ea2ad87e5a7b5705121b7", - "sha256:eb6f9262d4d25b40330fb21d1e99bf0f85011ccc3526980f8a3eaedd4b43892e" + "sha256:87b3ef9d72a1667e104f89b159eaf8a514dbf2f3576885b2bbdefe74c3fb2132", + "sha256:93e12876d88130c8e0d7fd6618dd5387d6b36da55ad541481dfa5e001656f134", + "sha256:de3f23bfe669e3db1085789fd859eb8ca8e0c5d9c20811e2407fa042e8a5e15d" ], - "version": "==1.2.0" + "version": "==1.4.1" }, "isort": { "hashes": [ - "sha256:c40744b6bc5162bbb39c1257fe298b7a393861d50978b565f3ccd9cb9de0182a", - "sha256:f57abacd059dc3bd666258d1efb0377510a89777fda3e3274e3c01f7c03ae22d" + "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1", + "sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd" ], "index": "pypi", - "version": "==4.3.20" + "version": "==4.3.21" }, "itsdangerous": { "hashes": [ @@ -307,18 +391,19 @@ }, "jedi": { "hashes": [ - "sha256:2bb0603e3506f708e792c7f4ad8fc2a7a9d9c2d292a358fbbd58da531695595b", - "sha256:2c6bcd9545c7d6440951b12b44d373479bf18123a401a52025cf98563fbd826c" + "sha256:b4f4052551025c6b0b0b193b29a6ff7bdb74c52450631206c262aef9f7159ad2", + "sha256:d5c871cb9360b414f981e7072c52c33258d598305280fef91c6cae34739d65d5" ], "index": "pypi", - "version": "==0.13.3" + "version": "==0.16.0" }, "jinja2": { "hashes": [ - "sha256:065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013", - "sha256:14dd6caf1527abb21f08f86c784eac40853ba93edb79552aa1e4b8aef1b61c7b" + "sha256:93187ffbc7808079673ef52771baa950426fd664d3aad1d0fa3e95644360e250", + "sha256:b0eaf100007721b5c16c1fc1eecb87409464edc10469ddc9a22a27a99123be49" ], - "version": "==2.10.1" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.11.1" }, "markupsafe": { "hashes": [ @@ -326,13 +411,16 @@ "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161", "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235", "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5", + "sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42", "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff", "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b", "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1", 
"sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e", "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183", "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66", + "sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b", "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1", + "sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15", "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1", "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e", "sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b", @@ -349,7 +437,9 @@ "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6", "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f", "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f", - "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7" + "sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2", + "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", + "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.1" @@ -386,46 +476,54 @@ }, "packaging": { "hashes": [ - "sha256:0c98a5d0be38ed775798ece1b9727178c4469d9c3b4ada66e8e6b7849f8732af", - "sha256:9e1cbf8c12b1f1ce0bb5344b8d7ecf66a6f8a6e91bcb0c84593ed6d3ab5c4ab3" + "sha256:3c292b474fda1671ec57d46d739d072bfd495a4f51ad01a055121d81e952b7a3", + "sha256:82f77b9bee21c1bafbf35a84905d604d5d1223801d639cf3ed140bd651c08752" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==19.0" + "version": "==20.3" }, "parso": { "hashes": [ - "sha256:5052bb33be034cba784193e74b1cde6ebf29ae8b8c1e4ad94df0c4209bfc4826", - "sha256:db5881df1643bf3e66c097bfd8935cf03eae73f4cb61ae4433c9ea4fb6613446" + "sha256:0c5659e0c6eba20636f99a04f469798dca8da279645ce5c387315b2c23912157", + "sha256:8515fc12cfca6ee3aa59138741fc5624d62340c97e401c74875769948d4f2995" ], - "version": "==0.5.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==0.6.2" }, "parver": { "hashes": [ - "sha256:1b37a691af145a3a193eff269d53ba5b2ab16dfbb65d47d85360755919f5fe4b", - "sha256:72d056b8f8883ac90eef5554a9c8a47fac39d3b66479f3d2c8d5bc21b849cdba" + "sha256:b57d94e6f389f9db399bfc3ee4c4066f4cfb374ffef5727d5ae6a9c04eb8d228", + "sha256:bb9d19637c17819e276b5cf04e2dbfb81c4e2136da8873cc70dcd0e4fd3d14a3" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.2.1" + "version": "==0.3.0" }, "passa": { "git": "https://github.com/sarugaku/passa.git", - "ref": "a2ba0b30c86339cae5ef3a03046fc9c583452c40", + "ref": "2ac00f16cd5a8f07d679a3ab02b7cc13c6f42bee", "version": "==0.3.1.dev0" }, "pathlib2": { "hashes": [ - "sha256:25199318e8cc3c25dcb45cbe084cc061051336d5a9ea2a12448d3d8cb748f742", - "sha256:5887121d7f7df3603bca2f710e7219f3eca0eb69e0b7cc6e0a022e155ac931a7" + "sha256:0ec8205a157c80d7acc301c0b18fbd5d44fe655968f5d947b6ecef5290fc35db", + "sha256:6cd9a47b597b37cc57de1c05e56fb1a1c9cc9fab04fe78c29acd090418529868" ], - "markers": "python_version < '3.6'", - "version": "==2.3.3" + "markers": "python_version < '3'", + "version": "==2.3.5" + }, + "pathspec": { + "hashes": [ + "sha256:163b0632d4e31cef212976cf57b43d9fd6b0bac6e67c26015d611a647d5e7424", + 
"sha256:562aa70af2e0d434367d9790ad37aed893de47f1693e4201fd1d3dca15d19b96" + ], + "version": "==0.7.0" }, "pbr": { "hashes": [ - "sha256:9181e2a34d80f07a359ff1d0504fad3a47e00e1cf2c475b0aa7dcb030af54c40", - "sha256:94bdc84da376b3dd5061aa0c3b6faffe943ee2e56fa4ff9bd63e1643932f34fc" + "sha256:139d2625547dbfa5fb0b81daebb39601c478c21956dc57e2e07b74450a8c506b", + "sha256:61aa52a0f18b71c5cc58232d2cf8f8d09cd67fcad60b742a60124cb8d6951488" ], - "version": "==5.3.1" + "version": "==5.4.4" }, "pipenv": { "editable": true, @@ -444,19 +542,19 @@ }, "pluggy": { "hashes": [ - "sha256:0825a152ac059776623854c1543d65a4ad408eb3d33ee114dff91e57ec6ae6fc", - "sha256:b9817417e95936bf75d85d3f8767f7df6cdde751fc40aed3bb3074cbcb77757c" + "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", + "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.12.0" + "version": "==0.13.1" }, "py": { "hashes": [ - "sha256:64f65755aee5b381cea27766a3a147c3f15b9b6b9ac88676de66ba2ae36793fa", - "sha256:dc639b046a6e2cff5bbe40194ad65936d6ba360b52b3c3fe1d08a82dd50b5e53" + "sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa", + "sha256:c20fdd83a5dbc0af9efd622bee9a5564e278f6380fffcacc43ba6f43db2813b0" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.8.0" + "version": "==1.8.1" }, "pycodestyle": { "hashes": [ @@ -466,6 +564,14 @@ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.5.0" }, + "pycparser": { + "hashes": [ + "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", + "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.20" + }, "pyflakes": { "hashes": [ "sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0", @@ -476,34 +582,35 @@ }, "pygments": { "hashes": [ - "sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127", - "sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297" + "sha256:2a3fe295e54a20164a9df49c75fa58526d3be48e14aceba6d6b1e8ac0bfd6f1b", + "sha256:98c8aa5a9f778fcd1026a17361ddaf7330d1b7c62ae97c3bb0ae73e0b9b6b0fe" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.4.2" + "version": "==2.5.2" }, "pyparsing": { "hashes": [ - "sha256:1873c03321fc118f4e9746baf201ff990ceb915f433f23b395f5580d1840cb2a", - "sha256:9b6323ef4ab914af344ba97510e966d64ba91055d6b9afa6b30799340e89cc03" + "sha256:4c830582a84fb022400b85429791bc551f1f4871c33f23e44f353119e92f969f", + "sha256:c342dccb5250c08d45fd6f8b4a559613ca603b57498511740e65cd11a2e7dcec" ], "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.4.0" + "version": "==2.4.6" }, "pytest": { "hashes": [ - "sha256:4a784f1d4f2ef198fe9b7aef793e9fa1a3b2f84e822d9b3a64a181293a572d45", - "sha256:926855726d8ae8371803f7b2e6ec0a69953d9c6311fa7c3b6c1b929ff92d27da" + "sha256:19e8f75eac01dd3f211edd465b39efbcbdc8fc5f7866d7dd49fedb30d8adf339", + "sha256:c77a5f30a90e0ce24db9eaa14ddfd38d4afb5ea159309bdd2dae55b931bc9324" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==4.6.3" + "version": "==4.6.9" }, "pytest-forked": { "hashes": [ - 
"sha256:5fe33fbd07d7b1302c95310803a5e5726a4ff7f19d5a542b7ce57c76fed8135f", - "sha256:d352aaced2ebd54d42a65825722cb433004b4446ab5d2044851d9cc7a00c9e38" + "sha256:1805699ed9c9e60cb7a8179b8d4fa2b8898098e82d229b0825d8095f0f261100", + "sha256:1ae25dba8ee2e56fb47311c9638f9e58552691da87e82d25b0ce0e4bf52b7d87" ], - "version": "==1.0.2" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.1.3" }, "pytest-pypi": { "editable": true, @@ -511,40 +618,66 @@ }, "pytest-tap": { "hashes": [ - "sha256:3b05ec931424bbe44e944726b68f7ef185bb6d25ce9ce21ac52c9af7ffa9b506", - "sha256:ca063de56298034302f3cbce55c87a27d7bfa7af7de591cdb9ec6ce45fea5467" + "sha256:7de72c291dfc8de944a137366acd1e5877e21029868bd536dedaa8a61af7d2b4", + "sha256:87503e7496f9f5505aa603fc6a7b48cf224e9f6be0206958b1ee276810a2fe8a" ], - "version": "==2.3" + "version": "==3.1" }, "pytest-xdist": { "hashes": [ - "sha256:3489d91516d7847db5eaecff7a2e623dba68984835dbe6cedb05ae126c4fb17f", - "sha256:501795cb99e567746f30fe78850533d4cd500c93794128e6ab9988e92a17b1f8" + "sha256:0f46020d3d9619e6d17a65b5b989c1ebbb58fc7b1da8fb126d70f4bac4dfeed1", + "sha256:7dc0d027d258cd0defc618fb97055fbd1002735ca7a6d17037018cf870e24011" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.29.0" + "version": "==1.31.0" }, "pytz": { "hashes": [ - "sha256:303879e36b721603cc54604edcac9d20401bdbe31e1e4fdee5b9f98d5d31dfda", - "sha256:d747dd3d23d77ef44c6a3526e274af6efeb0a6f1afd5a69ba4d5be4098c8e141" + "sha256:1c557d7d0e871de1f5ccd5833f60fb2550652da6be2693c1e02300743d21500d", + "sha256:b02c06db6cf09c12dd25137e563b31700d3b80fcc4ad23abb7a315f2789819be" ], - "version": "==2019.1" + "version": "==2019.3" }, "readme-renderer": { "hashes": [ - "sha256:bb16f55b259f27f75f640acf5e00cf897845a8b3e4731b5c1a436e4b8529202f", - "sha256:c8532b79afc0375a85f10433eca157d6b50f7d6990f337fa498c96cd4bfc203d" - ], - "version": "==24.0" + "sha256:1b6d8dd1673a0b293766b4106af766b6eff3654605f9c4f239e65de6076bc222", + "sha256:e67d64242f0174a63c3b727801a2fff4c1f38ebe5d71d95ff7ece081945a6cd4" + ], + "version": "==25.0" + }, + "regex": { + "hashes": [ + "sha256:01b2d70cbaed11f72e57c1cfbaca71b02e3b98f739ce33f5f26f71859ad90431", + "sha256:046e83a8b160aff37e7034139a336b660b01dbfe58706f9d73f5cdc6b3460242", + "sha256:113309e819634f499d0006f6200700c8209a2a8bf6bd1bdc863a4d9d6776a5d1", + "sha256:200539b5124bc4721247a823a47d116a7a23e62cc6695744e3eb5454a8888e6d", + "sha256:25f4ce26b68425b80a233ce7b6218743c71cf7297dbe02feab1d711a2bf90045", + "sha256:269f0c5ff23639316b29f31df199f401e4cb87529eafff0c76828071635d417b", + "sha256:5de40649d4f88a15c9489ed37f88f053c15400257eeb18425ac7ed0a4e119400", + "sha256:7f78f963e62a61e294adb6ff5db901b629ef78cb2a1cfce3cf4eeba80c1c67aa", + "sha256:82469a0c1330a4beb3d42568f82dffa32226ced006e0b063719468dcd40ffdf0", + "sha256:8c2b7fa4d72781577ac45ab658da44c7518e6d96e2a50d04ecb0fd8f28b21d69", + "sha256:974535648f31c2b712a6b2595969f8ab370834080e00ab24e5dbb9d19b8bfb74", + "sha256:99272d6b6a68c7ae4391908fc15f6b8c9a6c345a46b632d7fdb7ef6c883a2bbb", + "sha256:9b64a4cc825ec4df262050c17e18f60252cdd94742b4ba1286bcfe481f1c0f26", + "sha256:9e9624440d754733eddbcd4614378c18713d2d9d0dc647cf9c72f64e39671be5", + "sha256:9ff16d994309b26a1cdf666a6309c1ef51ad4f72f99d3392bcd7b7139577a1f2", + "sha256:b33ebcd0222c1d77e61dbcd04a9fd139359bded86803063d3d2d197b796c63ce", + "sha256:bba52d72e16a554d1894a0cc74041da50eea99a8483e591a9edf1025a66843ab", + "sha256:bed7986547ce54d230fd8721aba6fd19459cdc6d315497b98686d0416efaff4e", + 
"sha256:c7f58a0e0e13fb44623b65b01052dae8e820ed9b8b654bb6296bc9c41f571b70", + "sha256:d58a4fa7910102500722defbde6e2816b0372a4fcc85c7e239323767c74f5cbc", + "sha256:f1ac2dc65105a53c1c2d72b1d3e98c2464a133b4067a51a3d2477b28449709a0" + ], + "version": "==2020.2.20" }, "requests": { "hashes": [ - "sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4", - "sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31" + "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee", + "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.22.0" + "version": "==2.23.0" }, "requests-toolbelt": { "hashes": [ @@ -562,12 +695,12 @@ }, "rope": { "hashes": [ - "sha256:6b728fdc3e98a83446c27a91fc5d56808a004f8beab7a31ab1d7224cecc7d969", - "sha256:c5c5a6a87f7b1a2095fb311135e2a3d1f194f5ecb96900fdd0a9100881f48aaf", - "sha256:f0dcf719b63200d492b85535ebe5ea9b29e0d0b8aebeb87fe03fc1a65924fdaf" + "sha256:52423a7eebb5306a6d63bdc91a7c657db51ac9babfb8341c9a1440831ecf3203", + "sha256:ae1fa2fd56f64f4cc9be46493ce54bed0dd12dee03980c61a4393d89d84029ad", + "sha256:d2830142c2e046f5fc26a022fe680675b6f48f81c7fc1f03a950706e746e9dfe" ], "index": "pypi", - "version": "==0.14.0" + "version": "==0.16.0" }, "scandir": { "hashes": [ @@ -586,27 +719,35 @@ "markers": "python_version < '3.5'", "version": "==1.10.0" }, + "singledispatch": { + "hashes": [ + "sha256:5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c", + "sha256:833b46966687b3de7f438c761ac475213e53b306740f1abfaa86e1d1aae56aa8" + ], + "markers": "python_version < '3.4'", + "version": "==3.4.0.3" + }, "six": { "hashes": [ - "sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c", - "sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73" + "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a", + "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.12.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.14.0" }, "snowballstemmer": { "hashes": [ - "sha256:919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128", - "sha256:9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89" + "sha256:209f257d7533fdb3cb73bdbd24f436239ca3b2fa67d56f6ff88e86be08cc5ef0", + "sha256:df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52" ], - "version": "==1.2.1" + "version": "==2.0.0" }, "soupsieve": { "hashes": [ - "sha256:72b5f1aea9101cf720a36bb2327ede866fd6f1a07b1e87c92a1cc18113cbc946", - "sha256:e4e9c053d59795e440163733a7fec6c5972210e1790c507e4c7b051d6c5259de" + "sha256:bdb0d917b03a1369ce964056fc195cfdff8819c40de04695a80bc813c3cfa1f5", + "sha256:e2c1c5dee4a1c36bcb790e0fabd5492d874b8ebd4617622c4f6a731701060dda" ], - "version": "==1.9.2" + "version": "==1.9.5" }, "sphinx": { "hashes": [ @@ -618,11 +759,11 @@ }, "sphinx-click": { "hashes": [ - "sha256:2c7847607d07bc0ddf28acff3aa639b2660d06c5d95d1efe89eca6494fc750de", - "sha256:814b2463b576dfafaf4a6f8ed9585f6d9696073ed5e4cca5b59d2dc9d29d3bc0" + "sha256:793c68b41c4a9435f953e2a27f9bf5883729037b7431f32b2776257c2966bd1b", + "sha256:8c6274666730686a65efbae0b4465879b030372333de3114aeb63c44204da32e" ], "index": "pypi", - "version": "==2.2.0" + "version": "==2.3.1" }, "sphinxcontrib-websupport": { "hashes": [ @@ 
-634,17 +775,17 @@ }, "stdeb": { "hashes": [ - "sha256:0ed2c2cc6b8ba21da7d646c6f37ca60b22e9e4950e3cec6bcd9c2e7e57e3747e" + "sha256:4d8351209dda2d26066980222e0d1855a315a68f9af48f0c10d743089afe7d4b" ], "markers": "sys_platform == 'linux'", - "version": "==0.8.5" + "version": "==0.9.0" }, "tap.py": { "hashes": [ - "sha256:8ad62ba6898fcef4913c67d468d0c4beae3109b74c03363538145e31b1840b29", - "sha256:f6532fd7483c5fdc2ed13575fa4494e7d037f797f8a2c6f8809a859be61271f5" + "sha256:a598bfaa2e224d71f2e86147c2ef822c18ff2e1b8ef006397e5056b08f92f699", + "sha256:f5eeeeebfd64e53d32661752bb4c288589a3babbb96db3f391a4ec29f1359c70" ], - "version": "==2.5" + "version": "==3.0" }, "termcolor": { "hashes": [ @@ -669,44 +810,70 @@ }, "tqdm": { "hashes": [ - "sha256:14a285392c32b6f8222ecfbcd217838f88e11630affe9006cd0e94c7eff3cb61", - "sha256:25d4c0ea02a305a688e7e9c2cdc8f862f989ef2a4701ab28ee963295f5b109ab" + "sha256:0d8b5afb66e23d80433102e9bd8b5c8b65d34c2a2255b2de58d97bd2ea8170fd", + "sha256:f35fb121bafa030bd94e74fcfd44f3c2830039a2ddef7fc87ef1c2d205237b24" ], "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==4.32.2" + "version": "==4.43.0" }, "twine": { "hashes": [ - "sha256:0fb0bfa3df4f62076cab5def36b1a71a2e4acb4d1fa5c97475b048117b1a6446", - "sha256:d6c29c933ecfc74e9b1d9fa13aa1f87c5d5770e119f5a4ce032092f0ff5b14dc" + "sha256:630fadd6e342e725930be6c696537e3f9ccc54331742b16245dab292a17d0460", + "sha256:a3d22aab467b4682a22de4a422632e79d07eebd07ff2a7079effb13f8a693787" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.13.0" + "version": "==1.15.0" + }, + "typed-ast": { + "hashes": [ + "sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355", + "sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919", + "sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa", + "sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652", + "sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75", + "sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01", + "sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d", + "sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1", + "sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907", + "sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c", + "sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3", + "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b", + "sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614", + "sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb", + "sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b", + "sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41", + "sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6", + "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34", + "sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe", + "sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4", + "sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7" + ], + "version": "==1.4.1" }, "typing": { "hashes": [ - "sha256:4027c5f6127a6267a435201981ba156de91ad0d1d98e9ddc2aa173453453492d", - "sha256:57dcf675a99b74d64dacf6fba08fb17cf7e3d5fdff53d4a30ea2a5e7e52543d4", - 
"sha256:a4c8473ce11a65999c8f59cb093e70686b6c84c98df58c1dae9b3b196089858a" + "sha256:91dfe6f3f706ee8cc32d38edbbf304e9b7583fb37108fef38229617f8b3eba23", + "sha256:c8cabb5ab8945cd2f54917be357d134db9cc1eb039e59d1606dc1e60cb1d9d36", + "sha256:f38d83c5a7a7086543a0f649564d661859c5146a85775ab90c0d2f93ffaa9714" ], "markers": "python_version < '3.5'", - "version": "==3.6.6" + "version": "==3.7.4.1" }, "urllib3": { "hashes": [ - "sha256:b246607a25ac80bedac05c6f282e3cdaf3afb65420fd024ac94435cabe6e18d1", - "sha256:dbe59173209418ae49d485b87d1681aefa36252ee85884c31346debd19463232" + "sha256:2f3db8b19923a873b3e5256dc9c2dedfa883e33d87c690d9c7913e1f40673cdc", + "sha256:87716c2d2a7121198ebcb7ce7cccf6ce5e9ba539041cfbaeecfb641dc0bf6acc" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' and python_version < '4'", - "version": "==1.25.3" + "version": "==1.25.8" }, "virtualenv": { "hashes": [ - "sha256:6cb2e4c18d22dbbe283d0a0c31bb7d90771a606b2cb3415323eea008eaee6a9d", - "sha256:909fe0d3f7c9151b2df0a2cb53e55bdb7b0d61469353ff7a49fd47b0f0ab9285" + "sha256:6f4c2882a943d20714076679f8dcc5675e953d6c29bfea3bc5d08bb6cdea5d36", + "sha256:cb1dab893f9e39b3e68d9118c555dcd86526d531c128c3f72e1551939723b72f" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==16.7.2" + "version": "==20.0.14" }, "virtualenv-clone": { "hashes": [ @@ -718,10 +885,10 @@ }, "wcwidth": { "hashes": [ - "sha256:3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e", - "sha256:f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c" + "sha256:cafe2186b3c009a04067022ce1dcd79cb38d8d65ee4f4791b8888d6599d1bbe1", + "sha256:ee73862862a156bf77ff92b09034fc4825dd3af9cf81bc5b360668d425f3c5f1" ], - "version": "==0.1.7" + "version": "==0.1.9" }, "webencodings": { "hashes": [ @@ -732,19 +899,19 @@ }, "werkzeug": { "hashes": [ - "sha256:865856ebb55c4dcd0630cdd8f3331a1847a819dda7e8c750d3db6f2aa6c0209c", - "sha256:a0b915f0815982fb2a09161cb8f31708052d0951c3ba433ccc5e1aa276507ca6" + "sha256:169ba8a33788476292d04186ab33b01d6add475033dfc07215e6d219cc077096", + "sha256:6dc65cf9091cf750012f56f2cad759fa9e879f511b5ff8685e456b4e3bf90d16" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.15.4" + "version": "==1.0.0" }, "zipp": { "hashes": [ - "sha256:8c1019c6aad13642199fbe458275ad6a84907634cc9f0989877ccc4a2840139d", - "sha256:ca943a7e809cc12257001ccfb99e3563da9af99d52f261725e96dfe0f9275bc3" + "sha256:c70410551488251b0fee67b460fb9a536af8d6f9f008ad10ac51f615b6a521b1", + "sha256:e0d9e63797e483a30d27e09fffd308c59a700d365ec34e93cc100844168bf921" ], - "markers": "python_version >= '2.7'", - "version": "==0.5.1" + "markers": "python_version < '3.8'", + "version": "==1.2.0" } } } diff --git a/pipenv/patched/piptools/repositories/pypi.py b/pipenv/patched/piptools/repositories/pypi.py index a0ea5647f7..a56cc6e640 100644 --- a/pipenv/patched/piptools/repositories/pypi.py +++ b/pipenv/patched/piptools/repositories/pypi.py @@ -5,7 +5,6 @@ import copy import hashlib import os -import sys from contextlib import contextmanager from functools import partial from shutil import rmtree diff --git a/pipenv/patched/safety/__init__.py b/pipenv/patched/safety/__init__.py index 69563274ac..e9a6e9658c 100644 --- a/pipenv/patched/safety/__init__.py +++ b/pipenv/patched/safety/__init__.py @@ -2,4 +2,4 @@ __author__ = """pyup.io""" __email__ = 'support@pyup.io' -__version__ = '1.8.5' +__version__ = '1.8.7' diff --git 
diff --git a/pipenv/patched/safety/safety.py b/pipenv/patched/safety/safety.py
index 871bd775e9..2fca3eb292 100644
--- a/pipenv/patched/safety/safety.py
+++ b/pipenv/patched/safety/safety.py
@@ -137,7 +137,7 @@ def check(packages, key, db_mirror, cached, ignore_ids, proxy):
             spec_set = SpecifierSet(specifiers=specifier)
             if spec_set.contains(pkg.version):
                 if not db_full:
-                    db_full = fetch_database(full=True, key=key, db=db_mirror)
+                    db_full = fetch_database(full=True, key=key, db=db_mirror, cached=cached, proxy=proxy)
                 for data in get_vulnerabilities(pkg=name, spec=specifier, db=db_full):
                     vuln_id = data.get("id").replace("pyup.io-", "")
                     if vuln_id and vuln_id not in ignore_ids:
diff --git a/pipenv/vendor/click/LICENSE.rst b/pipenv/vendor/click/LICENSE.rst
index 87ce152aaf..d12a849186 100644
--- a/pipenv/vendor/click/LICENSE.rst
+++ b/pipenv/vendor/click/LICENSE.rst
@@ -1,39 +1,28 @@
-Copyright © 2014 by the Pallets team.
+Copyright 2014 Pallets
 
-Some rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
 
-Redistribution and use in source and binary forms of the software as
-well as documentation, with or without modification, are permitted
-provided that the following conditions are met:
-
-- Redistributions of source code must retain the above copyright
+1.  Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 
-- Redistributions in binary form must reproduce the above copyright
+2.  Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 
-- Neither the name of the copyright holder nor the names of its
+3.  Neither the name of the copyright holder nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.
 
-THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
-USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
-
-----
-
-Click uses parts of optparse written by Gregory P. Ward and maintained
-by the Python Software Foundation. This is limited to code in parser.py.
-
-Copyright © 2001-2006 Gregory P. Ward. All rights reserved.
-Copyright © 2002-2006 Python Software Foundation. All rights reserved.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pipenv/vendor/click/__init__.py b/pipenv/vendor/click/__init__.py
index d3c33660a9..3910b80323 100644
--- a/pipenv/vendor/click/__init__.py
+++ b/pipenv/vendor/click/__init__.py
@@ -1,97 +1,79 @@
-# -*- coding: utf-8 -*-
 """
-click
-~~~~~
-
 Click is a simple Python module inspired by the stdlib optparse to make
 writing command line scripts fun. Unlike other modules, it's based
 around a simple API that does not come with too much magic and is
 composable.
-
-:copyright: © 2014 by the Pallets team.
-:license: BSD, see LICENSE.rst for more details.
 """
-
-# Core classes
-from .core import Context, BaseCommand, Command, MultiCommand, Group, \
-     CommandCollection, Parameter, Option, Argument
-
-# Globals
+from .core import Argument
+from .core import BaseCommand
+from .core import Command
+from .core import CommandCollection
+from .core import Context
+from .core import Group
+from .core import MultiCommand
+from .core import Option
+from .core import Parameter
+from .decorators import argument
+from .decorators import command
+from .decorators import confirmation_option
+from .decorators import group
+from .decorators import help_option
+from .decorators import make_pass_decorator
+from .decorators import option
+from .decorators import pass_context
+from .decorators import pass_obj
+from .decorators import password_option
+from .decorators import version_option
+from .exceptions import Abort
+from .exceptions import BadArgumentUsage
+from .exceptions import BadOptionUsage
+from .exceptions import BadParameter
+from .exceptions import ClickException
+from .exceptions import FileError
+from .exceptions import MissingParameter
+from .exceptions import NoSuchOption
+from .exceptions import UsageError
+from .formatting import HelpFormatter
+from .formatting import wrap_text
 from .globals import get_current_context
-
-# Decorators
-from .decorators import pass_context, pass_obj, \
-     command, group, argument, option, confirmation_option, \
-     password_option, version_option, help_option
-
-# Types
-from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
-     DateTime, STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED, FloatRange
-
-# Utilities
-from .utils import echo, get_binary_stream, get_text_stream, open_file, \
-     format_filename, get_app_dir, get_os_args
-
-# Terminal functions
-from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
-     progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
-     pause
-
-# Exceptions
-from .exceptions import ClickException, UsageError, BadParameter, \
-     FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
-     MissingParameter
-
-# Formatting
-from .formatting import HelpFormatter, wrap_text
-
-# Parsing
 from .parser import OptionParser
-
-
-__all__ = [
-    # Core classes
-    'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
-    'CommandCollection', 'Parameter', 'Option', 'Argument',
-
-    # Globals
-    'get_current_context',
-
-    # Decorators
'make_pass_decorator', 'command', 'group', - 'argument', 'option', 'confirmation_option', 'password_option', - 'version_option', 'help_option', - - # Types - 'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', - 'DateTime', 'STRING', 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED', - 'FloatRange', - - # Utilities - 'echo', 'get_binary_stream', 'get_text_stream', 'open_file', - 'format_filename', 'get_app_dir', 'get_os_args', - - # Terminal functions - 'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager', - 'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch', - 'getchar', 'pause', - - # Exceptions - 'ClickException', 'UsageError', 'BadParameter', 'FileError', - 'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage', - 'MissingParameter', - - # Formatting - 'HelpFormatter', 'wrap_text', - - # Parsing - 'OptionParser', -] - +from .termui import clear +from .termui import confirm +from .termui import echo_via_pager +from .termui import edit +from .termui import get_terminal_size +from .termui import getchar +from .termui import launch +from .termui import pause +from .termui import progressbar +from .termui import prompt +from .termui import secho +from .termui import style +from .termui import unstyle +from .types import BOOL +from .types import Choice +from .types import DateTime +from .types import File +from .types import FLOAT +from .types import FloatRange +from .types import INT +from .types import IntRange +from .types import ParamType +from .types import Path +from .types import STRING +from .types import Tuple +from .types import UNPROCESSED +from .types import UUID +from .utils import echo +from .utils import format_filename +from .utils import get_app_dir +from .utils import get_binary_stream +from .utils import get_os_args +from .utils import get_text_stream +from .utils import open_file # Controls if click should emit the warning about the use of unicode # literals. disable_unicode_literals_warning = False - -__version__ = '7.0' +__version__ = "7.1.1" diff --git a/pipenv/vendor/click/_bashcomplete.py b/pipenv/vendor/click/_bashcomplete.py index a5f1084c9a..8bca24480f 100644 --- a/pipenv/vendor/click/_bashcomplete.py +++ b/pipenv/vendor/click/_bashcomplete.py @@ -2,20 +2,22 @@ import os import re -from .utils import echo +from .core import Argument +from .core import MultiCommand +from .core import Option from .parser import split_arg_string -from .core import MultiCommand, Option, Argument from .types import Choice +from .utils import echo try: from collections import abc except ImportError: import collections as abc -WORDBREAK = '=' +WORDBREAK = "=" # Note, only BASH version 4.4 and later have the nosort option. -COMPLETION_SCRIPT_BASH = ''' +COMPLETION_SCRIPT_BASH = """ %(complete_func)s() { local IFS=$'\n' COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ @@ -28,7 +30,8 @@ local COMPLETION_OPTIONS="" local BASH_VERSION_ARR=(${BASH_VERSION//./ }) # Only BASH version 4.4 and later have the nosort option. - if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then + if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] \ +&& [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then COMPLETION_OPTIONS="-o nosort" fi @@ -36,13 +39,17 @@ } %(complete_func)setup -''' +""" + +COMPLETION_SCRIPT_ZSH = """ +#compdef %(script_names)s -COMPLETION_SCRIPT_ZSH = ''' %(complete_func)s() { local -a completions local -a completions_with_descriptions local -a response + (( ! 
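The bash, zsh, and fish completion scripts in this file all rely on the same handshake: the shell exports ``COMP_WORDS`` and ``COMP_CWORD`` plus the program's autocomplete variable, re-invokes the program, and reads one candidate per line from stdout. A minimal sketch of that round trip, using a hypothetical ``mycli`` entry point (not part of this patch)::

    import os
    import subprocess

    env = dict(os.environ)
    env["COMP_WORDS"] = "mycli --ver"    # the command line typed so far
    env["COMP_CWORD"] = "1"              # index of the word being completed
    env["_MYCLI_COMPLETE"] = "complete"  # tells Click to print completions and exit

    result = subprocess.run(["mycli"], env=env, capture_output=True, text=True)
    candidates = result.stdout.splitlines()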
$+commands[%(script_names)s] )) && return 1 + response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\ COMP_CWORD=$((CURRENT-1)) \\ %(autocomplete_var)s=\"complete_zsh\" \\ @@ -57,34 +64,51 @@ done if [ -n "$completions_with_descriptions" ]; then - _describe -V unsorted completions_with_descriptions -U -Q + _describe -V unsorted completions_with_descriptions -U fi if [ -n "$completions" ]; then - compadd -U -V unsorted -Q -a completions + compadd -U -V unsorted -a completions fi compstate[insert]="automenu" } compdef %(complete_func)s %(script_names)s -''' +""" + +COMPLETION_SCRIPT_FISH = ( + "complete --no-files --command %(script_names)s --arguments" + ' "(env %(autocomplete_var)s=complete_fish' + " COMP_WORDS=(commandline -cp) COMP_CWORD=(commandline -t)" + ' %(script_names)s)"' +) + +_completion_scripts = { + "bash": COMPLETION_SCRIPT_BASH, + "zsh": COMPLETION_SCRIPT_ZSH, + "fish": COMPLETION_SCRIPT_FISH, +} -_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]') +_invalid_ident_char_re = re.compile(r"[^a-zA-Z0-9_]") def get_completion_script(prog_name, complete_var, shell): - cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_')) - script = COMPLETION_SCRIPT_ZSH if shell == 'zsh' else COMPLETION_SCRIPT_BASH - return (script % { - 'complete_func': '_%s_completion' % cf_name, - 'script_names': prog_name, - 'autocomplete_var': complete_var, - }).strip() + ';' + cf_name = _invalid_ident_char_re.sub("", prog_name.replace("-", "_")) + script = _completion_scripts.get(shell, COMPLETION_SCRIPT_BASH) + return ( + script + % { + "complete_func": "_{}_completion".format(cf_name), + "script_names": prog_name, + "autocomplete_var": complete_var, + } + ).strip() + ";" def resolve_ctx(cli, prog_name, args): - """ - Parse into a hierarchy of contexts. Contexts are connected through the parent variable. + """Parse into a hierarchy of contexts. Contexts are connected + through the parent variable. + :param cli: command definition :param prog_name: the program that is running :param args: full list of args @@ -98,8 +122,9 @@ def resolve_ctx(cli, prog_name, args): cmd_name, cmd, args = ctx.command.resolve_command(ctx, args) if cmd is None: return ctx - ctx = cmd.make_context(cmd_name, args, parent=ctx, - resilient_parsing=True) + ctx = cmd.make_context( + cmd_name, args, parent=ctx, resilient_parsing=True + ) args = ctx.protected_args + ctx.args else: # Walk chained subcommand contexts saving the last one. @@ -107,10 +132,14 @@ def resolve_ctx(cli, prog_name, args): cmd_name, cmd, args = ctx.command.resolve_command(ctx, args) if cmd is None: return ctx - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False, - resilient_parsing=True) + sub_ctx = cmd.make_context( + cmd_name, + args, + parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False, + resilient_parsing=True, + ) args = sub_ctx.args ctx = sub_ctx args = sub_ctx.protected_args + sub_ctx.args @@ -122,25 +151,29 @@ def resolve_ctx(cli, prog_name, args): def start_of_option(param_str): """ :param param_str: param_str to check - :return: whether or not this is the start of an option declaration (i.e. starts "-" or "--") + :return: whether or not this is the start of an option declaration + (i.e. 
starts "-" or "--") """ - return param_str and param_str[:1] == '-' + return param_str and param_str[:1] == "-" def is_incomplete_option(all_args, cmd_param): """ :param all_args: the full original list of args supplied :param cmd_param: the current command paramter - :return: whether or not the last option declaration (i.e. starts "-" or "--") is incomplete and - corresponds to this cmd_param. In other words whether this cmd_param option can still accept - values + :return: whether or not the last option declaration (i.e. starts + "-" or "--") is incomplete and corresponds to this cmd_param. In + other words whether this cmd_param option can still accept + values """ if not isinstance(cmd_param, Option): return False if cmd_param.is_flag: return False last_option = None - for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])): + for index, arg_str in enumerate( + reversed([arg for arg in all_args if arg != WORDBREAK]) + ): if index + 1 > cmd_param.nargs: break if start_of_option(arg_str): @@ -151,10 +184,12 @@ def is_incomplete_option(all_args, cmd_param): def is_incomplete_argument(current_params, cmd_param): """ - :param current_params: the current params and values for this argument as already entered + :param current_params: the current params and values for this + argument as already entered :param cmd_param: the current command parameter - :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In - other words whether or not the this cmd_param argument can still accept values + :return: whether or not the last argument is incomplete and + corresponds to this cmd_param. In other words whether or not the + this cmd_param argument can still accept values """ if not isinstance(cmd_param, Argument): return False @@ -163,8 +198,11 @@ def is_incomplete_argument(current_params, cmd_param): return True if cmd_param.nargs == -1: return True - if isinstance(current_param_values, abc.Iterable) \ - and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs: + if ( + isinstance(current_param_values, abc.Iterable) + and cmd_param.nargs > 1 + and len(current_param_values) < cmd_param.nargs + ): return True return False @@ -180,14 +218,16 @@ def get_user_autocompletions(ctx, args, incomplete, cmd_param): results = [] if isinstance(cmd_param.type, Choice): # Choices don't support descriptions. - results = [(c, None) - for c in cmd_param.type.choices if str(c).startswith(incomplete)] + results = [ + (c, None) for c in cmd_param.type.choices if str(c).startswith(incomplete) + ] elif cmd_param.autocompletion is not None: - dynamic_completions = cmd_param.autocompletion(ctx=ctx, - args=args, - incomplete=incomplete) - results = [c if isinstance(c, tuple) else (c, None) - for c in dynamic_completions] + dynamic_completions = cmd_param.autocompletion( + ctx=ctx, args=args, incomplete=incomplete + ) + results = [ + c if isinstance(c, tuple) else (c, None) for c in dynamic_completions + ] return results @@ -208,15 +248,25 @@ def add_subcommand_completions(ctx, incomplete, completions_out): # Add subcommand completions. 
if isinstance(ctx.command, MultiCommand): completions_out.extend( - [(c.name, c.get_short_help_str()) for c in get_visible_commands_starting_with(ctx, incomplete)]) - - # Walk up the context list and add any other completion possibilities from chained commands + [ + (c.name, c.get_short_help_str()) + for c in get_visible_commands_starting_with(ctx, incomplete) + ] + ) + + # Walk up the context list and add any other completion + # possibilities from chained commands while ctx.parent is not None: ctx = ctx.parent if isinstance(ctx.command, MultiCommand) and ctx.command.chain: - remaining_commands = [c for c in get_visible_commands_starting_with(ctx, incomplete) - if c.name not in ctx.protected_args] - completions_out.extend([(c.name, c.get_short_help_str()) for c in remaining_commands]) + remaining_commands = [ + c + for c in get_visible_commands_starting_with(ctx, incomplete) + if c.name not in ctx.protected_args + ] + completions_out.extend( + [(c.name, c.get_short_help_str()) for c in remaining_commands] + ) def get_choices(cli, prog_name, args, incomplete): @@ -233,23 +283,30 @@ def get_choices(cli, prog_name, args, incomplete): if ctx is None: return [] - # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse - # without the '=' + has_double_dash = "--" in all_args + + # In newer versions of bash long opts with '='s are partitioned, but + # it's easier to parse without the '=' if start_of_option(incomplete) and WORDBREAK in incomplete: partition_incomplete = incomplete.partition(WORDBREAK) all_args.append(partition_incomplete[0]) incomplete = partition_incomplete[2] elif incomplete == WORDBREAK: - incomplete = '' + incomplete = "" completions = [] - if start_of_option(incomplete): + if not has_double_dash and start_of_option(incomplete): # completions for partial options for param in ctx.command.params: if isinstance(param, Option) and not param.hidden: - param_opts = [param_opt for param_opt in param.opts + - param.secondary_opts if param_opt not in all_args or param.multiple] - completions.extend([(o, param.help) for o in param_opts if o.startswith(incomplete)]) + param_opts = [ + param_opt + for param_opt in param.opts + param.secondary_opts + if param_opt not in all_args or param.multiple + ] + completions.extend( + [(o, param.help) for o in param_opts if o.startswith(incomplete)] + ) return completions # completion for option values from user supplied values for param in ctx.command.params: @@ -266,28 +323,53 @@ def get_choices(cli, prog_name, args, incomplete): def do_complete(cli, prog_name, include_descriptions): - cwords = split_arg_string(os.environ['COMP_WORDS']) - cword = int(os.environ['COMP_CWORD']) + cwords = split_arg_string(os.environ["COMP_WORDS"]) + cword = int(os.environ["COMP_CWORD"]) args = cwords[1:cword] try: incomplete = cwords[cword] except IndexError: - incomplete = '' + incomplete = "" for item in get_choices(cli, prog_name, args, incomplete): echo(item[0]) if include_descriptions: - # ZSH has trouble dealing with empty array parameters when returned from commands, so use a well defined character '_' to indicate no description is present. - echo(item[1] if item[1] else '_') + # ZSH has trouble dealing with empty array parameters when + # returned from commands, use '_' to indicate no description + # is present. 
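The new ``has_double_dash`` guard in ``get_choices`` mirrors the POSIX ``--`` convention: once the user has typed a literal ``--``, nothing after it may complete as an option. The check itself reduces to::

    all_args = ["install", "--", "--weird-filename"]
    incomplete = "--w"
    # Option completion is suppressed; argument completion still applies.
    skip_options = "--" in all_args and incomplete.startswith("-")
    assert skip_options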
+ echo(item[1] if item[1] else "_") + + return True + + +def do_complete_fish(cli, prog_name): + cwords = split_arg_string(os.environ["COMP_WORDS"]) + incomplete = os.environ["COMP_CWORD"] + args = cwords[1:] + + for item in get_choices(cli, prog_name, args, incomplete): + if item[1]: + echo("{arg}\t{desc}".format(arg=item[0], desc=item[1])) + else: + echo(item[0]) return True def bashcomplete(cli, prog_name, complete_var, complete_instr): - if complete_instr.startswith('source'): - shell = 'zsh' if complete_instr == 'source_zsh' else 'bash' + if "_" in complete_instr: + command, shell = complete_instr.split("_", 1) + else: + command = complete_instr + shell = "bash" + + if command == "source": echo(get_completion_script(prog_name, complete_var, shell)) return True - elif complete_instr == 'complete' or complete_instr == 'complete_zsh': - return do_complete(cli, prog_name, complete_instr == 'complete_zsh') + elif command == "complete": + if shell == "fish": + return do_complete_fish(cli, prog_name) + elif shell in {"bash", "zsh"}: + return do_complete(cli, prog_name, shell == "zsh") + return False diff --git a/pipenv/vendor/click/_compat.py b/pipenv/vendor/click/_compat.py index 937e2301d4..ed57a18f95 100644 --- a/pipenv/vendor/click/_compat.py +++ b/pipenv/vendor/click/_compat.py @@ -1,67 +1,80 @@ -import re +# flake8: noqa +import codecs import io import os +import re import sys -import codecs from weakref import WeakKeyDictionary - PY2 = sys.version_info[0] == 2 -CYGWIN = sys.platform.startswith('cygwin') +CYGWIN = sys.platform.startswith("cygwin") +MSYS2 = sys.platform.startswith("win") and ("GCC" in sys.version) # Determine local App Engine environment, per Google's own suggestion -APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and - 'Development/' in os.environ['SERVER_SOFTWARE']) -WIN = sys.platform.startswith('win') and not APP_ENGINE +APP_ENGINE = "APPENGINE_RUNTIME" in os.environ and "Development/" in os.environ.get( + "SERVER_SOFTWARE", "" +) +WIN = sys.platform.startswith("win") and not APP_ENGINE and not MSYS2 DEFAULT_COLUMNS = 80 -_ansi_re = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])') +_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") def get_filesystem_encoding(): return sys.getfilesystemencoding() or sys.getdefaultencoding() -def _make_text_stream(stream, encoding, errors, - force_readable=False, force_writable=False): +def _make_text_stream( + stream, encoding, errors, force_readable=False, force_writable=False +): if encoding is None: encoding = get_best_encoding(stream) if errors is None: - errors = 'replace' - return _NonClosingTextIOWrapper(stream, encoding, errors, - line_buffering=True, - force_readable=force_readable, - force_writable=force_writable) + errors = "replace" + return _NonClosingTextIOWrapper( + stream, + encoding, + errors, + line_buffering=True, + force_readable=force_readable, + force_writable=force_writable, + ) def is_ascii_encoding(encoding): """Checks if a given encoding is ascii.""" try: - return codecs.lookup(encoding).name == 'ascii' + return codecs.lookup(encoding).name == "ascii" except LookupError: return False def get_best_encoding(stream): """Returns the default stream encoding if not found.""" - rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding() + rv = getattr(stream, "encoding", None) or sys.getdefaultencoding() if is_ascii_encoding(rv): - return 'utf-8' + return "utf-8" return rv class _NonClosingTextIOWrapper(io.TextIOWrapper): - - def __init__(self, stream, encoding, errors, - force_readable=False, force_writable=False, 
**extra): - self._stream = stream = _FixupStream(stream, force_readable, - force_writable) + def __init__( + self, + stream, + encoding, + errors, + force_readable=False, + force_writable=False, + **extra + ): + self._stream = stream = _FixupStream(stream, force_readable, force_writable) io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra) # The io module is a place where the Python 3 text behavior # was forced upon Python 2, so we need to unbreak # it to look like Python 2. if PY2: + def write(self, x): if isinstance(x, str) or is_bytes(x): try: @@ -105,7 +118,7 @@ def __getattr__(self, name): return getattr(self._stream, name) def read1(self, size): - f = getattr(self._stream, 'read1', None) + f = getattr(self._stream, "read1", None) if f is not None: return f(size) # We only dispatch to readline instead of read in Python 2 as we @@ -118,7 +131,7 @@ def read1(self, size): def readable(self): if self._force_readable: return True - x = getattr(self._stream, 'readable', None) + x = getattr(self._stream, "readable", None) if x is not None: return x() try: @@ -130,20 +143,20 @@ def readable(self): def writable(self): if self._force_writable: return True - x = getattr(self._stream, 'writable', None) + x = getattr(self._stream, "writable", None) if x is not None: return x() try: - self._stream.write('') + self._stream.write("") except Exception: try: - self._stream.write(b'') + self._stream.write(b"") except Exception: return False return True def seekable(self): - x = getattr(self._stream, 'seekable', None) + x = getattr(self._stream, "seekable", None) if x is not None: return x() try: @@ -155,17 +168,18 @@ def seekable(self): if PY2: text_type = unicode - bytes = str raw_input = raw_input string_types = (str, unicode) int_types = (int, long) iteritems = lambda x: x.iteritems() range_type = xrange + from pipes import quote as shlex_quote + def is_bytes(x): return isinstance(x, (buffer, bytearray)) - _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$') + _identifier_re = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$") # For Windows, we need to force stdout/stdin/stderr to binary if it's # fetched for that. 
This obviously is not the most correct way to do @@ -193,6 +207,7 @@ def set_binary_mode(f): except ImportError: pass else: + def set_binary_mode(f): try: fileno = f.fileno() @@ -207,6 +222,7 @@ def set_binary_mode(f): except ImportError: pass else: + def set_binary_mode(f): try: fileno = f.fileno() @@ -224,42 +240,42 @@ def get_binary_stdin(): return set_binary_mode(sys.stdin) def get_binary_stdout(): - _wrap_std_stream('stdout') + _wrap_std_stream("stdout") return set_binary_mode(sys.stdout) def get_binary_stderr(): - _wrap_std_stream('stderr') + _wrap_std_stream("stderr") return set_binary_mode(sys.stderr) def get_text_stdin(encoding=None, errors=None): rv = _get_windows_console_stream(sys.stdin, encoding, errors) if rv is not None: return rv - return _make_text_stream(sys.stdin, encoding, errors, - force_readable=True) + return _make_text_stream(sys.stdin, encoding, errors, force_readable=True) def get_text_stdout(encoding=None, errors=None): - _wrap_std_stream('stdout') + _wrap_std_stream("stdout") rv = _get_windows_console_stream(sys.stdout, encoding, errors) if rv is not None: return rv - return _make_text_stream(sys.stdout, encoding, errors, - force_writable=True) + return _make_text_stream(sys.stdout, encoding, errors, force_writable=True) def get_text_stderr(encoding=None, errors=None): - _wrap_std_stream('stderr') + _wrap_std_stream("stderr") rv = _get_windows_console_stream(sys.stderr, encoding, errors) if rv is not None: return rv - return _make_text_stream(sys.stderr, encoding, errors, - force_writable=True) + return _make_text_stream(sys.stderr, encoding, errors, force_writable=True) def filename_to_ui(value): if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), 'replace') + value = value.decode(get_filesystem_encoding(), "replace") return value + + else: import io + text_type = str raw_input = input string_types = (str,) @@ -268,6 +284,8 @@ def filename_to_ui(value): isidentifier = lambda x: x.isidentifier() iteritems = lambda x: iter(x.items()) + from shlex import quote as shlex_quote + def is_bytes(x): return isinstance(x, (bytes, memoryview, bytearray)) @@ -281,10 +299,10 @@ def _is_binary_reader(stream, default=False): def _is_binary_writer(stream, default=False): try: - stream.write(b'') + stream.write(b"") except Exception: try: - stream.write('') + stream.write("") return False except Exception: pass @@ -299,7 +317,7 @@ def _find_binary_reader(stream): if _is_binary_reader(stream, False): return stream - buf = getattr(stream, 'buffer', None) + buf = getattr(stream, "buffer", None) # Same situation here; this time we assume that the buffer is # actually binary in case it's closed. @@ -314,7 +332,7 @@ def _find_binary_writer(stream): if _is_binary_writer(stream, False): return stream - buf = getattr(stream, 'buffer', None) + buf = getattr(stream, "buffer", None) # Same situation here; this time we assume that the buffer is # actually binary in case it's closed. @@ -327,136 +345,142 @@ def _stream_is_misconfigured(stream): # to ASCII. This appears to happen in certain unittest # environments. It's not quite clear what the correct behavior is # but this at least will force Click to recover somehow. 
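The probe performed by ``_is_binary_reader``/``_is_binary_writer`` is worth spelling out: read or write zero bytes and see whether the stream deals in ``bytes``. If the text layer fails, ``_find_binary_reader`` falls back to the ``buffer`` attribute of a ``TextIOWrapper``. A condensed, runnable restatement::

    import io

    def is_binary_reader(stream, default=False):
        try:
            return isinstance(stream.read(0), bytes)
        except Exception:
            return default

    text = io.TextIOWrapper(io.BytesIO(b"data"), encoding="utf-8")
    assert not is_binary_reader(text)     # reads str, not bytes
    assert is_binary_reader(text.buffer)  # the underlying binary layer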
- return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii') + return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii") - def _is_compatible_text_stream(stream, encoding, errors): - stream_encoding = getattr(stream, 'encoding', None) - stream_errors = getattr(stream, 'errors', None) + def _is_compat_stream_attr(stream, attr, value): + """A stream attribute is compatible if it is equal to the + desired value or the desired value is unset and the attribute + has a value. + """ + stream_value = getattr(stream, attr, None) + return stream_value == value or (value is None and stream_value is not None) - # Perfect match. - if stream_encoding == encoding and stream_errors == errors: - return True - - # Otherwise, it's only a compatible stream if we did not ask for - # an encoding. - if encoding is None: - return stream_encoding is not None - - return False - - def _force_correct_text_reader(text_reader, encoding, errors, - force_readable=False): - if _is_binary_reader(text_reader, False): - binary_reader = text_reader + def _is_compatible_text_stream(stream, encoding, errors): + """Check if a stream's encoding and errors attributes are + compatible with the desired values. + """ + return _is_compat_stream_attr( + stream, "encoding", encoding + ) and _is_compat_stream_attr(stream, "errors", errors) + + def _force_correct_text_stream( + text_stream, + encoding, + errors, + is_binary, + find_binary, + force_readable=False, + force_writable=False, + ): + if is_binary(text_stream, False): + binary_reader = text_stream else: - # If there is no target encoding set, we need to verify that the - # reader is not actually misconfigured. - if encoding is None and not _stream_is_misconfigured(text_reader): - return text_reader - - if _is_compatible_text_stream(text_reader, encoding, errors): - return text_reader - - # If the reader has no encoding, we try to find the underlying - # binary reader for it. If that fails because the environment is - # misconfigured, we silently go with the same reader because this - # is too common to happen. In that case, mojibake is better than - # exceptions. - binary_reader = _find_binary_reader(text_reader) + # If the stream looks compatible, and won't default to a + # misconfigured ascii encoding, return it as-is. + if _is_compatible_text_stream(text_stream, encoding, errors) and not ( + encoding is None and _stream_is_misconfigured(text_stream) + ): + return text_stream + + # Otherwise, get the underlying binary reader. + binary_reader = find_binary(text_stream) + + # If that's not possible, silently use the original reader + # and get mojibake instead of exceptions. if binary_reader is None: - return text_reader + return text_stream - # At this point, we default the errors to replace instead of strict - # because nobody handles those errors anyways and at this point - # we're so fundamentally fucked that nothing can repair it. - if errors is None: - errors = 'replace' - return _make_text_stream(binary_reader, encoding, errors, - force_readable=force_readable) - - def _force_correct_text_writer(text_writer, encoding, errors, - force_writable=False): - if _is_binary_writer(text_writer, False): - binary_writer = text_writer - else: - # If there is no target encoding set, we need to verify that the - # writer is not actually misconfigured. 
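The refactored compatibility check above is easiest to read as a truth table: an attribute is compatible when it equals the requested value, or when nothing specific was requested and the stream has some value. Restated outside the class for clarity::

    def is_compat(stream_value, requested):
        return stream_value == requested or (
            requested is None and stream_value is not None
        )

    assert is_compat("utf-8", "utf-8")   # exact match
    assert is_compat("utf-8", None)      # caller didn't care; stream configured
    assert is_compat(None, None)         # equal values match, even None
    assert not is_compat(None, "utf-8")  # stream can't satisfy an explicit request

The separate ``_stream_is_misconfigured`` check is what still rejects the all-``None``/ASCII case.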
- if encoding is None and not _stream_is_misconfigured(text_writer): - return text_writer - - if _is_compatible_text_stream(text_writer, encoding, errors): - return text_writer - - # If the writer has no encoding, we try to find the underlying - # binary writer for it. If that fails because the environment is - # misconfigured, we silently go with the same writer because this - # is too common to happen. In that case, mojibake is better than - # exceptions. - binary_writer = _find_binary_writer(text_writer) - if binary_writer is None: - return text_writer - - # At this point, we default the errors to replace instead of strict - # because nobody handles those errors anyways and at this point - # we're so fundamentally fucked that nothing can repair it. + # Default errors to replace instead of strict in order to get + # something that works. if errors is None: - errors = 'replace' - return _make_text_stream(binary_writer, encoding, errors, - force_writable=force_writable) + errors = "replace" + + # Wrap the binary stream in a text stream with the correct + # encoding parameters. + return _make_text_stream( + binary_reader, + encoding, + errors, + force_readable=force_readable, + force_writable=force_writable, + ) + + def _force_correct_text_reader(text_reader, encoding, errors, force_readable=False): + return _force_correct_text_stream( + text_reader, + encoding, + errors, + _is_binary_reader, + _find_binary_reader, + force_readable=force_readable, + ) + + def _force_correct_text_writer(text_writer, encoding, errors, force_writable=False): + return _force_correct_text_stream( + text_writer, + encoding, + errors, + _is_binary_writer, + _find_binary_writer, + force_writable=force_writable, + ) def get_binary_stdin(): reader = _find_binary_reader(sys.stdin) if reader is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stdin.') + raise RuntimeError("Was not able to determine binary stream for sys.stdin.") return reader def get_binary_stdout(): writer = _find_binary_writer(sys.stdout) if writer is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stdout.') + raise RuntimeError( + "Was not able to determine binary stream for sys.stdout." + ) return writer def get_binary_stderr(): writer = _find_binary_writer(sys.stderr) if writer is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stderr.') + raise RuntimeError( + "Was not able to determine binary stream for sys.stderr." 
+ ) return writer def get_text_stdin(encoding=None, errors=None): rv = _get_windows_console_stream(sys.stdin, encoding, errors) if rv is not None: return rv - return _force_correct_text_reader(sys.stdin, encoding, errors, - force_readable=True) + return _force_correct_text_reader( + sys.stdin, encoding, errors, force_readable=True + ) def get_text_stdout(encoding=None, errors=None): rv = _get_windows_console_stream(sys.stdout, encoding, errors) if rv is not None: return rv - return _force_correct_text_writer(sys.stdout, encoding, errors, - force_writable=True) + return _force_correct_text_writer( + sys.stdout, encoding, errors, force_writable=True + ) def get_text_stderr(encoding=None, errors=None): rv = _get_windows_console_stream(sys.stderr, encoding, errors) if rv is not None: return rv - return _force_correct_text_writer(sys.stderr, encoding, errors, - force_writable=True) + return _force_correct_text_writer( + sys.stderr, encoding, errors, force_writable=True + ) def filename_to_ui(value): if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), 'replace') + value = value.decode(get_filesystem_encoding(), "replace") else: - value = value.encode('utf-8', 'surrogateescape') \ - .decode('utf-8', 'replace') + value = value.encode("utf-8", "surrogateescape").decode("utf-8", "replace") return value def get_streerror(e, default=None): - if hasattr(e, 'strerror'): + if hasattr(e, "strerror"): msg = e.strerror else: if default is not None: @@ -464,60 +488,107 @@ def get_streerror(e, default=None): else: msg = str(e) if isinstance(msg, bytes): - msg = msg.decode('utf-8', 'replace') + msg = msg.decode("utf-8", "replace") return msg -def open_stream(filename, mode='r', encoding=None, errors='strict', - atomic=False): +def _wrap_io_open(file, mode, encoding, errors): + """On Python 2, :func:`io.open` returns a text file wrapper that + requires passing ``unicode`` to ``write``. Need to open the file in + binary mode then wrap it in a subclass that can write ``str`` and + ``unicode``. + + Also handles not passing ``encoding`` and ``errors`` in binary mode. + """ + binary = "b" in mode + + if binary: + kwargs = {} + else: + kwargs = {"encoding": encoding, "errors": errors} + + if not PY2 or binary: + return io.open(file, mode, **kwargs) + + f = io.open(file, "{}b".format(mode.replace("t", ""))) + return _make_text_stream(f, **kwargs) + + +def open_stream(filename, mode="r", encoding=None, errors="strict", atomic=False): + binary = "b" in mode + # Standard streams first. These are simple because they don't need # special handling for the atomic flag. It's entirely ignored. - if filename == '-': - if any(m in mode for m in ['w', 'a', 'x']): - if 'b' in mode: + if filename == "-": + if any(m in mode for m in ["w", "a", "x"]): + if binary: return get_binary_stdout(), False return get_text_stdout(encoding=encoding, errors=errors), False - if 'b' in mode: + if binary: return get_binary_stdin(), False return get_text_stdin(encoding=encoding, errors=errors), False # Non-atomic writes directly go out through the regular open functions. if not atomic: - if encoding is None: - return open(filename, mode), True - return io.open(filename, mode, encoding=encoding, errors=errors), True + return _wrap_io_open(filename, mode, encoding, errors), True # Some usability stuff for atomic writes - if 'a' in mode: + if "a" in mode: raise ValueError( - 'Appending to an existing file is not supported, because that ' - 'would involve an expensive `copy`-operation to a temporary ' - 'file. 
Open the file in normal `w`-mode and copy explicitly ' - 'if that\'s what you\'re after.' + "Appending to an existing file is not supported, because that" + " would involve an expensive `copy`-operation to a temporary" + " file. Open the file in normal `w`-mode and copy explicitly" + " if that's what you're after." ) - if 'x' in mode: - raise ValueError('Use the `overwrite`-parameter instead.') - if 'w' not in mode: - raise ValueError('Atomic writes only make sense with `w`-mode.') + if "x" in mode: + raise ValueError("Use the `overwrite`-parameter instead.") + if "w" not in mode: + raise ValueError("Atomic writes only make sense with `w`-mode.") # Atomic writes are more complicated. They work by opening a file # as a proxy in the same folder and then using the fdopen # functionality to wrap it in a Python file. Then we wrap it in an # atomic file that moves the file over on close. - import tempfile - fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename), - prefix='.__atomic-write') + import errno + import random - if encoding is not None: - f = io.open(fd, mode, encoding=encoding, errors=errors) - else: - f = os.fdopen(fd, mode) + try: + perm = os.stat(filename).st_mode + except OSError: + perm = None + + flags = os.O_RDWR | os.O_CREAT | os.O_EXCL + + if binary: + flags |= getattr(os, "O_BINARY", 0) + while True: + tmp_filename = os.path.join( + os.path.dirname(filename), + ".__atomic-write{:08x}".format(random.randrange(1 << 32)), + ) + try: + fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm) + break + except OSError as e: + if e.errno == errno.EEXIST or ( + os.name == "nt" + and e.errno == errno.EACCES + and os.path.isdir(e.filename) + and os.access(e.filename, os.W_OK) + ): + continue + raise + + if perm is not None: + os.chmod(tmp_filename, perm) # in case perm includes bits in umask + + f = _wrap_io_open(fd, mode, encoding, errors) return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True # Used in a destructor call, needs extra protection from interpreter cleanup. -if hasattr(os, 'replace'): +if hasattr(os, "replace"): _replace = os.replace _can_replace = True else: @@ -526,7 +597,6 @@ def open_stream(filename, mode='r', encoding=None, errors='strict', class _AtomicFile(object): - def __init__(self, f, tmp_filename, real_filename): self._f = f self._tmp_filename = tmp_filename @@ -568,14 +638,26 @@ def __repr__(self): def strip_ansi(value): - return _ansi_re.sub('', value) + return _ansi_re.sub("", value) + + +def _is_jupyter_kernel_output(stream): + if WIN: + # TODO: Couldn't test on Windows, should't try to support until + # someone tests the details wrt colorama. 
+ return + + while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)): + stream = stream._stream + + return stream.__class__.__module__.startswith("ipykernel.") def should_strip_ansi(stream=None, color=None): if color is None: if stream is None: stream = sys.stdin - return not isatty(stream) + return not isatty(stream) and not _is_jupyter_kernel_output(stream) return not color @@ -590,16 +672,18 @@ def should_strip_ansi(stream=None, color=None): def _get_argv_encoding(): import locale + return locale.getpreferredencoding() if PY2: - def raw_input(prompt=''): + + def raw_input(prompt=""): sys.stderr.flush() if prompt: stdout = _default_text_stdout() stdout.write(prompt) stdin = _default_text_stdin() - return stdin.readline().rstrip('\r\n') + return stdin.readline().rstrip("\r\n") try: import colorama @@ -641,11 +725,15 @@ def _safe_write(s): def get_winterm_size(): win = colorama.win32.GetConsoleScreenBufferInfo( - colorama.win32.STDOUT).srWindow + colorama.win32.STDOUT + ).srWindow return win.Right - win.Left, win.Bottom - win.Top + + else: + def _get_argv_encoding(): - return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding() + return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding() _get_windows_console_stream = lambda *x: None _wrap_std_stream = lambda *x: None @@ -664,6 +752,7 @@ def isatty(stream): def _make_cached_stream_func(src_func, wrapper_func): cache = WeakKeyDictionary() + def func(): stream = src_func() try: @@ -679,25 +768,23 @@ def func(): except Exception: pass return rv + return func -_default_text_stdin = _make_cached_stream_func( - lambda: sys.stdin, get_text_stdin) -_default_text_stdout = _make_cached_stream_func( - lambda: sys.stdout, get_text_stdout) -_default_text_stderr = _make_cached_stream_func( - lambda: sys.stderr, get_text_stderr) +_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin) +_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout) +_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr) binary_streams = { - 'stdin': get_binary_stdin, - 'stdout': get_binary_stdout, - 'stderr': get_binary_stderr, + "stdin": get_binary_stdin, + "stdout": get_binary_stdout, + "stderr": get_binary_stderr, } text_streams = { - 'stdin': get_text_stdin, - 'stdout': get_text_stdout, - 'stderr': get_text_stderr, + "stdin": get_text_stdin, + "stdout": get_text_stdout, + "stderr": get_text_stderr, } diff --git a/pipenv/vendor/click/_termui_impl.py b/pipenv/vendor/click/_termui_impl.py index 00a8e5ef1c..c6e86cc010 100644 --- a/pipenv/vendor/click/_termui_impl.py +++ b/pipenv/vendor/click/_termui_impl.py @@ -1,34 +1,35 @@ # -*- coding: utf-8 -*- """ -click._termui_impl -~~~~~~~~~~~~~~~~~~ - This module contains implementations for the termui module. To keep the import time of Click down, some infrequently used functionality is placed in this module and only imported as needed. - -:copyright: © 2014 by the Pallets team. -:license: BSD, see LICENSE.rst for more details. 
""" - +import contextlib +import math import os import sys import time -import math -import contextlib -from ._compat import _default_text_stdout, range_type, PY2, isatty, \ - open_stream, strip_ansi, term_len, get_best_encoding, WIN, int_types, \ - CYGWIN -from .utils import echo -from .exceptions import ClickException +from ._compat import _default_text_stdout +from ._compat import CYGWIN +from ._compat import get_best_encoding +from ._compat import int_types +from ._compat import isatty +from ._compat import open_stream +from ._compat import range_type +from ._compat import shlex_quote +from ._compat import strip_ansi +from ._compat import term_len +from ._compat import WIN +from .exceptions import ClickException +from .utils import echo -if os.name == 'nt': - BEFORE_BAR = '\r' - AFTER_BAR = '\n' +if os.name == "nt": + BEFORE_BAR = "\r" + AFTER_BAR = "\n" else: - BEFORE_BAR = '\r\033[?25l' - AFTER_BAR = '\033[?25h\n' + BEFORE_BAR = "\r\033[?25l" + AFTER_BAR = "\033[?25h\n" def _length_hint(obj): @@ -44,19 +45,29 @@ def _length_hint(obj): hint = get_hint(obj) except TypeError: return None - if hint is NotImplemented or \ - not isinstance(hint, int_types) or \ - hint < 0: + if hint is NotImplemented or not isinstance(hint, int_types) or hint < 0: return None return hint class ProgressBar(object): - - def __init__(self, iterable, length=None, fill_char='#', empty_char=' ', - bar_template='%(bar)s', info_sep=' ', show_eta=True, - show_percent=None, show_pos=False, item_show_func=None, - label=None, file=None, color=None, width=30): + def __init__( + self, + iterable, + length=None, + fill_char="#", + empty_char=" ", + bar_template="%(bar)s", + info_sep=" ", + show_eta=True, + show_percent=None, + show_pos=False, + item_show_func=None, + label=None, + file=None, + color=None, + width=30, + ): self.fill_char = fill_char self.empty_char = empty_char self.bar_template = bar_template @@ -65,7 +76,7 @@ def __init__(self, iterable, length=None, fill_char='#', empty_char=' ', self.show_percent = show_percent self.show_pos = show_pos self.item_show_func = item_show_func - self.label = label or '' + self.label = label or "" if file is None: file = _default_text_stdout() self.file = file @@ -77,7 +88,7 @@ def __init__(self, iterable, length=None, fill_char='#', empty_char=' ', length = _length_hint(iterable) if iterable is None: if length is None: - raise TypeError('iterable or length is required') + raise TypeError("iterable or length is required") iterable = range_type(length) self.iter = iter(iterable) self.length = length @@ -104,10 +115,21 @@ def __exit__(self, exc_type, exc_value, tb): def __iter__(self): if not self.entered: - raise RuntimeError('You need to use progress bars in a with block.') + raise RuntimeError("You need to use progress bars in a with block.") self.render_progress() return self.generator() + def __next__(self): + # Iteration is defined in terms of a generator function, + # returned by iter(self); use that to define next(). This works + # because `self.iter` is an iterable consumed by that generator, + # so it is re-entry safe. Calling `next(self.generator())` + # twice works and does "what you want". 
+ return next(iter(self)) + + # Python 2 compat + next = __next__ + def is_fast(self): return time.time() - self.start <= self.short_limit @@ -145,20 +167,19 @@ def format_eta(self): hours = t % 24 t //= 24 if t > 0: - days = t - return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds) + return "{}d {:02}:{:02}:{:02}".format(t, hours, minutes, seconds) else: - return '%02d:%02d:%02d' % (hours, minutes, seconds) - return '' + return "{:02}:{:02}:{:02}".format(hours, minutes, seconds) + return "" def format_pos(self): pos = str(self.pos) if self.length_known: - pos += '/%s' % self.length + pos += "/{}".format(self.length) return pos def format_pct(self): - return ('% 4d%%' % int(self.pct * 100))[1:] + return "{: 4}%".format(int(self.pct * 100))[1:] def format_bar(self): if self.length_known: @@ -170,9 +191,13 @@ def format_bar(self): else: bar = list(self.empty_char * (self.width or 1)) if self.time_per_iteration != 0: - bar[int((math.cos(self.pos * self.time_per_iteration) - / 2.0 + 0.5) * self.width)] = self.fill_char - bar = ''.join(bar) + bar[ + int( + (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5) + * self.width + ) + ] = self.fill_char + bar = "".join(bar) return bar def format_progress_line(self): @@ -193,11 +218,14 @@ def format_progress_line(self): if item_info is not None: info_bits.append(item_info) - return (self.bar_template % { - 'label': self.label, - 'bar': self.format_bar(), - 'info': self.info_sep.join(info_bits) - }).rstrip() + return ( + self.bar_template + % { + "label": self.label, + "bar": self.format_bar(), + "info": self.info_sep.join(info_bits), + } + ).rstrip() def render_progress(self): from .termui import get_terminal_size @@ -214,7 +242,7 @@ def render_progress(self): new_width = max(0, get_terminal_size()[0] - clutter_length) if new_width < old_width: buf.append(BEFORE_BAR) - buf.append(' ' * self.max_width) + buf.append(" " * self.max_width) self.max_width = new_width self.width = new_width @@ -229,8 +257,8 @@ def render_progress(self): self.max_width = line_len buf.append(line) - buf.append(' ' * (clear_width - line_len)) - line = ''.join(buf) + buf.append(" " * (clear_width - line_len)) + line = "".join(buf) # Render the line only if it changed. if line != self._last_line and not self.is_fast(): @@ -270,13 +298,19 @@ def finish(self): self.finished = True def generator(self): + """Return a generator which yields the items added to the bar + during construction, and updates the progress bar *after* the + yielded block returns. """ - Returns a generator which yields the items added to the bar during - construction, and updates the progress bar *after* the yielded block - returns. - """ + # WARNING: the iterator interface for `ProgressBar` relies on + # this and only works because this is a simple generator which + # doesn't create or manage additional state. If this function + # changes, the impact should be evaluated both against + # `iter(bar)` and `next(bar)`. `next()` in particular may call + # `self.generator()` repeatedly, and this must remain safe in + # order for that interface to work. 
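With ``__next__`` delegating to the generator as described above, a bar now behaves as a normal iterator as well as a context manager. A usage sketch against the click 7.1 API::

    import time
    import click

    with click.progressbar(range(3), label="demo") as bar:
        for item in bar:      # iter(bar) delegates to bar.generator()
            time.sleep(0.01)  # the bar re-renders after each yield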
if not self.entered: - raise RuntimeError('You need to use progress bars in a with block.') + raise RuntimeError("You need to use progress bars in a with block.") if self.is_hidden: for rv in self.iter: @@ -295,24 +329,28 @@ def pager(generator, color=None): stdout = _default_text_stdout() if not isatty(sys.stdin) or not isatty(stdout): return _nullpager(stdout, generator, color) - pager_cmd = (os.environ.get('PAGER', None) or '').strip() + pager_cmd = (os.environ.get("PAGER", None) or "").strip() if pager_cmd: if WIN: return _tempfilepager(generator, pager_cmd, color) return _pipepager(generator, pager_cmd, color) - if os.environ.get('TERM') in ('dumb', 'emacs'): + if os.environ.get("TERM") in ("dumb", "emacs"): return _nullpager(stdout, generator, color) - if WIN or sys.platform.startswith('os2'): - return _tempfilepager(generator, 'more <', color) - if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: - return _pipepager(generator, 'less', color) + if WIN or sys.platform.startswith("os2"): + return _tempfilepager(generator, "more <", color) + if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: + return _pipepager(generator, "less", color) import tempfile + fd, filename = tempfile.mkstemp() os.close(fd) try: - if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: - return _pipepager(generator, 'more', color) + if ( + hasattr(os, "system") + and os.system("more {}".format(shlex_quote(filename))) == 0 + ): + return _pipepager(generator, "more", color) return _nullpager(stdout, generator, color) finally: os.unlink(filename) @@ -323,28 +361,28 @@ def _pipepager(generator, cmd, color): pager through this might support colors. """ import subprocess + env = dict(os.environ) # If we're piping to less we might support colors under the # condition that - cmd_detail = cmd.rsplit('/', 1)[-1].split() - if color is None and cmd_detail[0] == 'less': - less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:]) + cmd_detail = cmd.rsplit("/", 1)[-1].split() + if color is None and cmd_detail[0] == "less": + less_flags = "{}{}".format(os.environ.get("LESS", ""), " ".join(cmd_detail[1:])) if not less_flags: - env['LESS'] = '-R' + env["LESS"] = "-R" color = True - elif 'r' in less_flags or 'R' in less_flags: + elif "r" in less_flags or "R" in less_flags: color = True - c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, - env=env) + c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env) encoding = get_best_encoding(c.stdin) try: for text in generator: if not color: text = strip_ansi(text) - c.stdin.write(text.encode(encoding, 'replace')) + c.stdin.write(text.encode(encoding, "replace")) except (IOError, KeyboardInterrupt): pass else: @@ -370,16 +408,17 @@ def _pipepager(generator, cmd, color): def _tempfilepager(generator, cmd, color): """Page through text by invoking a program on a temporary file.""" import tempfile + filename = tempfile.mktemp() # TODO: This never terminates if the passed generator never terminates. 
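The recurring change in this file, replacing ad-hoc ``'%s "%s"'`` interpolation with ``shlex_quote``, is about pager commands and filenames that contain spaces or shell metacharacters. For example::

    from shlex import quote

    cmd = "more"
    filename = "/tmp/my report.txt"
    assert "{} {}".format(quote(cmd), quote(filename)) == "more '/tmp/my report.txt'"

On Python 2 the same helper comes from ``pipes`` instead, via the ``shlex_quote`` alias added in ``_compat.py`` earlier in this patch.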
text = "".join(generator) if not color: text = strip_ansi(text) encoding = get_best_encoding(sys.stdout) - with open_stream(filename, 'wb')[0] as f: + with open_stream(filename, "wb")[0] as f: f.write(text.encode(encoding)) try: - os.system(cmd + ' "' + filename + '"') + os.system("{} {}".format(shlex_quote(cmd), shlex_quote(filename))) finally: os.unlink(filename) @@ -393,9 +432,7 @@ def _nullpager(stream, generator, color): class Editor(object): - - def __init__(self, editor=None, env=None, require_save=True, - extension='.txt'): + def __init__(self, editor=None, env=None, require_save=True, extension=".txt"): self.editor = editor self.env = env self.require_save = require_save @@ -404,19 +441,20 @@ def __init__(self, editor=None, env=None, require_save=True, def get_editor(self): if self.editor is not None: return self.editor - for key in 'VISUAL', 'EDITOR': + for key in "VISUAL", "EDITOR": rv = os.environ.get(key) if rv: return rv if WIN: - return 'notepad' - for editor in 'vim', 'nano': - if os.system('which %s >/dev/null 2>&1' % editor) == 0: + return "notepad" + for editor in "sensible-editor", "vim", "nano": + if os.system("which {} >/dev/null 2>&1".format(editor)) == 0: return editor - return 'vi' + return "vi" def edit_file(self, filename): import subprocess + editor = self.get_editor() if self.env: environ = os.environ.copy() @@ -424,47 +462,49 @@ def edit_file(self, filename): else: environ = None try: - c = subprocess.Popen('%s "%s"' % (editor, filename), - env=environ, shell=True) + c = subprocess.Popen( + "{} {}".format(shlex_quote(editor), shlex_quote(filename)), + env=environ, + shell=True, + ) exit_code = c.wait() if exit_code != 0: - raise ClickException('%s: Editing failed!' % editor) + raise ClickException("{}: Editing failed!".format(editor)) except OSError as e: - raise ClickException('%s: Editing failed: %s' % (editor, e)) + raise ClickException("{}: Editing failed: {}".format(editor, e)) def edit(self, text): import tempfile - text = text or '' - if text and not text.endswith('\n'): - text += '\n' + text = text or "" + if text and not text.endswith("\n"): + text += "\n" - fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension) + fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension) try: if WIN: - encoding = 'utf-8-sig' - text = text.replace('\n', '\r\n') + encoding = "utf-8-sig" + text = text.replace("\n", "\r\n") else: - encoding = 'utf-8' + encoding = "utf-8" text = text.encode(encoding) - f = os.fdopen(fd, 'wb') + f = os.fdopen(fd, "wb") f.write(text) f.close() timestamp = os.path.getmtime(name) self.edit_file(name) - if self.require_save \ - and os.path.getmtime(name) == timestamp: + if self.require_save and os.path.getmtime(name) == timestamp: return None - f = open(name, 'rb') + f = open(name, "rb") try: rv = f.read() finally: f.close() - return rv.decode('utf-8-sig').replace('\r\n', '\n') + return rv.decode("utf-8-sig").replace("\r\n", "\n") finally: os.unlink(name) @@ -477,18 +517,18 @@ def _unquote_file(url): import urllib except ImportError: import urllib - if url.startswith('file://'): + if url.startswith("file://"): url = urllib.unquote(url[7:]) return url - if sys.platform == 'darwin': - args = ['open'] + if sys.platform == "darwin": + args = ["open"] if wait: - args.append('-W') + args.append("-W") if locate: - args.append('-R') + args.append("-R") args.append(_unquote_file(url)) - null = open('/dev/null', 'w') + null = open("/dev/null", "w") try: return subprocess.Popen(args, stderr=null).wait() finally: @@ -496,44 +536,42 
@@ def _unquote_file(url): elif WIN: if locate: url = _unquote_file(url) - args = 'explorer /select,"%s"' % _unquote_file( - url.replace('"', '')) + args = "explorer /select,{}".format(shlex_quote(url)) else: - args = 'start %s "" "%s"' % ( - wait and '/WAIT' or '', url.replace('"', '')) + args = 'start {} "" {}'.format("/WAIT" if wait else "", shlex_quote(url)) return os.system(args) elif CYGWIN: if locate: url = _unquote_file(url) - args = 'cygstart "%s"' % (os.path.dirname(url).replace('"', '')) + args = "cygstart {}".format(shlex_quote(os.path.dirname(url))) else: - args = 'cygstart %s "%s"' % ( - wait and '-w' or '', url.replace('"', '')) + args = "cygstart {} {}".format("-w" if wait else "", shlex_quote(url)) return os.system(args) try: if locate: - url = os.path.dirname(_unquote_file(url)) or '.' + url = os.path.dirname(_unquote_file(url)) or "." else: url = _unquote_file(url) - c = subprocess.Popen(['xdg-open', url]) + c = subprocess.Popen(["xdg-open", url]) if wait: return c.wait() return 0 except OSError: - if url.startswith(('http://', 'https://')) and not locate and not wait: + if url.startswith(("http://", "https://")) and not locate and not wait: import webbrowser + webbrowser.open(url) return 0 return 1 def _translate_ch_to_exc(ch): - if ch == u'\x03': + if ch == u"\x03": raise KeyboardInterrupt() - if ch == u'\x04' and not WIN: # Unix-like, Ctrl+D + if ch == u"\x04" and not WIN: # Unix-like, Ctrl+D raise EOFError() - if ch == u'\x1a' and WIN: # Windows, Ctrl+Z + if ch == u"\x1a" and WIN: # Windows, Ctrl+Z raise EOFError() @@ -580,12 +618,14 @@ def getchar(echo): func = msvcrt.getwch rv = func() - if rv in (u'\x00', u'\xe0'): + if rv in (u"\x00", u"\xe0"): # \x00 and \xe0 are control characters that indicate special key, # see above. rv += func() _translate_ch_to_exc(rv) return rv + + else: import tty import termios @@ -593,7 +633,7 @@ def getchar(echo): @contextlib.contextmanager def raw_terminal(): if not isatty(sys.stdin): - f = open('/dev/tty') + f = open("/dev/tty") fd = f.fileno() else: fd = sys.stdin.fileno() @@ -614,7 +654,7 @@ def raw_terminal(): def getchar(echo): with raw_terminal() as fd: ch = os.read(fd, 32) - ch = ch.decode(get_best_encoding(sys.stdin), 'replace') + ch = ch.decode(get_best_encoding(sys.stdin), "replace") if echo and isatty(sys.stdout): sys.stdout.write(ch) _translate_ch_to_exc(ch) diff --git a/pipenv/vendor/click/_textwrap.py b/pipenv/vendor/click/_textwrap.py index 7e776031ea..6959087b7f 100644 --- a/pipenv/vendor/click/_textwrap.py +++ b/pipenv/vendor/click/_textwrap.py @@ -3,7 +3,6 @@ class TextWrapper(textwrap.TextWrapper): - def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): space_left = max(width - cur_len, 1) @@ -35,4 +34,4 @@ def indent_only(self, text): if idx > 0: indent = self.subsequent_indent rv.append(indent + line) - return '\n'.join(rv) + return "\n".join(rv) diff --git a/pipenv/vendor/click/_unicodefun.py b/pipenv/vendor/click/_unicodefun.py index 620edff37e..781c365227 100644 --- a/pipenv/vendor/click/_unicodefun.py +++ b/pipenv/vendor/click/_unicodefun.py @@ -1,25 +1,19 @@ +import codecs import os import sys -import codecs from ._compat import PY2 -# If someone wants to vendor click, we want to ensure the -# correct package is discovered. Ideally we could use a -# relative import here but unfortunately Python does not -# support that. 
-click = sys.modules[__name__.rsplit('.', 1)[0]] - - def _find_unicode_literals_frame(): import __future__ - if not hasattr(sys, '_getframe'): # not all Python implementations have it + + if not hasattr(sys, "_getframe"): # not all Python implementations have it return 0 frm = sys._getframe(1) idx = 1 while frm is not None: - if frm.f_globals.get('__name__', '').startswith('click.'): + if frm.f_globals.get("__name__", "").startswith("click."): frm = frm.f_back idx += 1 elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag: @@ -32,19 +26,27 @@ def _find_unicode_literals_frame(): def _check_for_unicode_literals(): if not __debug__: return - if not PY2 or click.disable_unicode_literals_warning: + + from . import disable_unicode_literals_warning + + if not PY2 or disable_unicode_literals_warning: return bad_frame = _find_unicode_literals_frame() if bad_frame <= 0: return from warnings import warn - warn(Warning('Click detected the use of the unicode_literals ' - '__future__ import. This is heavily discouraged ' - 'because it can introduce subtle bugs in your ' - 'code. You should instead use explicit u"" literals ' - 'for your unicode strings. For more information see ' - 'https://click.palletsprojects.com/python3/'), - stacklevel=bad_frame) + + warn( + Warning( + "Click detected the use of the unicode_literals __future__" + " import. This is heavily discouraged because it can" + " introduce subtle bugs in your code. You should instead" + ' use explicit u"" literals for your unicode strings. For' + " more information see" + " https://click.palletsprojects.com/python3/" + ), + stacklevel=bad_frame, + ) def _verify_python3_env(): @@ -53,73 +55,77 @@ def _verify_python3_env(): return try: import locale + fs_enc = codecs.lookup(locale.getpreferredencoding()).name except Exception: - fs_enc = 'ascii' - if fs_enc != 'ascii': + fs_enc = "ascii" + if fs_enc != "ascii": return - extra = '' - if os.name == 'posix': + extra = "" + if os.name == "posix": import subprocess + try: - rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate()[0] + rv = subprocess.Popen( + ["locale", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ).communicate()[0] except OSError: - rv = b'' + rv = b"" good_locales = set() has_c_utf8 = False # Make sure we're operating on text here. if isinstance(rv, bytes): - rv = rv.decode('ascii', 'replace') + rv = rv.decode("ascii", "replace") for line in rv.splitlines(): locale = line.strip() - if locale.lower().endswith(('.utf-8', '.utf8')): + if locale.lower().endswith((".utf-8", ".utf8")): good_locales.add(locale) - if locale.lower() in ('c.utf8', 'c.utf-8'): + if locale.lower() in ("c.utf8", "c.utf-8"): has_c_utf8 = True - extra += '\n\n' + extra += "\n\n" if not good_locales: extra += ( - 'Additional information: on this system no suitable UTF-8\n' - 'locales were discovered. This most likely requires resolving\n' - 'by reconfiguring the locale system.' + "Additional information: on this system no suitable" + " UTF-8 locales were discovered. This most likely" + " requires resolving by reconfiguring the locale" + " system." ) elif has_c_utf8: extra += ( - 'This system supports the C.UTF-8 locale which is recommended.\n' - 'You might be able to resolve your issue by exporting the\n' - 'following environment variables:\n\n' - ' export LC_ALL=C.UTF-8\n' - ' export LANG=C.UTF-8' + "This system supports the C.UTF-8 locale which is" + " recommended. 
You might be able to resolve your issue" + " by exporting the following environment variables:\n\n" + " export LC_ALL=C.UTF-8\n" + " export LANG=C.UTF-8" ) else: extra += ( - 'This system lists a couple of UTF-8 supporting locales that\n' - 'you can pick from. The following suitable locales were\n' - 'discovered: %s' - ) % ', '.join(sorted(good_locales)) + "This system lists a couple of UTF-8 supporting locales" + " that you can pick from. The following suitable" + " locales were discovered: {}".format(", ".join(sorted(good_locales))) + ) bad_locale = None - for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'): - if locale and locale.lower().endswith(('.utf-8', '.utf8')): + for locale in os.environ.get("LC_ALL"), os.environ.get("LANG"): + if locale and locale.lower().endswith((".utf-8", ".utf8")): bad_locale = locale if locale is not None: break if bad_locale is not None: extra += ( - '\n\nClick discovered that you exported a UTF-8 locale\n' - 'but the locale system could not pick up from it because\n' - 'it does not exist. The exported locale is "%s" but it\n' - 'is not supported' - ) % bad_locale + "\n\nClick discovered that you exported a UTF-8 locale" + " but the locale system could not pick up from it" + " because it does not exist. The exported locale is" + " '{}' but it is not supported".format(bad_locale) + ) raise RuntimeError( - 'Click will abort further execution because Python 3 was' - ' configured to use ASCII as encoding for the environment.' - ' Consult https://click.palletsprojects.com/en/7.x/python3/ for' - ' mitigation steps.' + extra + "Click will abort further execution because Python 3 was" + " configured to use ASCII as encoding for the environment." + " Consult https://click.palletsprojects.com/python3/ for" + " mitigation steps.{}".format(extra) ) diff --git a/pipenv/vendor/click/_winconsole.py b/pipenv/vendor/click/_winconsole.py index bbb080ddae..b6c4274af0 100644 --- a/pipenv/vendor/click/_winconsole.py +++ b/pipenv/vendor/click/_winconsole.py @@ -7,24 +7,42 @@ # compared to the original patches as we do not need to patch # the entire interpreter but just work in our little world of # echo and prmopt. 
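[Reviewer note] _verify_python3_env above refuses to run under a pure-ASCII Python 3 environment and suggests exporting C.UTF-8. A hedged sketch of just the detection step, using the same stdlib calls as the patch; the helper name is mine:

    import codecs
    import locale

    def preferred_encoding_is_ascii():
        # Same resolution as the patch: normalize the preferred encoding
        # name, and treat lookup failures as ASCII.
        try:
            return codecs.lookup(locale.getpreferredencoding()).name == "ascii"
        except Exception:
            return True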
- +import ctypes import io import os import sys -import zlib import time -import ctypes +import zlib +from ctypes import byref +from ctypes import c_char +from ctypes import c_char_p +from ctypes import c_int +from ctypes import c_ssize_t +from ctypes import c_ulong +from ctypes import c_void_p +from ctypes import POINTER +from ctypes import py_object +from ctypes import windll +from ctypes import WinError +from ctypes import WINFUNCTYPE +from ctypes.wintypes import DWORD +from ctypes.wintypes import HANDLE +from ctypes.wintypes import LPCWSTR +from ctypes.wintypes import LPWSTR + import msvcrt -from ._compat import _NonClosingTextIOWrapper, text_type, PY2 -from ctypes import byref, POINTER, c_int, c_char, c_char_p, \ - c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE + +from ._compat import _NonClosingTextIOWrapper +from ._compat import PY2 +from ._compat import text_type + try: from ctypes import pythonapi + PyObject_GetBuffer = pythonapi.PyObject_GetBuffer PyBuffer_Release = pythonapi.PyBuffer_Release except ImportError: pythonapi = None -from ctypes.wintypes import LPWSTR, LPCWSTR c_ssize_p = POINTER(c_ssize_t) @@ -33,12 +51,15 @@ GetStdHandle = kernel32.GetStdHandle ReadConsoleW = kernel32.ReadConsoleW WriteConsoleW = kernel32.WriteConsoleW +GetConsoleMode = kernel32.GetConsoleMode GetLastError = kernel32.GetLastError -GetCommandLineW = WINFUNCTYPE(LPWSTR)( - ('GetCommandLineW', windll.kernel32)) -CommandLineToArgvW = WINFUNCTYPE( - POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( - ('CommandLineToArgvW', windll.shell32)) +GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32)) +CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( + ("CommandLineToArgvW", windll.shell32) +) +LocalFree = WINFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)( + ("LocalFree", windll.kernel32) +) STDIN_HANDLE = GetStdHandle(-10) @@ -57,27 +78,27 @@ STDOUT_FILENO = 1 STDERR_FILENO = 2 -EOF = b'\x1a' +EOF = b"\x1a" MAX_BYTES_WRITTEN = 32767 class Py_buffer(ctypes.Structure): _fields_ = [ - ('buf', c_void_p), - ('obj', py_object), - ('len', c_ssize_t), - ('itemsize', c_ssize_t), - ('readonly', c_int), - ('ndim', c_int), - ('format', c_char_p), - ('shape', c_ssize_p), - ('strides', c_ssize_p), - ('suboffsets', c_ssize_p), - ('internal', c_void_p) + ("buf", c_void_p), + ("obj", py_object), + ("len", c_ssize_t), + ("itemsize", c_ssize_t), + ("readonly", c_int), + ("ndim", c_int), + ("format", c_char_p), + ("shape", c_ssize_p), + ("strides", c_ssize_p), + ("suboffsets", c_ssize_p), + ("internal", c_void_p), ] if PY2: - _fields_.insert(-1, ('smalltable', c_ssize_t * 2)) + _fields_.insert(-1, ("smalltable", c_ssize_t * 2)) # On PyPy we cannot get buffers so our ability to operate here is @@ -85,6 +106,7 @@ class Py_buffer(ctypes.Structure): if pythonapi is None: get_buffer = None else: + def get_buffer(obj, writable=False): buf = Py_buffer() flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE @@ -97,7 +119,6 @@ def get_buffer(obj, writable=False): class _WindowsConsoleRawIOBase(io.RawIOBase): - def __init__(self, handle): self.handle = handle @@ -107,7 +128,6 @@ def isatty(self): class _WindowsConsoleReader(_WindowsConsoleRawIOBase): - def readable(self): return True @@ -116,20 +136,26 @@ def readinto(self, b): if not bytes_to_be_read: return 0 elif bytes_to_be_read % 2: - raise ValueError('cannot read odd number of bytes from ' - 'UTF-16-LE encoded console') + raise ValueError( + "cannot read odd number of bytes from UTF-16-LE encoded console" + ) buffer = get_buffer(b, 
writable=True) code_units_to_be_read = bytes_to_be_read // 2 code_units_read = c_ulong() - rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read, - byref(code_units_read), None) + rv = ReadConsoleW( + HANDLE(self.handle), + buffer, + code_units_to_be_read, + byref(code_units_read), + None, + ) if GetLastError() == ERROR_OPERATION_ABORTED: # wait for KeyboardInterrupt time.sleep(0.1) if not rv: - raise OSError('Windows error: %s' % GetLastError()) + raise OSError("Windows error: {}".format(GetLastError())) if buffer[0] == EOF: return 0 @@ -137,27 +163,30 @@ def readinto(self, b): class _WindowsConsoleWriter(_WindowsConsoleRawIOBase): - def writable(self): return True @staticmethod def _get_error_message(errno): if errno == ERROR_SUCCESS: - return 'ERROR_SUCCESS' + return "ERROR_SUCCESS" elif errno == ERROR_NOT_ENOUGH_MEMORY: - return 'ERROR_NOT_ENOUGH_MEMORY' - return 'Windows error %s' % errno + return "ERROR_NOT_ENOUGH_MEMORY" + return "Windows error {}".format(errno) def write(self, b): bytes_to_be_written = len(b) buf = get_buffer(b) - code_units_to_be_written = min(bytes_to_be_written, - MAX_BYTES_WRITTEN) // 2 + code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2 code_units_written = c_ulong() - WriteConsoleW(self.handle, buf, code_units_to_be_written, - byref(code_units_written), None) + WriteConsoleW( + HANDLE(self.handle), + buf, + code_units_to_be_written, + byref(code_units_written), + None, + ) bytes_written = 2 * code_units_written.value if bytes_written == 0 and bytes_to_be_written > 0: @@ -166,7 +195,6 @@ def write(self, b): class ConsoleStream(object): - def __init__(self, text_stream, byte_stream): self._text_stream = text_stream self.buffer = byte_stream @@ -195,9 +223,8 @@ def isatty(self): return self.buffer.isatty() def __repr__(self): - return '<ConsoleStream name=%r encoding=%r>' % ( - self.name, - self.encoding, + return "<ConsoleStream name={!r} encoding={!r}>".format( + self.name, self.encoding ) @@ -207,6 +234,7 @@ class WindowsChunkedWriter(object): attribute access apart from method 'write()' which we wrap to write in limited chunks due to a Windows limitation on binary console streams. """ + def __init__(self, wrapped): # double-underscore everything to prevent clashes with names of # attributes on the wrapped stream object. 
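[Reviewer note] WindowsChunkedWriter, whose write() loop follows directly below, exists because binary console streams on Windows reject very large single writes. A standalone sketch of the same capping loop, assuming the 32767-byte limit defined above; the function name is mine:

    MAX_BYTES_WRITTEN = 32767  # cap taken from the constant in this file

    def write_in_chunks(write, text):
        # Feed the wrapped stream at most MAX_BYTES_WRITTEN characters
        # per call, exactly as the wrapper's write() does.
        written = 0
        while written < len(text):
            step = min(len(text) - written, MAX_BYTES_WRITTEN)
            write(text[written : written + step])
            written += step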
@@ -221,7 +249,7 @@ def write(self, text): while written < total_to_write: to_write = min(total_to_write - written, MAX_BYTES_WRITTEN) - self.__wrapped.write(text[written:written+to_write]) + self.__wrapped.write(text[written : written + to_write]) written += to_write @@ -230,7 +258,11 @@ def write(self, text): def _wrap_std_stream(name): # Python 2 & Windows 7 and below - if PY2 and sys.getwindowsversion()[:2] <= (6, 1) and name not in _wrapped_std_streams: + if ( + PY2 + and sys.getwindowsversion()[:2] <= (6, 1) + and name not in _wrapped_std_streams + ): setattr(sys, name, WindowsChunkedWriter(getattr(sys, name))) _wrapped_std_streams.add(name) @@ -238,43 +270,59 @@ def _wrap_std_stream(name): def _get_text_stdin(buffer_stream): text_stream = _NonClosingTextIOWrapper( io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) + "utf-16-le", + "strict", + line_buffering=True, + ) return ConsoleStream(text_stream, buffer_stream) def _get_text_stdout(buffer_stream): text_stream = _NonClosingTextIOWrapper( io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) + "utf-16-le", + "strict", + line_buffering=True, + ) return ConsoleStream(text_stream, buffer_stream) def _get_text_stderr(buffer_stream): text_stream = _NonClosingTextIOWrapper( io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) + "utf-16-le", + "strict", + line_buffering=True, + ) return ConsoleStream(text_stream, buffer_stream) if PY2: + def _hash_py_argv(): - return zlib.crc32('\x00'.join(sys.argv[1:])) + return zlib.crc32("\x00".join(sys.argv[1:])) _initial_argv_hash = _hash_py_argv() def _get_windows_argv(): argc = c_int(0) argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc)) - argv = [argv_unicode[i] for i in range(0, argc.value)] + if not argv_unicode: + raise WinError() + try: + argv = [argv_unicode[i] for i in range(0, argc.value)] + finally: + LocalFree(argv_unicode) + del argv_unicode - if not hasattr(sys, 'frozen'): + if not hasattr(sys, "frozen"): argv = argv[1:] while len(argv) > 0: arg = argv[0] - if not arg.startswith('-') or arg == '-': + if not arg.startswith("-") or arg == "-": break argv = argv[1:] - if arg.startswith(('-c', '-m')): + if arg.startswith(("-c", "-m")): break return argv[1:] @@ -287,15 +335,30 @@ def _get_windows_argv(): } +def _is_console(f): + if not hasattr(f, "fileno"): + return False + + try: + fileno = f.fileno() + except OSError: + return False + + handle = msvcrt.get_osfhandle(fileno) + return bool(GetConsoleMode(handle, byref(DWORD()))) + + def _get_windows_console_stream(f, encoding, errors): - if get_buffer is not None and \ - encoding in ('utf-16-le', None) \ - and errors in ('strict', None) and \ - hasattr(f, 'isatty') and f.isatty(): + if ( + get_buffer is not None + and encoding in ("utf-16-le", None) + and errors in ("strict", None) + and _is_console(f) + ): func = _stream_factories.get(f.fileno()) if func is not None: if not PY2: - f = getattr(f, 'buffer', None) + f = getattr(f, "buffer", None) if f is None: return None else: diff --git a/pipenv/vendor/click/core.py b/pipenv/vendor/click/core.py index 7a1e3422be..f58bf26d2f 100644 --- a/pipenv/vendor/click/core.py +++ b/pipenv/vendor/click/core.py @@ -3,37 +3,51 @@ import os import sys from contextlib import contextmanager -from itertools import repeat from functools import update_wrapper +from itertools import repeat -from .types import convert_type, IntRange, BOOL -from .utils 
import PacifyFlushWrapper, make_str, make_default_short_help, \ - echo, get_os_args -from .exceptions import ClickException, UsageError, BadParameter, Abort, \ - MissingParameter, Exit -from .termui import prompt, confirm, style -from .formatting import HelpFormatter, join_options -from .parser import OptionParser, split_opt -from .globals import push_context, pop_context - -from ._compat import PY2, isidentifier, iteritems, string_types -from ._unicodefun import _check_for_unicode_literals, _verify_python3_env - +from ._compat import isidentifier +from ._compat import iteritems +from ._compat import PY2 +from ._compat import string_types +from ._unicodefun import _check_for_unicode_literals +from ._unicodefun import _verify_python3_env +from .exceptions import Abort +from .exceptions import BadParameter +from .exceptions import ClickException +from .exceptions import Exit +from .exceptions import MissingParameter +from .exceptions import UsageError +from .formatting import HelpFormatter +from .formatting import join_options +from .globals import pop_context +from .globals import push_context +from .parser import OptionParser +from .parser import split_opt +from .termui import confirm +from .termui import prompt +from .termui import style +from .types import BOOL +from .types import convert_type +from .types import IntRange +from .utils import echo +from .utils import get_os_args +from .utils import make_default_short_help +from .utils import make_str +from .utils import PacifyFlushWrapper _missing = object() +SUBCOMMAND_METAVAR = "COMMAND [ARGS]..." +SUBCOMMANDS_METAVAR = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..." -SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...' -SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...' - -DEPRECATED_HELP_NOTICE = ' (DEPRECATED)' -DEPRECATED_INVOKE_NOTICE = 'DeprecationWarning: ' + \ - 'The command %(name)s is deprecated.' +DEPRECATED_HELP_NOTICE = " (DEPRECATED)" +DEPRECATED_INVOKE_NOTICE = "DeprecationWarning: The command %(name)s is deprecated." def _maybe_show_deprecated_notice(cmd): if cmd.deprecated: - echo(style(DEPRECATED_INVOKE_NOTICE % {'name': cmd.name}, fg='red'), err=True) + echo(style(DEPRECATED_INVOKE_NOTICE % {"name": cmd.name}, fg="red"), err=True) def fast_exit(code): @@ -48,12 +62,13 @@ def fast_exit(code): def _bashcomplete(cmd, prog_name, complete_var=None): """Internal handler for the bash completion support.""" if complete_var is None: - complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper() + complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper()) complete_instr = os.environ.get(complete_var) if not complete_instr: return from ._bashcomplete import bashcomplete + if bashcomplete(cmd, prog_name, complete_var, complete_instr): fast_exit(1) @@ -62,19 +77,28 @@ def _check_multicommand(base_command, cmd_name, cmd, register=False): if not base_command.chain or not isinstance(cmd, MultiCommand): return if register: - hint = 'It is not possible to add multi commands as children to ' \ - 'another multi command that is in chain mode' + hint = ( + "It is not possible to add multi commands as children to" + " another multi command that is in chain mode." + ) else: - hint = 'Found a multi command as subcommand to a multi command ' \ - 'that is in chain mode. This is not supported' - raise RuntimeError('%s. Command "%s" is set to chain and "%s" was ' - 'added as subcommand but it in itself is a ' - 'multi command. ("%s" is a %s within a chained ' - '%s named "%s").' 
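[Reviewer note] _bashcomplete in the hunk above derives its trigger variable from the program name. A small sketch of that derivation (the helper name is mine; the transformation matches the patch):

    def complete_var_for(prog_name):
        # "foo-bar" -> "_FOO_BAR_COMPLETE", as computed in _bashcomplete.
        return "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())

    assert complete_var_for("foo-bar") == "_FOO_BAR_COMPLETE"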
% ( - hint, base_command.name, cmd_name, - cmd_name, cmd.__class__.__name__, - base_command.__class__.__name__, - base_command.name)) + hint = ( + "Found a multi command as subcommand to a multi command" + " that is in chain mode. This is not supported." + ) + raise RuntimeError( + "{}. Command '{}' is set to chain and '{}' was added as" + " subcommand but it in itself is a multi command. ('{}' is a {}" + " within a chained {} named '{}').".format( + hint, + base_command.name, + cmd_name, + cmd_name, + cmd.__class__.__name__, + base_command.__class__.__name__, + base_command.name, + ) + ) def batch(iterable, batch_size): @@ -82,25 +106,26 @@ def batch(iterable, batch_size): def invoke_param_callback(callback, ctx, param, value): - code = getattr(callback, '__code__', None) - args = getattr(code, 'co_argcount', 3) + code = getattr(callback, "__code__", None) + args = getattr(code, "co_argcount", 3) if args < 3: - # This will become a warning in Click 3.0: from warnings import warn - warn(Warning('Invoked legacy parameter callback "%s". The new ' - 'signature for such callbacks starting with ' - 'click 2.0 is (ctx, param, value).' - % callback), stacklevel=3) + + warn( + "Parameter callbacks take 3 args, (ctx, param, value). The" + " 2-arg style is deprecated and will be removed in 8.0.".format(callback), + DeprecationWarning, + stacklevel=3, + ) return callback(ctx, value) + return callback(ctx, param, value) @contextmanager def augment_usage_errors(ctx, param=None): - """Context manager that attaches extra information to exceptions that - fly. - """ + """Context manager that attaches extra information to exceptions.""" try: yield except BadParameter as e: @@ -120,11 +145,12 @@ def iter_params_for_processing(invocation_order, declaration_order): for processing and an iterable of parameters that exist, this returns a list in the correct order as they should be processed. """ + def sort_key(item): try: idx = invocation_order.index(item) except ValueError: - idx = float('inf') + idx = float("inf") return (not item.is_eager, idx) return sorted(declaration_order, key=sort_key) @@ -154,6 +180,9 @@ class Context(object): Added the `color`, `ignore_unknown_options`, and `max_content_width` parameters. + .. versionadded:: 7.1 + Added the `show_default` parameter. + :param command: the command class for this context. :param parent: the parent context. :param info_name: the info name for this invocation. Generally this @@ -208,15 +237,30 @@ class Context(object): codes are used in texts that Click prints which is by default not the case. This for instance would affect help output. + :param show_default: if True, shows defaults for all options. + Even if an option is later created with show_default=False, + this command-level setting overrides it. 
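[Reviewer note] The new show_default context parameter documented above applies command-wide, overriding per-option settings. A hedged usage sketch; the command and option names are hypothetical:

    import click

    @click.command(context_settings={"show_default": True})
    @click.option("--retries", default=3, help="How many times to retry.")
    def sync(retries):
        """Sync data."""
        click.echo(retries)

    # --help now renders: "--retries INTEGER  How many times to retry.  [default: 3]"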
""" - def __init__(self, command, parent=None, info_name=None, obj=None, - auto_envvar_prefix=None, default_map=None, - terminal_width=None, max_content_width=None, - resilient_parsing=False, allow_extra_args=None, - allow_interspersed_args=None, - ignore_unknown_options=None, help_option_names=None, - token_normalize_func=None, color=None): + def __init__( + self, + command, + parent=None, + info_name=None, + obj=None, + auto_envvar_prefix=None, + default_map=None, + terminal_width=None, + max_content_width=None, + resilient_parsing=False, + allow_extra_args=None, + allow_interspersed_args=None, + ignore_unknown_options=None, + help_option_names=None, + token_normalize_func=None, + color=None, + show_default=None, + ): #: the parent context or `None` if none exists. self.parent = parent #: the :class:`Command` for this context. @@ -237,12 +281,14 @@ def __init__(self, command, parent=None, info_name=None, obj=None, obj = parent.obj #: the user object stored. self.obj = obj - self._meta = getattr(parent, 'meta', {}) + self._meta = getattr(parent, "meta", {}) #: A dictionary (-like object) with defaults for parameters. - if default_map is None \ - and parent is not None \ - and parent.default_map is not None: + if ( + default_map is None + and parent is not None + and parent.default_map is not None + ): default_map = parent.default_map.get(info_name) self.default_map = default_map @@ -301,7 +347,7 @@ def __init__(self, command, parent=None, info_name=None, obj=None, if parent is not None: help_option_names = parent.help_option_names else: - help_option_names = ['--help'] + help_option_names = ["--help"] #: The names for the help options. self.help_option_names = help_option_names @@ -322,13 +368,18 @@ def __init__(self, command, parent=None, info_name=None, obj=None, # the command on this level has a name, we can expand the envvar # prefix automatically. if auto_envvar_prefix is None: - if parent is not None \ - and parent.auto_envvar_prefix is not None and \ - self.info_name is not None: - auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix, - self.info_name.upper()) + if ( + parent is not None + and parent.auto_envvar_prefix is not None + and self.info_name is not None + ): + auto_envvar_prefix = "{}_{}".format( + parent.auto_envvar_prefix, self.info_name.upper() + ) else: auto_envvar_prefix = auto_envvar_prefix.upper() + if auto_envvar_prefix is not None: + auto_envvar_prefix = auto_envvar_prefix.replace("-", "_") self.auto_envvar_prefix = auto_envvar_prefix if color is None and parent is not None: @@ -337,6 +388,8 @@ def __init__(self, command, parent=None, info_name=None, obj=None, #: Controls if styling output is wanted or not. self.color = color + self.show_default = show_default + self._close_callbacks = [] self._depth = 0 @@ -404,7 +457,7 @@ def meta(self): Example usage:: - LANG_KEY = __name__ + '.lang' + LANG_KEY = f'{__name__}.lang' def set_language(value): ctx = get_current_context() @@ -419,8 +472,9 @@ def get_language(): def make_formatter(self): """Creates the formatter for the help and usage output.""" - return HelpFormatter(width=self.terminal_width, - max_width=self.max_content_width) + return HelpFormatter( + width=self.terminal_width, max_width=self.max_content_width + ) def call_on_close(self, f): """This decorator remembers a function as callback that should be @@ -446,11 +500,11 @@ def command_path(self): information on the help page. It's automatically created by combining the info names of the chain of contexts to the root. 
""" - rv = '' + rv = "" if self.info_name is not None: rv = self.info_name if self.parent is not None: - rv = self.parent.command_path + ' ' + rv + rv = "{} {}".format(self.parent.command_path, rv) return rv.lstrip() def find_root(self): @@ -515,7 +569,7 @@ def get_help(self): """ return self.command.get_help(self) - def invoke(*args, **kwargs): + def invoke(*args, **kwargs): # noqa: B902 """Invokes a command callback in exactly the way it expects. There are two ways to invoke this method: @@ -542,8 +596,9 @@ def invoke(*args, **kwargs): callback = other_cmd.callback ctx = Context(other_cmd, info_name=other_cmd.name, parent=self) if callback is None: - raise TypeError('The given command does not have a ' - 'callback that can be invoked.') + raise TypeError( + "The given command does not have a callback that can be invoked." + ) for param in other_cmd.params: if param.name not in kwargs and param.expose_value: @@ -554,7 +609,7 @@ def invoke(*args, **kwargs): with ctx: return callback(*args, **kwargs) - def forward(*args, **kwargs): + def forward(*args, **kwargs): # noqa: B902 """Similar to :meth:`invoke` but fills in default keyword arguments from the current context if the other command expects it. This cannot invoke callbacks directly, only other commands. @@ -564,7 +619,7 @@ def forward(*args, **kwargs): # It's also possible to invoke another command which might or # might not have a callback. if not isinstance(cmd, Command): - raise TypeError('Callback is not a command.') + raise TypeError("Callback is not a command.") for param in self.params: if param not in kwargs: @@ -594,6 +649,7 @@ class BaseCommand(object): :param context_settings: an optional dictionary with defaults that are passed to the context object. """ + #: the default for the :attr:`Context.allow_extra_args` flag. allow_extra_args = False #: the default for the :attr:`Context.allow_interspersed_args` flag. @@ -612,11 +668,14 @@ def __init__(self, name, context_settings=None): #: an optional dictionary with defaults passed to the context. self.context_settings = context_settings + def __repr__(self): + return "<{} {}>".format(self.__class__.__name__, self.name) + def get_usage(self, ctx): - raise NotImplementedError('Base commands cannot get usage') + raise NotImplementedError("Base commands cannot get usage") def get_help(self, ctx): - raise NotImplementedError('Base commands cannot get help') + raise NotImplementedError("Base commands cannot get help") def make_context(self, info_name, args, parent=None, **extra): """This function when given an info name and arguments will kick @@ -646,17 +705,22 @@ def parse_args(self, ctx, args): and parses the arguments, then modifies the context as necessary. This is automatically invoked by :meth:`make_context`. """ - raise NotImplementedError('Base commands do not know how to parse ' - 'arguments.') + raise NotImplementedError("Base commands do not know how to parse arguments.") def invoke(self, ctx): """Given a context, this invokes the command. The default implementation is raising a not implemented error. """ - raise NotImplementedError('Base commands are not invokable by default') - - def main(self, args=None, prog_name=None, complete_var=None, - standalone_mode=True, **extra): + raise NotImplementedError("Base commands are not invokable by default") + + def main( + self, + args=None, + prog_name=None, + complete_var=None, + standalone_mode=True, + **extra + ): """This is the way to invoke a script with all the bells and whistles as a command line application. 
This will always terminate the application after a call. If this is not wanted, ``SystemExit`` @@ -703,8 +767,9 @@ def main(self, args=None, prog_name=None, complete_var=None, args = list(args) if prog_name is None: - prog_name = make_str(os.path.basename( - sys.argv and sys.argv[0] or __file__)) + prog_name = make_str( + os.path.basename(sys.argv[0] if sys.argv else __file__) + ) # Hook for the Bash completion. This only activates if the Bash # completion is actually enabled, otherwise this is quite a fast @@ -756,7 +821,7 @@ def main(self, args=None, prog_name=None, complete_var=None, except Abort: if not standalone_mode: raise - echo('Aborted!', file=sys.stderr) + echo("Aborted!", file=sys.stderr) sys.exit(1) def __call__(self, *args, **kwargs): @@ -771,6 +836,8 @@ class Command(BaseCommand): .. versionchanged:: 2.0 Added the `context_settings` parameter. + .. versionchanged:: 7.1 + Added the `no_args_is_help` parameter. :param name: the name of the command to use unless a group overrides it. :param context_settings: an optional dictionary with defaults that are @@ -785,16 +852,31 @@ class Command(BaseCommand): shown on the command listing of the parent command. :param add_help_option: by default each command registers a ``--help`` option. This can be disabled by this parameter. + :param no_args_is_help: this controls what happens if no arguments are + provided. This option is disabled by default. + If enabled this will add ``--help`` as argument + if no arguments are passed :param hidden: hide this command from help outputs. :param deprecated: issues a message indicating that the command is deprecated. """ - def __init__(self, name, context_settings=None, callback=None, - params=None, help=None, epilog=None, short_help=None, - options_metavar='[OPTIONS]', add_help_option=True, - hidden=False, deprecated=False): + def __init__( + self, + name, + context_settings=None, + callback=None, + params=None, + help=None, + epilog=None, + short_help=None, + options_metavar="[OPTIONS]", + add_help_option=True, + no_args_is_help=False, + hidden=False, + deprecated=False, + ): BaseCommand.__init__(self, name, context_settings) #: the callback to execute when the command fires. This might be #: `None` in which case nothing happens. @@ -805,20 +887,25 @@ def __init__(self, name, context_settings=None, callback=None, self.params = params or [] # if a form feed (page break) is found in the help text, truncate help # text to the content preceding the first form feed - if help and '\f' in help: - help = help.split('\f', 1)[0] + if help and "\f" in help: + help = help.split("\f", 1)[0] self.help = help self.epilog = epilog self.options_metavar = options_metavar self.short_help = short_help self.add_help_option = add_help_option + self.no_args_is_help = no_args_is_help self.hidden = hidden self.deprecated = deprecated def get_usage(self, ctx): + """Formats the usage line into a string and returns it. + + Calls :meth:`format_usage` internally. + """ formatter = ctx.make_formatter() self.format_usage(ctx, formatter) - return formatter.getvalue().rstrip('\n') + return formatter.getvalue().rstrip("\n") def get_params(self, ctx): rv = self.params @@ -828,9 +915,12 @@ def get_params(self, ctx): return rv def format_usage(self, ctx, formatter): - """Writes the usage line into the formatter.""" + """Writes the usage line into the formatter. + + This is a low-level method called by :meth:`get_usage`. 
+ """ pieces = self.collect_usage_pieces(ctx) - formatter.write_usage(ctx.command_path, ' '.join(pieces)) + formatter.write_usage(ctx.command_path, " ".join(pieces)) def collect_usage_pieces(self, ctx): """Returns all the pieces that go into the usage line and returns @@ -859,10 +949,15 @@ def show_help(ctx, param, value): if value and not ctx.resilient_parsing: echo(ctx.get_help(), color=ctx.color) ctx.exit() - return Option(help_options, is_flag=True, - is_eager=True, expose_value=False, - callback=show_help, - help='Show this message and exit.') + + return Option( + help_options, + is_flag=True, + is_eager=True, + expose_value=False, + callback=show_help, + help="Show this message and exit.", + ) def make_parser(self, ctx): """Creates the underlying option parser for this command.""" @@ -872,21 +967,31 @@ def make_parser(self, ctx): return parser def get_help(self, ctx): - """Formats the help into a string and returns it. This creates a - formatter and will call into the following formatting methods: + """Formats the help into a string and returns it. + + Calls :meth:`format_help` internally. """ formatter = ctx.make_formatter() self.format_help(ctx, formatter) - return formatter.getvalue().rstrip('\n') + return formatter.getvalue().rstrip("\n") def get_short_help_str(self, limit=45): - """Gets short help for the command or makes it by shortening the long help string.""" - return self.short_help or self.help and make_default_short_help(self.help, limit) or '' + """Gets short help for the command or makes it by shortening the + long help string. + """ + return ( + self.short_help + or self.help + and make_default_short_help(self.help, limit) + or "" + ) def format_help(self, ctx, formatter): """Writes the help into the formatter if it exists. - This calls into the following methods: + This is a low-level method called by :meth:`get_help`. + + This calls the following methods: - :meth:`format_usage` - :meth:`format_help_text` @@ -921,7 +1026,7 @@ def format_options(self, ctx, formatter): opts.append(rv) if opts: - with formatter.section('Options'): + with formatter.section("Options"): formatter.write_dl(opts) def format_epilog(self, ctx, formatter): @@ -932,17 +1037,22 @@ def format_epilog(self, ctx, formatter): formatter.write_text(self.epilog) def parse_args(self, ctx, args): + if not args and self.no_args_is_help and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + parser = self.make_parser(ctx) opts, args, param_order = parser.parse_args(args=args) - for param in iter_params_for_processing( - param_order, self.get_params(ctx)): + for param in iter_params_for_processing(param_order, self.get_params(ctx)): value, args = param.handle_parse_result(ctx, opts, args) if args and not ctx.allow_extra_args and not ctx.resilient_parsing: - ctx.fail('Got unexpected extra argument%s (%s)' - % (len(args) != 1 and 's' or '', - ' '.join(map(make_str, args)))) + ctx.fail( + "Got unexpected extra argument{} ({})".format( + "s" if len(args) != 1 else "", " ".join(map(make_str, args)) + ) + ) ctx.args = args return args @@ -979,12 +1089,20 @@ class MultiCommand(Command): :param result_callback: the result callback to attach to this multi command. 
""" + allow_extra_args = True allow_interspersed_args = False - def __init__(self, name=None, invoke_without_command=False, - no_args_is_help=None, subcommand_metavar=None, - chain=False, result_callback=None, **attrs): + def __init__( + self, + name=None, + invoke_without_command=False, + no_args_is_help=None, + subcommand_metavar=None, + chain=False, + result_callback=None, + **attrs + ): Command.__init__(self, name, **attrs) if no_args_is_help is None: no_args_is_help = not invoke_without_command @@ -1004,8 +1122,10 @@ def __init__(self, name=None, invoke_without_command=False, if self.chain: for param in self.params: if isinstance(param, Argument) and not param.required: - raise RuntimeError('Multi commands in chain mode cannot ' - 'have optional arguments.') + raise RuntimeError( + "Multi commands in chain mode cannot have" + " optional arguments." + ) def collect_usage_pieces(self, ctx): rv = Command.collect_usage_pieces(self, ctx) @@ -1041,16 +1161,19 @@ def process_result(result, input): :param replace: if set to `True` an already existing result callback will be removed. """ + def decorator(f): old_callback = self.result_callback if old_callback is None or replace: self.result_callback = f return f + def function(__value, *args, **kwargs): - return f(old_callback(__value, *args, **kwargs), - *args, **kwargs) + return f(old_callback(__value, *args, **kwargs), *args, **kwargs) + self.result_callback = rv = update_wrapper(function, f) return rv + return decorator def format_commands(self, ctx, formatter): @@ -1078,7 +1201,7 @@ def format_commands(self, ctx, formatter): rows.append((subcommand, help)) if rows: - with formatter.section('Commands'): + with formatter.section("Commands"): formatter.write_dl(rows) def parse_args(self, ctx, args): @@ -1098,8 +1221,7 @@ def parse_args(self, ctx, args): def invoke(self, ctx): def _process_result(value): if self.result_callback is not None: - value = ctx.invoke(self.result_callback, value, - **ctx.params) + value = ctx.invoke(self.result_callback, value, **ctx.params) return value if not ctx.protected_args: @@ -1115,7 +1237,7 @@ def _process_result(value): with ctx: Command.invoke(self, ctx) return _process_result([]) - ctx.fail('Missing command.') + ctx.fail("Missing command.") # Fetch args back out args = ctx.protected_args + ctx.args @@ -1142,7 +1264,7 @@ def _process_result(value): # set to ``*`` to inform the command that subcommands are executed # but nothing else. with ctx: - ctx.invoked_subcommand = args and '*' or None + ctx.invoked_subcommand = "*" if args else None Command.invoke(self, ctx) # Otherwise we make every single context and invoke them in a @@ -1151,9 +1273,13 @@ def _process_result(value): contexts = [] while args: cmd_name, cmd, args = self.resolve_command(ctx, args) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False) + sub_ctx = cmd.make_context( + cmd_name, + args, + parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False, + ) contexts.append(sub_ctx) args, sub_ctx.args = sub_ctx.args, [] @@ -1185,7 +1311,7 @@ def resolve_command(self, ctx, args): if cmd is None and not ctx.resilient_parsing: if split_opt(cmd_name)[0]: self.parse_args(ctx, ctx.args) - ctx.fail('No such command "%s".' 
% original_cmd_name) + ctx.fail("No such command '{}'.".format(original_cmd_name)) return cmd_name, cmd, args[1:] @@ -1220,7 +1346,7 @@ def add_command(self, cmd, name=None): """ name = name or cmd.name if name is None: - raise TypeError('Command has no name.') + raise TypeError("Command has no name.") _check_multicommand(self, name, cmd, register=True) self.commands[name] = cmd @@ -1230,10 +1356,13 @@ def command(self, *args, **kwargs): immediately registers the created command with this instance by calling into :meth:`add_command`. """ + from .decorators import command + def decorator(f): cmd = command(*args, **kwargs)(f) self.add_command(cmd) return cmd + return decorator def group(self, *args, **kwargs): @@ -1242,10 +1371,13 @@ def group(self, *args, **kwargs): immediately registers the created command with this instance by calling into :meth:`add_command`. """ + from .decorators import group + def decorator(f): cmd = group(*args, **kwargs)(f) self.add_command(cmd) return cmd + return decorator def get_command(self, ctx, cmd_name): @@ -1294,12 +1426,6 @@ class Parameter(object): Some settings are supported by both options and arguments. - .. versionchanged:: 2.0 - Changed signature for parameter callback to also be passed the - parameter. In Click 2.0, the old callback format will still work, - but it will raise a warning to give you change to migrate the - code easier. - :param param_decls: the parameter declarations for this option or argument. This is a list of flags or argument names. @@ -1312,8 +1438,7 @@ class Parameter(object): without any arguments. :param callback: a callback that should be executed after the parameter was matched. This is called as ``fn(ctx, param, - value)`` and needs to return the value. Before Click - 2.0, the signature was ``(ctx, value)``. + value)`` and needs to return the value. :param nargs: the number of arguments to match. If not ``1`` the return value is a tuple instead of single value. The default for nargs is ``1`` (except if the type is a tuple, then it's @@ -1327,15 +1452,36 @@ class Parameter(object): order of processing. :param envvar: a string or list of strings that are environment variables that should be checked. - """ - param_type_name = 'parameter' - def __init__(self, param_decls=None, type=None, required=False, - default=None, callback=None, nargs=None, metavar=None, - expose_value=True, is_eager=False, envvar=None, - autocompletion=None): - self.name, self.opts, self.secondary_opts = \ - self._parse_decls(param_decls or (), expose_value) + .. versionchanged:: 7.1 + Empty environment variables are ignored rather than taking the + empty string value. This makes it possible for scripts to clear + variables if they can't unset them. + + .. versionchanged:: 2.0 + Changed signature for parameter callback to also be passed the + parameter. The old callback format will still work, but it will + raise a warning to give you a chance to migrate the code easier. 
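[Reviewer note] The Parameter changelog above reiterates the 3-argument callback signature. A hedged example of a validating callback in that style; the option and function names are hypothetical. The callback must return the (possibly transformed) value:

    import click

    def validate_positive(ctx, param, value):
        # New-style callback: receives (ctx, param, value).
        if value is not None and value <= 0:
            raise click.BadParameter("must be a positive integer")
        return value

    @click.command()
    @click.option("--jobs", type=int, default=1, callback=validate_positive)
    def run(jobs):
        click.echo(jobs)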
+ """ + param_type_name = "parameter" + + def __init__( + self, + param_decls=None, + type=None, + required=False, + default=None, + callback=None, + nargs=None, + metavar=None, + expose_value=True, + is_eager=False, + envvar=None, + autocompletion=None, + ): + self.name, self.opts, self.secondary_opts = self._parse_decls( + param_decls or (), expose_value + ) self.type = convert_type(type, default) @@ -1358,6 +1504,9 @@ def __init__(self, param_decls=None, type=None, required=False, self.envvar = envvar self.autocompletion = autocompletion + def __repr__(self): + return "<{} {}>".format(self.__class__.__name__, self.name) + @property def human_readable_name(self): """Returns the human readable name of this parameter. This is the @@ -1372,7 +1521,7 @@ def make_metavar(self): if metavar is None: metavar = self.type.name.upper() if self.nargs != 1: - metavar += '...' + metavar += "..." return metavar def get_default(self, ctx): @@ -1402,10 +1551,11 @@ def type_cast_value(self, ctx, value): """ if self.type.is_composite: if self.nargs <= 1: - raise TypeError('Attempted to invoke composite type ' - 'but nargs has been set to %s. This is ' - 'not supported; nargs needs to be set to ' - 'a fixed value > 1.' % self.nargs) + raise TypeError( + "Attempted to invoke composite type but nargs has" + " been set to {}. This is not supported; nargs" + " needs to be set to a fixed value > 1.".format(self.nargs) + ) if self.multiple: return tuple(self.type(x or (), self, ctx) for x in value or ()) return self.type(value or (), self, ctx) @@ -1414,6 +1564,7 @@ def _convert(value, level): if level == 0: return self.type(value, self, ctx) return tuple(_convert(x, level - 1) for x in value or ()) + return _convert(value, (self.nargs != 1) + bool(self.multiple)) def process_value(self, ctx, value): @@ -1454,7 +1605,10 @@ def resolve_envvar_value(self, ctx): if rv is not None: return rv else: - return os.environ.get(self.envvar) + rv = os.environ.get(self.envvar) + + if rv != "": + return rv def value_from_envvar(self, ctx): rv = self.resolve_envvar_value(ctx) @@ -1473,8 +1627,7 @@ def handle_parse_result(self, ctx, opts, args): value = None if self.callback is not None: try: - value = invoke_param_callback( - self.callback, ctx, self, value) + value = invoke_param_callback(self.callback, ctx, self, value) except Exception: if not ctx.resilient_parsing: raise @@ -1494,7 +1647,7 @@ def get_error_hint(self, ctx): indicate which param caused the error. """ hint_list = self.opts or [self.human_readable_name] - return ' / '.join('"%s"' % x for x in hint_list) + return " / ".join(repr(x) for x in hint_list) class Option(Parameter): @@ -1535,19 +1688,33 @@ class Option(Parameter): :param help: the help string. :param hidden: hide this option from help outputs. 
""" - param_type_name = 'option' - - def __init__(self, param_decls=None, show_default=False, - prompt=False, confirmation_prompt=False, - hide_input=False, is_flag=None, flag_value=None, - multiple=False, count=False, allow_from_autoenv=True, - type=None, help=None, hidden=False, show_choices=True, - show_envvar=False, **attrs): - default_is_missing = attrs.get('default', _missing) is _missing + + param_type_name = "option" + + def __init__( + self, + param_decls=None, + show_default=False, + prompt=False, + confirmation_prompt=False, + hide_input=False, + is_flag=None, + flag_value=None, + multiple=False, + count=False, + allow_from_autoenv=True, + type=None, + help=None, + hidden=False, + show_choices=True, + show_envvar=False, + **attrs + ): + default_is_missing = attrs.get("default", _missing) is _missing Parameter.__init__(self, param_decls, type=type, **attrs) if prompt is True: - prompt_text = self.name.replace('_', ' ').capitalize() + prompt_text = self.name.replace("_", " ").capitalize() elif prompt is False: prompt_text = None else: @@ -1569,8 +1736,7 @@ def __init__(self, param_decls=None, show_default=False, flag_value = not self.default self.is_flag = is_flag self.flag_value = flag_value - if self.is_flag and isinstance(self.flag_value, bool) \ - and type is None: + if self.is_flag and isinstance(self.flag_value, bool) and type in [None, bool]: self.type = BOOL self.is_bool_flag = True else: @@ -1594,22 +1760,22 @@ def __init__(self, param_decls=None, show_default=False, # Sanity check for stuff we don't support if __debug__: if self.nargs < 0: - raise TypeError('Options cannot have nargs < 0') + raise TypeError("Options cannot have nargs < 0") if self.prompt and self.is_flag and not self.is_bool_flag: - raise TypeError('Cannot prompt for flags that are not bools.') + raise TypeError("Cannot prompt for flags that are not bools.") if not self.is_bool_flag and self.secondary_opts: - raise TypeError('Got secondary option for non boolean flag.') - if self.is_bool_flag and self.hide_input \ - and self.prompt is not None: - raise TypeError('Hidden input does not work with boolean ' - 'flag prompts.') + raise TypeError("Got secondary option for non boolean flag.") + if self.is_bool_flag and self.hide_input and self.prompt is not None: + raise TypeError("Hidden input does not work with boolean flag prompts.") if self.count: if self.multiple: - raise TypeError('Options cannot be multiple and count ' - 'at the same time.') + raise TypeError( + "Options cannot be multiple and count at the same time." + ) elif self.is_flag: - raise TypeError('Options cannot be count and flags at ' - 'the same time.') + raise TypeError( + "Options cannot be count and flags at the same time." 
+ ) def _parse_decls(self, decls, expose_value): opts = [] @@ -1620,10 +1786,10 @@ def _parse_decls(self, decls, expose_value): for decl in decls: if isidentifier(decl): if name is not None: - raise TypeError('Name defined twice') + raise TypeError("Name defined twice") name = decl else: - split_char = decl[:1] == '/' and ';' or '/' + split_char = ";" if decl[:1] == "/" else "/" if split_char in decl: first, second = decl.split(split_char, 1) first = first.rstrip() @@ -1639,49 +1805,51 @@ def _parse_decls(self, decls, expose_value): if name is None and possible_names: possible_names.sort(key=lambda x: -len(x[0])) # group long options first - name = possible_names[0][1].replace('-', '_').lower() + name = possible_names[0][1].replace("-", "_").lower() if not isidentifier(name): name = None if name is None: if not expose_value: return None, opts, secondary_opts - raise TypeError('Could not determine name for option') + raise TypeError("Could not determine name for option") if not opts and not secondary_opts: - raise TypeError('No options defined but a name was passed (%s). ' - 'Did you mean to declare an argument instead ' - 'of an option?' % name) + raise TypeError( + "No options defined but a name was passed ({}). Did you" + " mean to declare an argument instead of an option?".format(name) + ) return name, opts, secondary_opts def add_to_parser(self, parser, ctx): kwargs = { - 'dest': self.name, - 'nargs': self.nargs, - 'obj': self, + "dest": self.name, + "nargs": self.nargs, + "obj": self, } if self.multiple: - action = 'append' + action = "append" elif self.count: - action = 'count' + action = "count" else: - action = 'store' + action = "store" if self.is_flag: - kwargs.pop('nargs', None) + kwargs.pop("nargs", None) + action_const = "{}_const".format(action) if self.is_bool_flag and self.secondary_opts: - parser.add_option(self.opts, action=action + '_const', - const=True, **kwargs) - parser.add_option(self.secondary_opts, action=action + - '_const', const=False, **kwargs) + parser.add_option(self.opts, action=action_const, const=True, **kwargs) + parser.add_option( + self.secondary_opts, action=action_const, const=False, **kwargs + ) else: - parser.add_option(self.opts, action=action + '_const', - const=self.flag_value, - **kwargs) + parser.add_option( + self.opts, action=action_const, const=self.flag_value, **kwargs + ) else: - kwargs['action'] = action + kwargs["action"] = action parser.add_option(self.opts, **kwargs) def get_help_record(self, ctx): @@ -1694,46 +1862,50 @@ def _write_opts(opts): if any_slashes: any_prefix_is_slash[:] = [True] if not self.is_flag and not self.count: - rv += ' ' + self.make_metavar() + rv += " {}".format(self.make_metavar()) return rv rv = [_write_opts(self.opts)] if self.secondary_opts: rv.append(_write_opts(self.secondary_opts)) - help = self.help or '' + help = self.help or "" extra = [] if self.show_envvar: envvar = self.envvar if envvar is None: - if self.allow_from_autoenv and \ - ctx.auto_envvar_prefix is not None: - envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper()) + if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None: + envvar = "{}_{}".format(ctx.auto_envvar_prefix, self.name.upper()) if envvar is not None: - extra.append('env var: %s' % ( - ', '.join('%s' % d for d in envvar) - if isinstance(envvar, (list, tuple)) - else envvar, )) - if self.default is not None and self.show_default: + extra.append( + "env var: {}".format( + ", ".join(str(d) for d in envvar) + if isinstance(envvar, (list, tuple)) + else envvar + ) + 
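[Reviewer note] _parse_decls above splits option declarations on "/" into the primary (true) and secondary (false) forms of a boolean flag. A hedged usage sketch with hypothetical names:

    import click

    @click.command()
    @click.option("--shout/--no-shout", default=False)
    def say(shout):
        click.echo("HI!" if shout else "hi")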
) + if self.default is not None and (self.show_default or ctx.show_default): if isinstance(self.show_default, string_types): - default_string = '({})'.format(self.show_default) + default_string = "({})".format(self.show_default) elif isinstance(self.default, (list, tuple)): - default_string = ', '.join('%s' % d for d in self.default) + default_string = ", ".join(str(d) for d in self.default) elif inspect.isfunction(self.default): default_string = "(dynamic)" else: default_string = self.default - extra.append('default: {}'.format(default_string)) + extra.append("default: {}".format(default_string)) if self.required: - extra.append('required') + extra.append("required") if extra: - help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra)) + help = "{}[{}]".format( + "{} ".format(help) if help else "", "; ".join(extra) + ) - return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help) + return ("; " if any_prefix_is_slash else " / ").join(rv), help def get_default(self, ctx): - # If we're a non boolean flag out default is more complex because + # If we're a non boolean flag our default is more complex because # we need to look at all flags in the same group to figure out # if we're the the default one in which case we return the flag # value as default. @@ -1758,18 +1930,22 @@ def prompt_for_value(self, ctx): if self.is_bool_flag: return confirm(self.prompt, default) - return prompt(self.prompt, default=default, type=self.type, - hide_input=self.hide_input, show_choices=self.show_choices, - confirmation_prompt=self.confirmation_prompt, - value_proc=lambda x: self.process_value(ctx, x)) + return prompt( + self.prompt, + default=default, + type=self.type, + hide_input=self.hide_input, + show_choices=self.show_choices, + confirmation_prompt=self.confirmation_prompt, + value_proc=lambda x: self.process_value(ctx, x), + ) def resolve_envvar_value(self, ctx): rv = Parameter.resolve_envvar_value(self, ctx) if rv is not None: return rv - if self.allow_from_autoenv and \ - ctx.auto_envvar_prefix is not None: - envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper()) + if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None: + envvar = "{}_{}".format(ctx.auto_envvar_prefix, self.name.upper()) return os.environ.get(envvar) def value_from_envvar(self, ctx): @@ -1784,8 +1960,7 @@ def value_from_envvar(self, ctx): return rv def full_process_value(self, ctx, value): - if value is None and self.prompt is not None \ - and not ctx.resilient_parsing: + if value is None and self.prompt is not None and not ctx.resilient_parsing: return self.prompt_for_value(ctx) return Parameter.full_process_value(self, ctx, value) @@ -1797,18 +1972,20 @@ class Argument(Parameter): All parameters are passed onwards to the parameter constructor. """ - param_type_name = 'argument' + + param_type_name = "argument" def __init__(self, param_decls, required=None, **attrs): if required is None: - if attrs.get('default') is not None: + if attrs.get("default") is not None: required = False else: - required = attrs.get('nargs', 1) > 0 + required = attrs.get("nargs", 1) > 0 Parameter.__init__(self, param_decls, required=required, **attrs) if self.default is not None and self.nargs < 0: - raise TypeError('nargs=-1 in combination with a default value ' - 'is not supported.') + raise TypeError( + "nargs=-1 in combination with a default value is not supported." 
+ ) @property def human_readable_name(self): @@ -1823,34 +2000,31 @@ def make_metavar(self): if not var: var = self.name.upper() if not self.required: - var = '[%s]' % var + var = "[{}]".format(var) if self.nargs != 1: - var += '...' + var += "..." return var def _parse_decls(self, decls, expose_value): if not decls: if not expose_value: return None, [], [] - raise TypeError('Could not determine name for argument') + raise TypeError("Could not determine name for argument") if len(decls) == 1: name = arg = decls[0] - name = name.replace('-', '_').lower() + name = name.replace("-", "_").lower() else: - raise TypeError('Arguments take exactly one ' - 'parameter declaration, got %d' % len(decls)) + raise TypeError( + "Arguments take exactly one parameter declaration, got" + " {}".format(len(decls)) + ) return name, [arg], [] def get_usage_pieces(self, ctx): return [self.make_metavar()] def get_error_hint(self, ctx): - return '"%s"' % self.make_metavar() + return repr(self.make_metavar()) def add_to_parser(self, parser, ctx): - parser.add_argument(dest=self.name, nargs=self.nargs, - obj=self) - - -# Circular dependency between decorators and core -from .decorators import command, group + parser.add_argument(dest=self.name, nargs=self.nargs, obj=self) diff --git a/pipenv/vendor/click/decorators.py b/pipenv/vendor/click/decorators.py index c57c530861..c7b5af6cc5 100644 --- a/pipenv/vendor/click/decorators.py +++ b/pipenv/vendor/click/decorators.py @@ -1,20 +1,25 @@ -import sys import inspect - +import sys from functools import update_wrapper from ._compat import iteritems from ._unicodefun import _check_for_unicode_literals -from .utils import echo +from .core import Argument +from .core import Command +from .core import Group +from .core import Option from .globals import get_current_context +from .utils import echo def pass_context(f): """Marks a callback as wanting to receive the current context object as first argument. """ + def new_func(*args, **kwargs): return f(get_current_context(), *args, **kwargs) + return update_wrapper(new_func, f) @@ -23,8 +28,10 @@ def pass_obj(f): context onwards (:attr:`Context.obj`). This is useful if that object represents the state of a nested system. """ + def new_func(*args, **kwargs): return f(get_current_context().obj, *args, **kwargs) + return update_wrapper(new_func, f) @@ -50,6 +57,7 @@ def new_func(ctx, *args, **kwargs): :param ensure: if set to `True`, a new object will be created and remembered on the context if it's not there yet. 
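[Reviewer note] pass_context and pass_obj above hand the current context, or just its obj attribute, to the decorated callback. A hedged sketch of the usual group/subcommand pairing; the commands and the dict stored on ctx.obj are hypothetical:

    import click

    @click.group()
    @click.pass_context
    def cli(ctx):
        ctx.obj = {"verbose": True}

    @cli.command()
    @click.pass_obj
    def status(obj):
        click.echo(obj["verbose"])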
""" + def decorator(f): def new_func(*args, **kwargs): ctx = get_current_context() @@ -58,35 +66,41 @@ def new_func(*args, **kwargs): else: obj = ctx.find_object(object_type) if obj is None: - raise RuntimeError('Managed to invoke callback without a ' - 'context object of type %r existing' - % object_type.__name__) + raise RuntimeError( + "Managed to invoke callback without a context" + " object of type '{}' existing".format(object_type.__name__) + ) return ctx.invoke(f, obj, *args, **kwargs) + return update_wrapper(new_func, f) + return decorator def _make_command(f, name, attrs, cls): if isinstance(f, Command): - raise TypeError('Attempted to convert a callback into a ' - 'command twice.') + raise TypeError("Attempted to convert a callback into a command twice.") try: params = f.__click_params__ params.reverse() del f.__click_params__ except AttributeError: params = [] - help = attrs.get('help') + help = attrs.get("help") if help is None: help = inspect.getdoc(f) if isinstance(help, bytes): - help = help.decode('utf-8') + help = help.decode("utf-8") else: help = inspect.cleandoc(help) - attrs['help'] = help + attrs["help"] = help _check_for_unicode_literals() - return cls(name=name or f.__name__.lower().replace('_', '-'), - callback=f, params=params, **attrs) + return cls( + name=name or f.__name__.lower().replace("_", "-"), + callback=f, + params=params, + **attrs + ) def command(name=None, cls=None, **attrs): @@ -94,9 +108,9 @@ def command(name=None, cls=None, **attrs): callback. This will also automatically attach all decorated :func:`option`\s and :func:`argument`\s as parameters to the command. - The name of the command defaults to the name of the function. If you - want to change that, you can pass the intended name as the first - argument. + The name of the command defaults to the name of the function with + underscores replaced by dashes. If you want to change that, you can + pass the intended name as the first argument. All keyword arguments are forwarded to the underlying command class. @@ -111,10 +125,12 @@ def command(name=None, cls=None, **attrs): """ if cls is None: cls = Command + def decorator(f): cmd = _make_command(f, name, attrs, cls) cmd.__doc__ = f.__doc__ return cmd + return decorator @@ -123,7 +139,7 @@ def group(name=None, **attrs): works otherwise the same as :func:`command` just that the `cls` parameter is set to :class:`Group`. """ - attrs.setdefault('cls', Group) + attrs.setdefault("cls", Group) return command(name, **attrs) @@ -131,7 +147,7 @@ def _param_memo(f, param): if isinstance(f, Command): f.params.append(param) else: - if not hasattr(f, '__click_params__'): + if not hasattr(f, "__click_params__"): f.__click_params__ = [] f.__click_params__.append(param) @@ -146,10 +162,12 @@ def argument(*param_decls, **attrs): :param cls: the argument class to instantiate. This defaults to :class:`Argument`. """ + def decorator(f): - ArgumentClass = attrs.pop('cls', Argument) + ArgumentClass = attrs.pop("cls", Argument) _param_memo(f, ArgumentClass(param_decls, **attrs)) return f + return decorator @@ -163,15 +181,17 @@ def option(*param_decls, **attrs): :param cls: the option class to instantiate. This defaults to :class:`Option`. 
""" + def decorator(f): # Issue 926, copy attrs, so pre-defined options can re-use the same cls= option_attrs = attrs.copy() - if 'help' in option_attrs: - option_attrs['help'] = inspect.cleandoc(option_attrs['help']) - OptionClass = option_attrs.pop('cls', Option) + if "help" in option_attrs: + option_attrs["help"] = inspect.cleandoc(option_attrs["help"]) + OptionClass = option_attrs.pop("cls", Option) _param_memo(f, OptionClass(param_decls, **option_attrs)) return f + return decorator @@ -192,16 +212,19 @@ def callback(ctx, param, value): def dropdb(): pass """ + def decorator(f): def callback(ctx, param, value): if not value: ctx.abort() - attrs.setdefault('is_flag', True) - attrs.setdefault('callback', callback) - attrs.setdefault('expose_value', False) - attrs.setdefault('prompt', 'Do you want to continue?') - attrs.setdefault('help', 'Confirm the action without prompting.') - return option(*(param_decls or ('--yes',)), **attrs)(f) + + attrs.setdefault("is_flag", True) + attrs.setdefault("callback", callback) + attrs.setdefault("expose_value", False) + attrs.setdefault("prompt", "Do you want to continue?") + attrs.setdefault("help", "Confirm the action without prompting.") + return option(*(param_decls or ("--yes",)), **attrs)(f) + return decorator @@ -217,11 +240,13 @@ def password_option(*param_decls, **attrs): def changeadmin(password): pass """ + def decorator(f): - attrs.setdefault('prompt', True) - attrs.setdefault('confirmation_prompt', True) - attrs.setdefault('hide_input', True) - return option(*(param_decls or ('--password',)), **attrs)(f) + attrs.setdefault("prompt", True) + attrs.setdefault("confirmation_prompt", True) + attrs.setdefault("hide_input", True) + return option(*(param_decls or ("--password",)), **attrs)(f) + return decorator @@ -238,14 +263,14 @@ def version_option(version=None, *param_decls, **attrs): :param others: everything else is forwarded to :func:`option`. 
""" if version is None: - if hasattr(sys, '_getframe'): - module = sys._getframe(1).f_globals.get('__name__') + if hasattr(sys, "_getframe"): + module = sys._getframe(1).f_globals.get("__name__") else: - module = '' + module = "" def decorator(f): - prog_name = attrs.pop('prog_name', None) - message = attrs.pop('message', '%(prog)s, version %(version)s') + prog_name = attrs.pop("prog_name", None) + message = attrs.pop("message", "%(prog)s, version %(version)s") def callback(ctx, param, value): if not value or ctx.resilient_parsing: @@ -261,25 +286,23 @@ def callback(ctx, param, value): pass else: for dist in pkg_resources.working_set: - scripts = dist.get_entry_map().get('console_scripts') or {} - for script_name, entry_point in iteritems(scripts): + scripts = dist.get_entry_map().get("console_scripts") or {} + for _, entry_point in iteritems(scripts): if entry_point.module_name == module: ver = dist.version break if ver is None: - raise RuntimeError('Could not determine version') - echo(message % { - 'prog': prog, - 'version': ver, - }, color=ctx.color) + raise RuntimeError("Could not determine version") + echo(message % {"prog": prog, "version": ver}, color=ctx.color) ctx.exit() - attrs.setdefault('is_flag', True) - attrs.setdefault('expose_value', False) - attrs.setdefault('is_eager', True) - attrs.setdefault('help', 'Show the version and exit.') - attrs['callback'] = callback - return option(*(param_decls or ('--version',)), **attrs)(f) + attrs.setdefault("is_flag", True) + attrs.setdefault("expose_value", False) + attrs.setdefault("is_eager", True) + attrs.setdefault("help", "Show the version and exit.") + attrs["callback"] = callback + return option(*(param_decls or ("--version",)), **attrs)(f) + return decorator @@ -293,19 +316,18 @@ def help_option(*param_decls, **attrs): All arguments are forwarded to :func:`option`. 
""" + def decorator(f): def callback(ctx, param, value): if value and not ctx.resilient_parsing: echo(ctx.get_help(), color=ctx.color) ctx.exit() - attrs.setdefault('is_flag', True) - attrs.setdefault('expose_value', False) - attrs.setdefault('help', 'Show this message and exit.') - attrs.setdefault('is_eager', True) - attrs['callback'] = callback - return option(*(param_decls or ('--help',)), **attrs)(f) - return decorator + attrs.setdefault("is_flag", True) + attrs.setdefault("expose_value", False) + attrs.setdefault("help", "Show this message and exit.") + attrs.setdefault("is_eager", True) + attrs["callback"] = callback + return option(*(param_decls or ("--help",)), **attrs)(f) -# Circular dependencies between core and decorators -from .core import Command, Group, Argument, Option + return decorator diff --git a/pipenv/vendor/click/exceptions.py b/pipenv/vendor/click/exceptions.py index 6fa17658cb..592ee38f0d 100644 --- a/pipenv/vendor/click/exceptions.py +++ b/pipenv/vendor/click/exceptions.py @@ -1,10 +1,12 @@ -from ._compat import PY2, filename_to_ui, get_text_stderr +from ._compat import filename_to_ui +from ._compat import get_text_stderr +from ._compat import PY2 from .utils import echo def _join_param_hints(param_hint): if isinstance(param_hint, (tuple, list)): - return ' / '.join('"%s"' % x for x in param_hint) + return " / ".join(repr(x) for x in param_hint) return param_hint @@ -18,7 +20,7 @@ def __init__(self, message): ctor_msg = message if PY2: if ctor_msg is not None: - ctor_msg = ctor_msg.encode('utf-8') + ctor_msg = ctor_msg.encode("utf-8") Exception.__init__(self, ctor_msg) self.message = message @@ -32,12 +34,12 @@ def __str__(self): __unicode__ = __str__ def __str__(self): - return self.message.encode('utf-8') + return self.message.encode("utf-8") def show(self, file=None): if file is None: file = get_text_stderr() - echo('Error: %s' % self.format_message(), file=file) + echo("Error: {}".format(self.format_message()), file=file) class UsageError(ClickException): @@ -48,26 +50,27 @@ class UsageError(ClickException): :param ctx: optionally the context that caused this error. Click will fill in the context automatically in some situations. """ + exit_code = 2 def __init__(self, message, ctx=None): ClickException.__init__(self, message) self.ctx = ctx - self.cmd = self.ctx and self.ctx.command or None + self.cmd = self.ctx.command if self.ctx else None def show(self, file=None): if file is None: file = get_text_stderr() color = None - hint = '' - if (self.cmd is not None and - self.cmd.get_help_option(self.ctx) is not None): - hint = ('Try "%s %s" for help.\n' - % (self.ctx.command_path, self.ctx.help_option_names[0])) + hint = "" + if self.cmd is not None and self.cmd.get_help_option(self.ctx) is not None: + hint = "Try '{} {}' for help.\n".format( + self.ctx.command_path, self.ctx.help_option_names[0] + ) if self.ctx is not None: color = self.ctx.color - echo(self.ctx.get_usage() + '\n%s' % hint, file=file, color=color) - echo('Error: %s' % self.format_message(), file=file, color=color) + echo("{}\n{}".format(self.ctx.get_usage(), hint), file=file, color=color) + echo("Error: {}".format(self.format_message()), file=file, color=color) class BadParameter(UsageError): @@ -88,8 +91,7 @@ class BadParameter(UsageError): each item is quoted and separated. 
""" - def __init__(self, message, ctx=None, param=None, - param_hint=None): + def __init__(self, message, ctx=None, param=None, param_hint=None): UsageError.__init__(self, message, ctx) self.param = param self.param_hint = param_hint @@ -100,10 +102,10 @@ def format_message(self): elif self.param is not None: param_hint = self.param.get_error_hint(self.ctx) else: - return 'Invalid value: %s' % self.message + return "Invalid value: {}".format(self.message) param_hint = _join_param_hints(param_hint) - return 'Invalid value for %s: %s' % (param_hint, self.message) + return "Invalid value for {}: {}".format(param_hint, self.message) class MissingParameter(BadParameter): @@ -118,8 +120,9 @@ class MissingParameter(BadParameter): ``'option'`` or ``'argument'``. """ - def __init__(self, message=None, ctx=None, param=None, - param_hint=None, param_type=None): + def __init__( + self, message=None, ctx=None, param=None, param_hint=None, param_type=None + ): BadParameter.__init__(self, message, ctx, param, param_hint) self.param_type = param_type @@ -141,17 +144,30 @@ def format_message(self): msg_extra = self.param.type.get_missing_message(self.param) if msg_extra: if msg: - msg += '. ' + msg_extra + msg += ". {}".format(msg_extra) else: msg = msg_extra - return 'Missing %s%s%s%s' % ( + return "Missing {}{}{}{}".format( param_type, - param_hint and ' %s' % param_hint or '', - msg and '. ' or '.', - msg or '', + " {}".format(param_hint) if param_hint else "", + ". " if msg else ".", + msg or "", ) + def __str__(self): + if self.message is None: + param_name = self.param.name if self.param else None + return "missing parameter: {}".format(param_name) + else: + return self.message + + if PY2: + __unicode__ = __str__ + + def __str__(self): + return self.__unicode__().encode("utf-8") + class NoSuchOption(UsageError): """Raised if click attempted to handle an option that does not @@ -160,10 +176,9 @@ class NoSuchOption(UsageError): .. versionadded:: 4.0 """ - def __init__(self, option_name, message=None, possibilities=None, - ctx=None): + def __init__(self, option_name, message=None, possibilities=None, ctx=None): if message is None: - message = 'no such option: %s' % option_name + message = "no such option: {}".format(option_name) UsageError.__init__(self, message, ctx) self.option_name = option_name self.possibilities = possibilities @@ -172,11 +187,11 @@ def format_message(self): bits = [self.message] if self.possibilities: if len(self.possibilities) == 1: - bits.append('Did you mean %s?' % self.possibilities[0]) + bits.append("Did you mean {}?".format(self.possibilities[0])) else: possibilities = sorted(self.possibilities) - bits.append('(Possible options: %s)' % ', '.join(possibilities)) - return ' '.join(bits) + bits.append("(Possible options: {})".format(", ".join(possibilities))) + return " ".join(bits) class BadOptionUsage(UsageError): @@ -212,13 +227,13 @@ class FileError(ClickException): def __init__(self, filename, hint=None): ui_filename = filename_to_ui(filename) if hint is None: - hint = 'unknown error' + hint = "unknown error" ClickException.__init__(self, hint) self.ui_filename = ui_filename self.filename = filename def format_message(self): - return 'Could not open file %s: %s' % (self.ui_filename, self.message) + return "Could not open file {}: {}".format(self.ui_filename, self.message) class Abort(RuntimeError): @@ -231,5 +246,8 @@ class Exit(RuntimeError): :param code: the status code to exit with. 
""" + + __slots__ = ("exit_code",) + def __init__(self, code=0): self.exit_code = code diff --git a/pipenv/vendor/click/formatting.py b/pipenv/vendor/click/formatting.py index a3d6a4d389..319c7f6163 100644 --- a/pipenv/vendor/click/formatting.py +++ b/pipenv/vendor/click/formatting.py @@ -1,8 +1,8 @@ from contextlib import contextmanager -from .termui import get_terminal_size -from .parser import split_opt -from ._compat import term_len +from ._compat import term_len +from .parser import split_opt +from .termui import get_terminal_size # Can force a width. This is used by the test system FORCED_WIDTH = None @@ -19,11 +19,12 @@ def measure_table(rows): def iter_rows(rows, col_count): for row in rows: row = tuple(row) - yield row + ('',) * (col_count - len(row)) + yield row + ("",) * (col_count - len(row)) -def wrap_text(text, width=78, initial_indent='', subsequent_indent='', - preserve_paragraphs=False): +def wrap_text( + text, width=78, initial_indent="", subsequent_indent="", preserve_paragraphs=False +): """A helper function that intelligently wraps text. By default, it assumes that it operates on a single paragraph of text but if the `preserve_paragraphs` parameter is provided it will intelligently @@ -43,10 +44,14 @@ def wrap_text(text, width=78, initial_indent='', subsequent_indent='', intelligently handle paragraphs. """ from ._textwrap import TextWrapper + text = text.expandtabs() - wrapper = TextWrapper(width, initial_indent=initial_indent, - subsequent_indent=subsequent_indent, - replace_whitespace=False) + wrapper = TextWrapper( + width, + initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + replace_whitespace=False, + ) if not preserve_paragraphs: return wrapper.fill(text) @@ -57,10 +62,10 @@ def wrap_text(text, width=78, initial_indent='', subsequent_indent='', def _flush_par(): if not buf: return - if buf[0].strip() == '\b': - p.append((indent or 0, True, '\n'.join(buf[1:]))) + if buf[0].strip() == "\b": + p.append((indent or 0, True, "\n".join(buf[1:]))) else: - p.append((indent or 0, False, ' '.join(buf))) + p.append((indent or 0, False, " ".join(buf))) del buf[:] for line in text.splitlines(): @@ -77,13 +82,13 @@ def _flush_par(): rv = [] for indent, raw, text in p: - with wrapper.extra_indent(' ' * indent): + with wrapper.extra_indent(" " * indent): if raw: rv.append(wrapper.indent_only(text)) else: rv.append(wrapper.fill(text)) - return '\n\n'.join(rv) + return "\n\n".join(rv) class HelpFormatter(object): @@ -122,53 +127,65 @@ def dedent(self): """Decreases the indentation.""" self.current_indent -= self.indent_increment - def write_usage(self, prog, args='', prefix='Usage: '): + def write_usage(self, prog, args="", prefix="Usage: "): """Writes a usage line into the buffer. :param prog: the program name. :param args: whitespace separated list of arguments. :param prefix: the prefix for the first line. """ - usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog) + usage_prefix = "{:>{w}}{} ".format(prefix, prog, w=self.current_indent) text_width = self.width - self.current_indent if text_width >= (term_len(usage_prefix) + 20): # The arguments will fit to the right of the prefix. - indent = ' ' * term_len(usage_prefix) - self.write(wrap_text(args, text_width, - initial_indent=usage_prefix, - subsequent_indent=indent)) + indent = " " * term_len(usage_prefix) + self.write( + wrap_text( + args, + text_width, + initial_indent=usage_prefix, + subsequent_indent=indent, + ) + ) else: # The prefix is too long, put the arguments on the next line. 
self.write(usage_prefix) - self.write('\n') - indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4) - self.write(wrap_text(args, text_width, - initial_indent=indent, - subsequent_indent=indent)) + self.write("\n") + indent = " " * (max(self.current_indent, term_len(prefix)) + 4) + self.write( + wrap_text( + args, text_width, initial_indent=indent, subsequent_indent=indent + ) + ) - self.write('\n') + self.write("\n") def write_heading(self, heading): """Writes a heading into the buffer.""" - self.write('%*s%s:\n' % (self.current_indent, '', heading)) + self.write("{:>{w}}{}:\n".format("", heading, w=self.current_indent)) def write_paragraph(self): """Writes a paragraph into the buffer.""" if self.buffer: - self.write('\n') + self.write("\n") def write_text(self, text): """Writes re-indented text into the buffer. This rewraps and preserves paragraphs. """ text_width = max(self.width - self.current_indent, 11) - indent = ' ' * self.current_indent - self.write(wrap_text(text, text_width, - initial_indent=indent, - subsequent_indent=indent, - preserve_paragraphs=True)) - self.write('\n') + indent = " " * self.current_indent + self.write( + wrap_text( + text, + text_width, + initial_indent=indent, + subsequent_indent=indent, + preserve_paragraphs=True, + ) + ) + self.write("\n") def write_dl(self, rows, col_max=30, col_spacing=2): """Writes a definition list into the buffer. This is how options @@ -182,30 +199,40 @@ def write_dl(self, rows, col_max=30, col_spacing=2): rows = list(rows) widths = measure_table(rows) if len(widths) != 2: - raise TypeError('Expected two columns for definition list') + raise TypeError("Expected two columns for definition list") first_col = min(widths[0], col_max) + col_spacing for first, second in iter_rows(rows, len(widths)): - self.write('%*s%s' % (self.current_indent, '', first)) + self.write("{:>{w}}{}".format("", first, w=self.current_indent)) if not second: - self.write('\n') + self.write("\n") continue if term_len(first) <= first_col - col_spacing: - self.write(' ' * (first_col - term_len(first))) + self.write(" " * (first_col - term_len(first))) else: - self.write('\n') - self.write(' ' * (first_col + self.current_indent)) + self.write("\n") + self.write(" " * (first_col + self.current_indent)) text_width = max(self.width - first_col - 2, 10) - lines = iter(wrap_text(second, text_width).splitlines()) + wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True) + lines = wrapped_text.splitlines() + if lines: - self.write(next(lines) + '\n') - for line in lines: - self.write('%*s%s\n' % ( - first_col + self.current_indent, '', line)) + self.write("{}\n".format(lines[0])) + + for line in lines[1:]: + self.write( + "{:>{w}}{}\n".format( + "", line, w=first_col + self.current_indent + ) + ) + + if len(lines) > 1: + # separate long help from next option + self.write("\n") else: - self.write('\n') + self.write("\n") @contextmanager def section(self, name): @@ -233,7 +260,7 @@ def indentation(self): def getvalue(self): """Returns the buffer contents.""" - return ''.join(self.buffer) + return "".join(self.buffer) def join_options(options): @@ -246,11 +273,11 @@ def join_options(options): any_prefix_is_slash = False for opt in options: prefix = split_opt(opt)[0] - if prefix == '/': + if prefix == "/": any_prefix_is_slash = True rv.append((len(prefix), opt)) rv.sort(key=lambda x: x[0]) - rv = ', '.join(x[1] for x in rv) + rv = ", ".join(x[1] for x in rv) return rv, any_prefix_is_slash diff --git a/pipenv/vendor/click/globals.py 
b/pipenv/vendor/click/globals.py
index 843b594abe..1649f9a0bf 100644
--- a/pipenv/vendor/click/globals.py
+++ b/pipenv/vendor/click/globals.py
@@ -1,6 +1,5 @@
 from threading import local
 
-
 _local = local()
 
 
@@ -15,20 +14,20 @@ def get_current_context(silent=False):
 
     .. versionadded:: 5.0
 
-    :param silent: is set to `True` the return value is `None` if no context
+    :param silent: if set to `True` the return value is `None` if no context
                    is available.  The default behavior is to raise a
                    :exc:`RuntimeError`.
     """
     try:
-        return getattr(_local, 'stack')[-1]
+        return _local.stack[-1]
     except (AttributeError, IndexError):
         if not silent:
-            raise RuntimeError('There is no active click context.')
+            raise RuntimeError("There is no active click context.")
 
 
 def push_context(ctx):
     """Pushes a new context to the current stack."""
-    _local.__dict__.setdefault('stack', []).append(ctx)
+    _local.__dict__.setdefault("stack", []).append(ctx)
 
 
 def pop_context():
diff --git a/pipenv/vendor/click/parser.py b/pipenv/vendor/click/parser.py
index 1c3ae9c8ef..f43ebfe9fc 100644
--- a/pipenv/vendor/click/parser.py
+++ b/pipenv/vendor/click/parser.py
@@ -1,8 +1,5 @@
 # -*- coding: utf-8 -*-
 """
-click.parser
-~~~~~~~~~~~~
-
 This module started out as largely a copy paste from the stdlib's
 optparse module with the features removed that we do not need from
 optparse because we implement them in Click on a higher level (for
@@ -14,12 +11,20 @@
 is that there are differences in 2.x and 3.x about the error messages
 generated and optparse in the stdlib uses gettext for no good reason
 and might cause us issues.
-"""
+Click uses parts of optparse written by Gregory P. Ward and maintained
+by the Python Software Foundation. This is limited to code in parser.py.
+
+Copyright 2001-2006 Gregory P. Ward. All rights reserved.
+Copyright 2002-2006 Python Software Foundation. All rights reserved.
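The ``get_current_context`` helper patched above can be exercised without
``pass_context``; a minimal sketch, assuming standalone click::

    import click

    @click.command()
    def status():
        # Same object that @click.pass_context would have injected.
        ctx = click.get_current_context()
        click.echo("command path: {}".format(ctx.command_path))

    if __name__ == "__main__":
        status()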
+""" import re from collections import deque -from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \ - BadArgumentUsage + +from .exceptions import BadArgumentUsage +from .exceptions import BadOptionUsage +from .exceptions import NoSuchOption +from .exceptions import UsageError def _unpack_args(args, nargs_spec): @@ -59,7 +64,7 @@ def _fetch(c): rv.append(tuple(x)) elif nargs < 0: if spos is not None: - raise TypeError('Cannot have two nargs < 0') + raise TypeError("Cannot have two nargs < 0") spos = len(rv) rv.append(None) @@ -68,21 +73,21 @@ def _fetch(c): if spos is not None: rv[spos] = tuple(args) args = [] - rv[spos + 1:] = reversed(rv[spos + 1:]) + rv[spos + 1 :] = reversed(rv[spos + 1 :]) return tuple(rv), list(args) def _error_opt_args(nargs, opt): if nargs == 1: - raise BadOptionUsage(opt, '%s option requires an argument' % opt) - raise BadOptionUsage(opt, '%s option requires %d arguments' % (opt, nargs)) + raise BadOptionUsage(opt, "{} option requires an argument".format(opt)) + raise BadOptionUsage(opt, "{} option requires {} arguments".format(opt, nargs)) def split_opt(opt): first = opt[:1] if first.isalnum(): - return '', opt + return "", opt if opt[1:2] == first: return opt[:2], opt[2:] return first, opt[1:] @@ -98,13 +103,14 @@ def normalize_opt(opt, ctx): def split_arg_string(string): """Given an argument string this attempts to split it into small parts.""" rv = [] - for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'" - r'|"([^"\\]*(?:\\.[^"\\]*)*)"' - r'|\S+)\s*', string, re.S): + for match in re.finditer( + r"('([^'\\]*(?:\\.[^'\\]*)*)'|\"([^\"\\]*(?:\\.[^\"\\]*)*)\"|\S+)\s*", + string, + re.S, + ): arg = match.group().strip() - if arg[:1] == arg[-1:] and arg[:1] in '"\'': - arg = arg[1:-1].encode('ascii', 'backslashreplace') \ - .decode('unicode-escape') + if arg[:1] == arg[-1:] and arg[:1] in "\"'": + arg = arg[1:-1].encode("ascii", "backslashreplace").decode("unicode-escape") try: arg = type(string)(arg) except UnicodeError: @@ -114,7 +120,6 @@ def split_arg_string(string): class Option(object): - def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): self._short_opts = [] self._long_opts = [] @@ -123,8 +128,7 @@ def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): for opt in opts: prefix, value = split_opt(opt) if not prefix: - raise ValueError('Invalid start character for option (%s)' - % opt) + raise ValueError("Invalid start character for option ({})".format(opt)) self.prefixes.add(prefix[0]) if len(prefix) == 1 and len(value) == 1: self._short_opts.append(opt) @@ -133,7 +137,7 @@ def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): self.prefixes.add(prefix) if action is None: - action = 'store' + action = "store" self.dest = dest self.action = action @@ -143,26 +147,25 @@ def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): @property def takes_value(self): - return self.action in ('store', 'append') + return self.action in ("store", "append") def process(self, value, state): - if self.action == 'store': + if self.action == "store": state.opts[self.dest] = value - elif self.action == 'store_const': + elif self.action == "store_const": state.opts[self.dest] = self.const - elif self.action == 'append': + elif self.action == "append": state.opts.setdefault(self.dest, []).append(value) - elif self.action == 'append_const': + elif self.action == "append_const": state.opts.setdefault(self.dest, []).append(self.const) - elif self.action == 'count': + elif self.action 
== "count": state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 else: - raise ValueError('unknown action %r' % self.action) + raise ValueError("unknown action '{}'".format(self.action)) state.order.append(self.obj) class Argument(object): - def __init__(self, dest, nargs=1, obj=None): self.dest = dest self.nargs = nargs @@ -174,14 +177,14 @@ def process(self, value, state): if holes == len(value): value = None elif holes != 0: - raise BadArgumentUsage('argument %s takes %d values' - % (self.dest, self.nargs)) + raise BadArgumentUsage( + "argument {} takes {} values".format(self.dest, self.nargs) + ) state.opts[self.dest] = value state.order.append(self.obj) class ParsingState(object): - def __init__(self, rargs): self.opts = {} self.largs = [] @@ -222,11 +225,10 @@ def __init__(self, ctx=None): self.ignore_unknown_options = ctx.ignore_unknown_options self._short_opt = {} self._long_opt = {} - self._opt_prefixes = set(['-', '--']) + self._opt_prefixes = {"-", "--"} self._args = [] - def add_option(self, opts, dest, action=None, nargs=1, const=None, - obj=None): + def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None): """Adds a new option named `dest` to the parser. The destination is not inferred (unlike with optparse) and needs to be explicitly provided. Action can be any of ``store``, ``store_const``, @@ -238,8 +240,7 @@ def add_option(self, opts, dest, action=None, nargs=1, const=None, if obj is None: obj = dest opts = [normalize_opt(opt, self.ctx) for opt in opts] - option = Option(opts, dest, action=action, nargs=nargs, - const=const, obj=obj) + option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj) self._opt_prefixes.update(option.prefixes) for opt in option._short_opts: self._short_opt[opt] = option @@ -273,8 +274,9 @@ def parse_args(self, args): return state.opts, state.largs, state.order def _process_args_for_args(self, state): - pargs, args = _unpack_args(state.largs + state.rargs, - [x.nargs for x in self._args]) + pargs, args = _unpack_args( + state.largs + state.rargs, [x.nargs for x in self._args] + ) for idx, arg in enumerate(self._args): arg.process(pargs[idx], state) @@ -288,7 +290,7 @@ def _process_args_for_options(self, state): arglen = len(arg) # Double dashes always handled explicitly regardless of what # prefixes are valid. - if arg == '--': + if arg == "--": return elif arg[:1] in self._opt_prefixes and arglen > 1: self._process_opts(arg, state) @@ -320,8 +322,7 @@ def _process_args_for_options(self, state): def _match_long_opt(self, opt, explicit_value, state): if opt not in self._long_opt: - possibilities = [word for word in self._long_opt - if word.startswith(opt)] + possibilities = [word for word in self._long_opt if word.startswith(opt)] raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx) option = self._long_opt[opt] @@ -343,7 +344,7 @@ def _match_long_opt(self, opt, explicit_value, state): del state.rargs[:nargs] elif explicit_value is not None: - raise BadOptionUsage(opt, '%s option does not take a value' % opt) + raise BadOptionUsage(opt, "{} option does not take a value".format(opt)) else: value = None @@ -395,15 +396,15 @@ def _match_short_opt(self, arg, state): # to the state as new larg. This way there is basic combinatorics # that can be achieved while still ignoring unknown arguments. 
if self.ignore_unknown_options and unknown_options: - state.largs.append(prefix + ''.join(unknown_options)) + state.largs.append("{}{}".format(prefix, "".join(unknown_options))) def _process_opts(self, arg, state): explicit_value = None # Long option handling happens in two parts. The first part is # supporting explicitly attached values. In any case, we will try # to long match the option first. - if '=' in arg: - long_opt, explicit_value = arg.split('=', 1) + if "=" in arg: + long_opt, explicit_value = arg.split("=", 1) else: long_opt = arg norm_long_opt = normalize_opt(long_opt, self.ctx) diff --git a/pipenv/vendor/click/termui.py b/pipenv/vendor/click/termui.py index bf9a3aa163..02ef9e9f04 100644 --- a/pipenv/vendor/click/termui.py +++ b/pipenv/vendor/click/termui.py @@ -1,60 +1,89 @@ -import os -import sys -import struct import inspect +import io import itertools +import os +import struct +import sys -from ._compat import raw_input, text_type, string_types, \ - isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN -from .utils import echo -from .exceptions import Abort, UsageError -from .types import convert_type, Choice, Path +from ._compat import DEFAULT_COLUMNS +from ._compat import get_winterm_size +from ._compat import isatty +from ._compat import raw_input +from ._compat import string_types +from ._compat import strip_ansi +from ._compat import text_type +from ._compat import WIN +from .exceptions import Abort +from .exceptions import UsageError from .globals import resolve_color_default - +from .types import Choice +from .types import convert_type +from .types import Path +from .utils import echo +from .utils import LazyFile # The prompt functions to use. The doc tools currently override these # functions to customize how they work. 
visible_prompt_func = raw_input _ansi_colors = { - 'black': 30, - 'red': 31, - 'green': 32, - 'yellow': 33, - 'blue': 34, - 'magenta': 35, - 'cyan': 36, - 'white': 37, - 'reset': 39, - 'bright_black': 90, - 'bright_red': 91, - 'bright_green': 92, - 'bright_yellow': 93, - 'bright_blue': 94, - 'bright_magenta': 95, - 'bright_cyan': 96, - 'bright_white': 97, + "black": 30, + "red": 31, + "green": 32, + "yellow": 33, + "blue": 34, + "magenta": 35, + "cyan": 36, + "white": 37, + "reset": 39, + "bright_black": 90, + "bright_red": 91, + "bright_green": 92, + "bright_yellow": 93, + "bright_blue": 94, + "bright_magenta": 95, + "bright_cyan": 96, + "bright_white": 97, } -_ansi_reset_all = '\033[0m' +_ansi_reset_all = "\033[0m" def hidden_prompt_func(prompt): import getpass + return getpass.getpass(prompt) -def _build_prompt(text, suffix, show_default=False, default=None, show_choices=True, type=None): +def _build_prompt( + text, suffix, show_default=False, default=None, show_choices=True, type=None +): prompt = text if type is not None and show_choices and isinstance(type, Choice): - prompt += ' (' + ", ".join(map(str, type.choices)) + ')' + prompt += " ({})".format(", ".join(map(str, type.choices))) if default is not None and show_default: - prompt = '%s [%s]' % (prompt, default) + prompt = "{} [{}]".format(prompt, _format_default(default)) return prompt + suffix -def prompt(text, default=None, hide_input=False, confirmation_prompt=False, - type=None, value_proc=None, prompt_suffix=': ', show_default=True, - err=False, show_choices=True): +def _format_default(default): + if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"): + return default.name + + return default + + +def prompt( + text, + default=None, + hide_input=False, + confirmation_prompt=False, + type=None, + value_proc=None, + prompt_suffix=": ", + show_default=True, + err=False, + show_choices=True, +): """Prompts a user for input. This is a convenience function that can be used to prompt a user for input later. @@ -92,12 +121,12 @@ def prompt(text, default=None, hide_input=False, confirmation_prompt=False, result = None def prompt_func(text): - f = hide_input and hidden_prompt_func or visible_prompt_func + f = hidden_prompt_func if hide_input else visible_prompt_func try: # Write the prompt separately so that we get nice # coloring through colorama on Windows echo(text, nl=False, err=err) - return f('') + return f("") except (KeyboardInterrupt, EOFError): # getpass doesn't print a newline if the user aborts input with ^C. # Allegedly this behavior is inherited from getpass(3). 
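``_build_prompt`` above is what renders choices and defaults into the prompt
text; a sketch of the user-facing call, assuming standalone click::

    import click

    # Renders roughly as: "Ring bell? (y, n) [y]: "
    answer = click.prompt(
        "Ring bell?",
        type=click.Choice(["y", "n"]),
        default="y",
        show_choices=True,
    )
    click.echo("you chose {}".format(answer))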
@@ -109,7 +138,9 @@ def prompt_func(text): if value_proc is None: value_proc = convert_type(type, default) - prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) + prompt = _build_prompt( + text, prompt_suffix, show_default, default, show_choices, type + ) while 1: while 1: @@ -125,21 +156,22 @@ def prompt_func(text): try: result = value_proc(value) except UsageError as e: - echo('Error: %s' % e.message, err=err) + echo("Error: {}".format(e.message), err=err) # noqa: B306 continue if not confirmation_prompt: return result while 1: - value2 = prompt_func('Repeat for confirmation: ') + value2 = prompt_func("Repeat for confirmation: ") if value2: break if value == value2: return result - echo('Error: the two entered values do not match', err=err) + echo("Error: the two entered values do not match", err=err) -def confirm(text, default=False, abort=False, prompt_suffix=': ', - show_default=True, err=False): +def confirm( + text, default=False, abort=False, prompt_suffix=": ", show_default=True, err=False +): """Prompts for confirmation (yes/no question). If the user aborts the input by sending a interrupt signal this @@ -157,24 +189,25 @@ def confirm(text, default=False, abort=False, prompt_suffix=': ', :param err: if set to true the file defaults to ``stderr`` instead of ``stdout``, the same as with echo. """ - prompt = _build_prompt(text, prompt_suffix, show_default, - default and 'Y/n' or 'y/N') + prompt = _build_prompt( + text, prompt_suffix, show_default, "Y/n" if default else "y/N" + ) while 1: try: # Write the prompt separately so that we get nice # coloring through colorama on Windows echo(prompt, nl=False, err=err) - value = visible_prompt_func('').lower().strip() + value = visible_prompt_func("").lower().strip() except (KeyboardInterrupt, EOFError): raise Abort() - if value in ('y', 'yes'): + if value in ("y", "yes"): rv = True - elif value in ('n', 'no'): + elif value in ("n", "no"): rv = False - elif value == '': + elif value == "": rv = default else: - echo('Error: invalid input', err=err) + echo("Error: invalid input", err=err) continue break if abort and not rv: @@ -189,7 +222,8 @@ def get_terminal_size(): # If shutil has get_terminal_size() (Python 3.3 and later) use that if sys.version_info >= (3, 3): import shutil - shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None) + + shutil_get_terminal_size = getattr(shutil, "get_terminal_size", None) if shutil_get_terminal_size: sz = shutil_get_terminal_size() return sz.columns, sz.lines @@ -207,8 +241,8 @@ def ioctl_gwinsz(fd): try: import fcntl import termios - cr = struct.unpack( - 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) + + cr = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234")) except Exception: return return cr @@ -224,8 +258,7 @@ def ioctl_gwinsz(fd): except Exception: pass if not cr or not cr[0] or not cr[1]: - cr = (os.environ.get('LINES', 25), - os.environ.get('COLUMNS', DEFAULT_COLUMNS)) + cr = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", DEFAULT_COLUMNS)) return int(cr[1]), int(cr[0]) @@ -251,18 +284,29 @@ def echo_via_pager(text_or_generator, color=None): i = iter(text_or_generator) # convert every element of i to a text type if necessary - text_generator = (el if isinstance(el, string_types) else text_type(el) - for el in i) + text_generator = (el if isinstance(el, string_types) else text_type(el) for el in i) from ._termui_impl import pager + return pager(itertools.chain(text_generator, "\n"), color) -def progressbar(iterable=None, 
length=None, label=None, show_eta=True, - show_percent=None, show_pos=False, - item_show_func=None, fill_char='#', empty_char='-', - bar_template='%(label)s [%(bar)s] %(info)s', - info_sep=' ', width=36, file=None, color=None): +def progressbar( + iterable=None, + length=None, + label=None, + show_eta=True, + show_percent=None, + show_pos=False, + item_show_func=None, + fill_char="#", + empty_char="-", + bar_template="%(label)s [%(bar)s] %(info)s", + info_sep=" ", + width=36, + file=None, + color=None, +): """This function creates an iterable context manager that can be used to iterate over something while showing a progress bar. It will either iterate over the `iterable` or `length` items (that are counted @@ -272,11 +316,17 @@ def progressbar(iterable=None, length=None, label=None, show_eta=True, will not be rendered if the file is not a terminal. The context manager creates the progress bar. When the context - manager is entered the progress bar is already displayed. With every + manager is entered the progress bar is already created. With every iteration over the progress bar, the iterable passed to the bar is advanced and the bar is updated. When the context manager exits, a newline is printed and the progress bar is finalized on screen. + Note: The progress bar is currently designed for use cases where the + total progress can be expected to take at least several seconds. + Because of this, the ProgressBar class object won't display + progress that is considered too fast, and progress where the time + between steps is less than a second. + No printing must happen or the progress bar will be unintentionally destroyed. @@ -342,13 +392,24 @@ def progressbar(iterable=None, length=None, label=None, show_eta=True, which is not the case by default. """ from ._termui_impl import ProgressBar + color = resolve_color_default(color) - return ProgressBar(iterable=iterable, length=length, show_eta=show_eta, - show_percent=show_percent, show_pos=show_pos, - item_show_func=item_show_func, fill_char=fill_char, - empty_char=empty_char, bar_template=bar_template, - info_sep=info_sep, file=file, label=label, - width=width, color=color) + return ProgressBar( + iterable=iterable, + length=length, + show_eta=show_eta, + show_percent=show_percent, + show_pos=show_pos, + item_show_func=item_show_func, + fill_char=fill_char, + empty_char=empty_char, + bar_template=bar_template, + info_sep=info_sep, + file=file, + label=label, + width=width, + color=color, + ) def clear(): @@ -364,13 +425,22 @@ def clear(): # clear the screen by shelling out. Otherwise we can use an escape # sequence. if WIN: - os.system('cls') + os.system("cls") else: - sys.stdout.write('\033[2J\033[1;1H') - - -def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, - blink=None, reverse=None, reset=True): + sys.stdout.write("\033[2J\033[1;1H") + + +def style( + text, + fg=None, + bg=None, + bold=None, + dim=None, + underline=None, + blink=None, + reverse=None, + reset=True, +): """Styles a text with ANSI styles and returns the new string. By default the styling is self contained which means that at the end of the string a reset code is issued. 
This can be prevented by @@ -425,28 +495,28 @@ def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, bits = [] if fg: try: - bits.append('\033[%dm' % (_ansi_colors[fg])) + bits.append("\033[{}m".format(_ansi_colors[fg])) except KeyError: - raise TypeError('Unknown color %r' % fg) + raise TypeError("Unknown color '{}'".format(fg)) if bg: try: - bits.append('\033[%dm' % (_ansi_colors[bg] + 10)) + bits.append("\033[{}m".format(_ansi_colors[bg] + 10)) except KeyError: - raise TypeError('Unknown color %r' % bg) + raise TypeError("Unknown color '{}'".format(bg)) if bold is not None: - bits.append('\033[%dm' % (1 if bold else 22)) + bits.append("\033[{}m".format(1 if bold else 22)) if dim is not None: - bits.append('\033[%dm' % (2 if dim else 22)) + bits.append("\033[{}m".format(2 if dim else 22)) if underline is not None: - bits.append('\033[%dm' % (4 if underline else 24)) + bits.append("\033[{}m".format(4 if underline else 24)) if blink is not None: - bits.append('\033[%dm' % (5 if blink else 25)) + bits.append("\033[{}m".format(5 if blink else 25)) if reverse is not None: - bits.append('\033[%dm' % (7 if reverse else 27)) + bits.append("\033[{}m".format(7 if reverse else 27)) bits.append(text) if reset: bits.append(_ansi_reset_all) - return ''.join(bits) + return "".join(bits) def unstyle(text): @@ -478,8 +548,9 @@ def secho(message=None, file=None, nl=True, err=False, color=None, **styles): return echo(message, file=file, nl=nl, err=err, color=color) -def edit(text=None, editor=None, env=None, require_save=True, - extension='.txt', filename=None): +def edit( + text=None, editor=None, env=None, require_save=True, extension=".txt", filename=None +): r"""Edits the given text in the defined editor. If an editor is given (should be the full path to the executable but the regular operating system search path is used for finding the executable) it overrides @@ -508,8 +579,10 @@ def edit(text=None, editor=None, env=None, require_save=True, file as an indirection in that case. """ from ._termui_impl import Editor - editor = Editor(editor=editor, env=env, require_save=require_save, - extension=extension) + + editor = Editor( + editor=editor, env=env, require_save=require_save, extension=extension + ) if filename is None: return editor.edit(text) editor.edit_file(filename) @@ -538,6 +611,7 @@ def launch(url, wait=False, locate=False): the filesystem. """ from ._termui_impl import open_url + return open_url(url, wait=wait, locate=locate) @@ -574,10 +648,11 @@ def getchar(echo=False): def raw_terminal(): from ._termui_impl import raw_terminal as f + return f() -def pause(info='Press any key to continue ...', err=False): +def pause(info="Press any key to continue ...", err=False): """This command stops execution and waits for the user to press any key to continue. This is similar to the Windows batch "pause" command. If the program is not run through a terminal, this command diff --git a/pipenv/vendor/click/testing.py b/pipenv/vendor/click/testing.py index 1b2924e0b1..a3dba3b301 100644 --- a/pipenv/vendor/click/testing.py +++ b/pipenv/vendor/click/testing.py @@ -1,18 +1,16 @@ +import contextlib import os -import sys +import shlex import shutil +import sys import tempfile -import contextlib -import shlex - -from ._compat import iteritems, PY2, string_types - -# If someone wants to vendor click, we want to ensure the -# correct package is discovered. Ideally we could use a -# relative import here but unfortunately Python does not -# support that. 
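The ``style`` rewrite above keeps the same ANSI codes; a short sketch of the
public styling helpers, assuming standalone click::

    import click

    click.secho("all good", fg="green", bold=True)  # style + echo in one call
    click.secho("careful", fg="bright_yellow", underline=True)
    click.echo(click.style("plain red", fg="red") + " reset afterwards")
    # Unknown color names raise TypeError("Unknown color '...'")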
-clickpkg = sys.modules[__name__.rsplit('.', 1)[0]] +from . import formatting +from . import termui +from . import utils +from ._compat import iteritems +from ._compat import PY2 +from ._compat import string_types if PY2: @@ -23,7 +21,6 @@ class EchoingStdin(object): - def __init__(self, input, output): self._input = input self._output = output @@ -53,16 +50,16 @@ def __repr__(self): def make_input_stream(input, charset): # Is already an input stream. - if hasattr(input, 'read'): + if hasattr(input, "read"): if PY2: return input rv = _find_binary_reader(input) if rv is not None: return rv - raise TypeError('Could not find binary reader for input stream.') + raise TypeError("Could not find binary reader for input stream.") if input is None: - input = b'' + input = b"" elif not isinstance(input, bytes): input = input.encode(charset) if PY2: @@ -73,13 +70,14 @@ def make_input_stream(input, charset): class Result(object): """Holds the captured result of an invoked CLI script.""" - def __init__(self, runner, stdout_bytes, stderr_bytes, exit_code, - exception, exc_info=None): + def __init__( + self, runner, stdout_bytes, stderr_bytes, exit_code, exception, exc_info=None + ): #: The runner that created the result self.runner = runner #: The standard output as bytes. self.stdout_bytes = stdout_bytes - #: The standard error as bytes, or False(y) if not available + #: The standard error as bytes, or None if not available self.stderr_bytes = stderr_bytes #: The exit code as integer. self.exit_code = exit_code @@ -96,22 +94,22 @@ def output(self): @property def stdout(self): """The standard output as unicode string.""" - return self.stdout_bytes.decode(self.runner.charset, 'replace') \ - .replace('\r\n', '\n') + return self.stdout_bytes.decode(self.runner.charset, "replace").replace( + "\r\n", "\n" + ) @property def stderr(self): """The standard error as unicode string.""" - if not self.stderr_bytes: + if self.stderr_bytes is None: raise ValueError("stderr not separately captured") - return self.stderr_bytes.decode(self.runner.charset, 'replace') \ - .replace('\r\n', '\n') - + return self.stderr_bytes.decode(self.runner.charset, "replace").replace( + "\r\n", "\n" + ) def __repr__(self): - return '<%s %s>' % ( - type(self).__name__, - self.exception and repr(self.exception) or 'okay', + return "<{} {}>".format( + type(self).__name__, repr(self.exception) if self.exception else "okay" ) @@ -136,10 +134,9 @@ class CliRunner(object): independently """ - def __init__(self, charset=None, env=None, echo_stdin=False, - mix_stderr=True): + def __init__(self, charset=None, env=None, echo_stdin=False, mix_stderr=True): if charset is None: - charset = 'utf-8' + charset = "utf-8" self.charset = charset self.env = env or {} self.echo_stdin = echo_stdin @@ -150,7 +147,7 @@ def get_default_prog_name(self, cli): for it. The default is the `name` attribute or ``"root"`` if not set. 
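The ``mix_stderr`` behavior being tightened here (``stderr_bytes`` is now
``None`` unless stderr is captured separately) looks like this from a test,
assuming standalone click::

    import click
    from click.testing import CliRunner

    @click.command()
    @click.argument("name")
    def hello(name):
        click.echo("Hello, {}!".format(name))

    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(hello, ["world"])
    assert result.exit_code == 0
    assert result.output == "Hello, world!\n"
    assert result.stderr == ""  # only available with mix_stderr=False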
""" - return cli.name or 'root' + return cli.name or "root" def make_env(self, overrides=None): """Returns the environment overrides for invoking a script.""" @@ -182,8 +179,8 @@ def isolation(self, input=None, env=None, color=False): old_stdin = sys.stdin old_stdout = sys.stdout old_stderr = sys.stderr - old_forced_width = clickpkg.formatting.FORCED_WIDTH - clickpkg.formatting.FORCED_WIDTH = 80 + old_forced_width = formatting.FORCED_WIDTH + formatting.FORCED_WIDTH = 80 env = self.make_env(env) @@ -200,12 +197,10 @@ def isolation(self, input=None, env=None, color=False): if self.echo_stdin: input = EchoingStdin(input, bytes_output) input = io.TextIOWrapper(input, encoding=self.charset) - sys.stdout = io.TextIOWrapper( - bytes_output, encoding=self.charset) + sys.stdout = io.TextIOWrapper(bytes_output, encoding=self.charset) if not self.mix_stderr: bytes_error = io.BytesIO() - sys.stderr = io.TextIOWrapper( - bytes_error, encoding=self.charset) + sys.stderr = io.TextIOWrapper(bytes_error, encoding=self.charset) if self.mix_stderr: sys.stderr = sys.stdout @@ -213,16 +208,16 @@ def isolation(self, input=None, env=None, color=False): sys.stdin = input def visible_input(prompt=None): - sys.stdout.write(prompt or '') - val = input.readline().rstrip('\r\n') - sys.stdout.write(val + '\n') + sys.stdout.write(prompt or "") + val = input.readline().rstrip("\r\n") + sys.stdout.write("{}\n".format(val)) sys.stdout.flush() return val def hidden_input(prompt=None): - sys.stdout.write((prompt or '') + '\n') + sys.stdout.write("{}\n".format(prompt or "")) sys.stdout.flush() - return input.readline().rstrip('\r\n') + return input.readline().rstrip("\r\n") def _getchar(echo): char = sys.stdin.read(1) @@ -238,14 +233,14 @@ def should_strip_ansi(stream=None, color=None): return not default_color return not color - old_visible_prompt_func = clickpkg.termui.visible_prompt_func - old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func - old__getchar_func = clickpkg.termui._getchar - old_should_strip_ansi = clickpkg.utils.should_strip_ansi - clickpkg.termui.visible_prompt_func = visible_input - clickpkg.termui.hidden_prompt_func = hidden_input - clickpkg.termui._getchar = _getchar - clickpkg.utils.should_strip_ansi = should_strip_ansi + old_visible_prompt_func = termui.visible_prompt_func + old_hidden_prompt_func = termui.hidden_prompt_func + old__getchar_func = termui._getchar + old_should_strip_ansi = utils.should_strip_ansi + termui.visible_prompt_func = visible_input + termui.hidden_prompt_func = hidden_input + termui._getchar = _getchar + utils.should_strip_ansi = should_strip_ansi old_env = {} try: @@ -271,14 +266,22 @@ def should_strip_ansi(stream=None, color=None): sys.stdout = old_stdout sys.stderr = old_stderr sys.stdin = old_stdin - clickpkg.termui.visible_prompt_func = old_visible_prompt_func - clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func - clickpkg.termui._getchar = old__getchar_func - clickpkg.utils.should_strip_ansi = old_should_strip_ansi - clickpkg.formatting.FORCED_WIDTH = old_forced_width - - def invoke(self, cli, args=None, input=None, env=None, - catch_exceptions=True, color=False, mix_stderr=False, **extra): + termui.visible_prompt_func = old_visible_prompt_func + termui.hidden_prompt_func = old_hidden_prompt_func + termui._getchar = old__getchar_func + utils.should_strip_ansi = old_should_strip_ansi + formatting.FORCED_WIDTH = old_forced_width + + def invoke( + self, + cli, + args=None, + input=None, + env=None, + catch_exceptions=True, + color=False, + **extra + ): 
"""Invokes a command in an isolated environment. The arguments are forwarded directly to the command line script, the `extra` keyword arguments are passed to the :meth:`~clickpkg.Command.main` function of @@ -335,7 +338,7 @@ def invoke(self, cli, args=None, input=None, env=None, if not isinstance(exit_code, int): sys.stdout.write(str(exit_code)) - sys.stdout.write('\n') + sys.stdout.write("\n") exit_code = 1 except Exception as e: @@ -347,14 +350,19 @@ def invoke(self, cli, args=None, input=None, env=None, finally: sys.stdout.flush() stdout = outstreams[0].getvalue() - stderr = outstreams[1] and outstreams[1].getvalue() - - return Result(runner=self, - stdout_bytes=stdout, - stderr_bytes=stderr, - exit_code=exit_code, - exception=exception, - exc_info=exc_info) + if self.mix_stderr: + stderr = None + else: + stderr = outstreams[1].getvalue() + + return Result( + runner=self, + stdout_bytes=stdout, + stderr_bytes=stderr, + exit_code=exit_code, + exception=exception, + exc_info=exc_info, + ) @contextlib.contextmanager def isolated_filesystem(self): @@ -370,5 +378,5 @@ def isolated_filesystem(self): os.chdir(cwd) try: shutil.rmtree(t) - except (OSError, IOError): + except (OSError, IOError): # noqa: B014 pass diff --git a/pipenv/vendor/click/types.py b/pipenv/vendor/click/types.py index 1f88032f54..505c39f850 100644 --- a/pipenv/vendor/click/types.py +++ b/pipenv/vendor/click/types.py @@ -2,10 +2,16 @@ import stat from datetime import datetime -from ._compat import open_stream, text_type, filename_to_ui, \ - get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2 +from ._compat import _get_argv_encoding +from ._compat import filename_to_ui +from ._compat import get_filesystem_encoding +from ._compat import get_streerror +from ._compat import open_stream +from ._compat import PY2 +from ._compat import text_type from .exceptions import BadParameter -from .utils import safecall, LazyFile +from .utils import LazyFile +from .utils import safecall class ParamType(object): @@ -21,6 +27,7 @@ class ParamType(object): This can be the case when the object is used with prompt inputs. """ + is_composite = False #: the descriptive name of this type @@ -62,7 +69,7 @@ def split_envvar_value(self, rv): then leading and trailing whitespace is ignored. Otherwise, leading and trailing splitters usually lead to empty items being included. 
""" - return (rv or '').split(self.envvar_list_splitter) + return (rv or "").split(self.envvar_list_splitter) def fail(self, message, param=None, ctx=None): """Helper method to fail with an invalid value message.""" @@ -78,7 +85,6 @@ def arity(self): class FuncParamType(ParamType): - def __init__(self, func): self.name = func.__name__ self.func = func @@ -90,22 +96,22 @@ def convert(self, value, param, ctx): try: value = text_type(value) except UnicodeError: - value = str(value).decode('utf-8', 'replace') + value = str(value).decode("utf-8", "replace") self.fail(value, param, ctx) class UnprocessedParamType(ParamType): - name = 'text' + name = "text" def convert(self, value, param, ctx): return value def __repr__(self): - return 'UNPROCESSED' + return "UNPROCESSED" class StringParamType(ParamType): - name = 'text' + name = "text" def convert(self, value, param, ctx): if isinstance(value, bytes): @@ -118,12 +124,14 @@ def convert(self, value, param, ctx): try: value = value.decode(fs_enc) except UnicodeError: - value = value.decode('utf-8', 'replace') + value = value.decode("utf-8", "replace") + else: + value = value.decode("utf-8", "replace") return value return value def __repr__(self): - return 'STRING' + return "STRING" class Choice(ParamType): @@ -133,54 +141,68 @@ class Choice(ParamType): You should only pass a list or tuple of choices. Other iterables (like generators) may lead to surprising results. + The resulting value will always be one of the originally passed choices + regardless of ``case_sensitive`` or any ``ctx.token_normalize_func`` + being specified. + See :ref:`choice-opts` for an example. :param case_sensitive: Set to false to make choices case insensitive. Defaults to true. """ - name = 'choice' + name = "choice" def __init__(self, choices, case_sensitive=True): self.choices = choices self.case_sensitive = case_sensitive def get_metavar(self, param): - return '[%s]' % '|'.join(self.choices) + return "[{}]".format("|".join(self.choices)) def get_missing_message(self, param): - return 'Choose from:\n\t%s.' % ',\n\t'.join(self.choices) + return "Choose from:\n\t{}.".format(",\n\t".join(self.choices)) def convert(self, value, param, ctx): - # Exact match - if value in self.choices: - return value - # Match through normalization and case sensitivity # first do token_normalize_func, then lowercase # preserve original `value` to produce an accurate message in # `self.fail` normed_value = value - normed_choices = self.choices + normed_choices = {choice: choice for choice in self.choices} - if ctx is not None and \ - ctx.token_normalize_func is not None: + if ctx is not None and ctx.token_normalize_func is not None: normed_value = ctx.token_normalize_func(value) - normed_choices = [ctx.token_normalize_func(choice) for choice in - self.choices] + normed_choices = { + ctx.token_normalize_func(normed_choice): original + for normed_choice, original in normed_choices.items() + } if not self.case_sensitive: - normed_value = normed_value.lower() - normed_choices = [choice.lower() for choice in normed_choices] + if PY2: + lower = str.lower + else: + lower = str.casefold + + normed_value = lower(normed_value) + normed_choices = { + lower(normed_choice): original + for normed_choice, original in normed_choices.items() + } if normed_value in normed_choices: - return normed_value + return normed_choices[normed_value] - self.fail('invalid choice: %s. (choose from %s)' % - (value, ', '.join(self.choices)), param, ctx) + self.fail( + "invalid choice: {}. 
(choose from {})".format( + value, ", ".join(self.choices) + ), + param, + ctx, + ) def __repr__(self): - return 'Choice(%r)' % list(self.choices) + return "Choice('{}')".format(list(self.choices)) class DateTime(ParamType): @@ -203,17 +225,14 @@ class DateTime(ParamType): ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``, ``'%Y-%m-%d %H:%M:%S'``. """ - name = 'datetime' + + name = "datetime" def __init__(self, formats=None): - self.formats = formats or [ - '%Y-%m-%d', - '%Y-%m-%dT%H:%M:%S', - '%Y-%m-%d %H:%M:%S' - ] + self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"] def get_metavar(self, param): - return '[{}]'.format('|'.join(self.formats)) + return "[{}]".format("|".join(self.formats)) def _try_to_convert_date(self, value, format): try: @@ -229,24 +248,26 @@ def convert(self, value, param, ctx): return dtime self.fail( - 'invalid datetime format: {}. (choose from {})'.format( - value, ', '.join(self.formats))) + "invalid datetime format: {}. (choose from {})".format( + value, ", ".join(self.formats) + ) + ) def __repr__(self): - return 'DateTime' + return "DateTime" class IntParamType(ParamType): - name = 'integer' + name = "integer" def convert(self, value, param, ctx): try: return int(value) - except (ValueError, UnicodeError): - self.fail('%s is not a valid integer' % value, param, ctx) + except ValueError: + self.fail("{} is not a valid integer".format(value), param, ctx) def __repr__(self): - return 'INT' + return "INT" class IntRange(IntParamType): @@ -257,7 +278,8 @@ class IntRange(IntParamType): See :ref:`ranges` for an example. """ - name = 'integer range' + + name = "integer range" def __init__(self, min=None, max=None, clamp=False): self.min = min @@ -271,35 +293,55 @@ def convert(self, value, param, ctx): return self.min if self.max is not None and rv > self.max: return self.max - if self.min is not None and rv < self.min or \ - self.max is not None and rv > self.max: + if ( + self.min is not None + and rv < self.min + or self.max is not None + and rv > self.max + ): if self.min is None: - self.fail('%s is bigger than the maximum valid value ' - '%s.' % (rv, self.max), param, ctx) + self.fail( + "{} is bigger than the maximum valid value {}.".format( + rv, self.max + ), + param, + ctx, + ) elif self.max is None: - self.fail('%s is smaller than the minimum valid value ' - '%s.' % (rv, self.min), param, ctx) + self.fail( + "{} is smaller than the minimum valid value {}.".format( + rv, self.min + ), + param, + ctx, + ) else: - self.fail('%s is not in the valid range of %s to %s.' - % (rv, self.min, self.max), param, ctx) + self.fail( + "{} is not in the valid range of {} to {}.".format( + rv, self.min, self.max + ), + param, + ctx, + ) return rv def __repr__(self): - return 'IntRange(%r, %r)' % (self.min, self.max) + return "IntRange({}, {})".format(self.min, self.max) class FloatParamType(ParamType): - name = 'float' + name = "float" def convert(self, value, param, ctx): try: return float(value) - except (UnicodeError, ValueError): - self.fail('%s is not a valid floating point value' % - value, param, ctx) + except ValueError: + self.fail( + "{} is not a valid floating point value".format(value), param, ctx + ) def __repr__(self): - return 'FLOAT' + return "FLOAT" class FloatRange(FloatParamType): @@ -310,7 +352,8 @@ class FloatRange(FloatParamType): See :ref:`ranges` for an example. 
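The range and choice conversions above surface to users as option types; a
combined sketch, assuming standalone click::

    import click

    @click.command()
    @click.option("--level", type=click.IntRange(0, 10, clamp=True))
    @click.option("--color", type=click.Choice(["red", "green"],
                                               case_sensitive=False))
    def paint(level, color):
        # clamp=True coerces --level=99 to 10; "RED" converts to "red",
        # always returning one of the originally passed choices.
        click.echo("level={} color={}".format(level, color))

    if __name__ == "__main__":
        paint()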
""" - name = 'float range' + + name = "float range" def __init__(self, min=None, max=None, clamp=False): self.min = min @@ -324,54 +367,74 @@ def convert(self, value, param, ctx): return self.min if self.max is not None and rv > self.max: return self.max - if self.min is not None and rv < self.min or \ - self.max is not None and rv > self.max: + if ( + self.min is not None + and rv < self.min + or self.max is not None + and rv > self.max + ): if self.min is None: - self.fail('%s is bigger than the maximum valid value ' - '%s.' % (rv, self.max), param, ctx) + self.fail( + "{} is bigger than the maximum valid value {}.".format( + rv, self.max + ), + param, + ctx, + ) elif self.max is None: - self.fail('%s is smaller than the minimum valid value ' - '%s.' % (rv, self.min), param, ctx) + self.fail( + "{} is smaller than the minimum valid value {}.".format( + rv, self.min + ), + param, + ctx, + ) else: - self.fail('%s is not in the valid range of %s to %s.' - % (rv, self.min, self.max), param, ctx) + self.fail( + "{} is not in the valid range of {} to {}.".format( + rv, self.min, self.max + ), + param, + ctx, + ) return rv def __repr__(self): - return 'FloatRange(%r, %r)' % (self.min, self.max) + return "FloatRange({}, {})".format(self.min, self.max) class BoolParamType(ParamType): - name = 'boolean' + name = "boolean" def convert(self, value, param, ctx): if isinstance(value, bool): return bool(value) value = value.lower() - if value in ('true', 't', '1', 'yes', 'y'): + if value in ("true", "t", "1", "yes", "y"): return True - elif value in ('false', 'f', '0', 'no', 'n'): + elif value in ("false", "f", "0", "no", "n"): return False - self.fail('%s is not a valid boolean' % value, param, ctx) + self.fail("{} is not a valid boolean".format(value), param, ctx) def __repr__(self): - return 'BOOL' + return "BOOL" class UUIDParameterType(ParamType): - name = 'uuid' + name = "uuid" def convert(self, value, param, ctx): import uuid + try: if PY2 and isinstance(value, text_type): - value = value.encode('ascii') + value = value.encode("ascii") return uuid.UUID(value) - except (UnicodeError, ValueError): - self.fail('%s is not a valid UUID value' % value, param, ctx) + except ValueError: + self.fail("{} is not a valid UUID value".format(value), param, ctx) def __repr__(self): - return 'UUID' + return "UUID" class File(ParamType): @@ -400,11 +463,13 @@ class File(ParamType): See :ref:`file-args` for more information. 
""" - name = 'filename' + + name = "filename" envvar_list_splitter = os.path.pathsep - def __init__(self, mode='r', encoding=None, errors='strict', lazy=None, - atomic=False): + def __init__( + self, mode="r", encoding=None, errors="strict", lazy=None, atomic=False + ): self.mode = mode self.encoding = encoding self.errors = errors @@ -414,29 +479,30 @@ def __init__(self, mode='r', encoding=None, errors='strict', lazy=None, def resolve_lazy_flag(self, value): if self.lazy is not None: return self.lazy - if value == '-': + if value == "-": return False - elif 'w' in self.mode: + elif "w" in self.mode: return True return False def convert(self, value, param, ctx): try: - if hasattr(value, 'read') or hasattr(value, 'write'): + if hasattr(value, "read") or hasattr(value, "write"): return value lazy = self.resolve_lazy_flag(value) if lazy: - f = LazyFile(value, self.mode, self.encoding, self.errors, - atomic=self.atomic) + f = LazyFile( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ) if ctx is not None: ctx.call_on_close(f.close_intelligently) return f - f, should_close = open_stream(value, self.mode, - self.encoding, self.errors, - atomic=self.atomic) + f, should_close = open_stream( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ) # If a context is provided, we automatically close the file # at the end of the context execution (or flush out). If a # context does not exist, it's the caller's responsibility to @@ -448,11 +514,14 @@ def convert(self, value, param, ctx): else: ctx.call_on_close(safecall(f.flush)) return f - except (IOError, OSError) as e: - self.fail('Could not open file: %s: %s' % ( - filename_to_ui(value), - get_streerror(e), - ), param, ctx) + except (IOError, OSError) as e: # noqa: B014 + self.fail( + "Could not open file: {}: {}".format( + filename_to_ui(value), get_streerror(e) + ), + param, + ctx, + ) class Path(ParamType): @@ -485,11 +554,20 @@ class Path(ParamType): unicode depending on what makes most sense given the input data Click deals with. """ + envvar_list_splitter = os.path.pathsep - def __init__(self, exists=False, file_okay=True, dir_okay=True, - writable=False, readable=True, resolve_path=False, - allow_dash=False, path_type=None): + def __init__( + self, + exists=False, + file_okay=True, + dir_okay=True, + writable=False, + readable=True, + resolve_path=False, + allow_dash=False, + path_type=None, + ): self.exists = exists self.file_okay = file_okay self.dir_okay = dir_okay @@ -500,14 +578,14 @@ def __init__(self, exists=False, file_okay=True, dir_okay=True, self.type = path_type if self.file_okay and not self.dir_okay: - self.name = 'file' - self.path_type = 'File' + self.name = "file" + self.path_type = "File" elif self.dir_okay and not self.file_okay: - self.name = 'directory' - self.path_type = 'Directory' + self.name = "directory" + self.path_type = "Directory" else: - self.name = 'path' - self.path_type = 'Path' + self.name = "path" + self.path_type = "Path" def coerce_path_result(self, rv): if self.type is not None and not isinstance(rv, self.type): @@ -520,7 +598,7 @@ def coerce_path_result(self, rv): def convert(self, value, param, ctx): rv = value - is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-') + is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-") if not is_dash: if self.resolve_path: @@ -531,31 +609,44 @@ def convert(self, value, param, ctx): except OSError: if not self.exists: return self.coerce_path_result(rv) - self.fail('%s "%s" does not exist.' 
% ( - self.path_type, - filename_to_ui(value) - ), param, ctx) + self.fail( + "{} '{}' does not exist.".format( + self.path_type, filename_to_ui(value) + ), + param, + ctx, + ) if not self.file_okay and stat.S_ISREG(st.st_mode): - self.fail('%s "%s" is a file.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) + self.fail( + "{} '{}' is a file.".format(self.path_type, filename_to_ui(value)), + param, + ctx, + ) if not self.dir_okay and stat.S_ISDIR(st.st_mode): - self.fail('%s "%s" is a directory.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) + self.fail( + "{} '{}' is a directory.".format( + self.path_type, filename_to_ui(value) + ), + param, + ctx, + ) if self.writable and not os.access(value, os.W_OK): - self.fail('%s "%s" is not writable.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) + self.fail( + "{} '{}' is not writable.".format( + self.path_type, filename_to_ui(value) + ), + param, + ctx, + ) if self.readable and not os.access(value, os.R_OK): - self.fail('%s "%s" is not readable.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) + self.fail( + "{} '{}' is not readable.".format( + self.path_type, filename_to_ui(value) + ), + param, + ctx, + ) return self.coerce_path_result(rv) @@ -579,7 +670,7 @@ def __init__(self, types): @property def name(self): - return "<" + " ".join(ty.name for ty in self.types) + ">" + return "<{}>".format(" ".join(ty.name for ty in self.types)) @property def arity(self): @@ -587,14 +678,16 @@ def arity(self): def convert(self, value, param, ctx): if len(value) != len(self.types): - raise TypeError('It would appear that nargs is set to conflict ' - 'with the composite type arity.') + raise TypeError( + "It would appear that nargs is set to conflict with the" + " composite type arity." + ) return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value)) def convert_type(ty, default=None): - """Converts a callable or python ty into the most appropriate param - ty. + """Converts a callable or python type into the most appropriate + param type. """ guessed_type = False if ty is None and default is not None: @@ -627,8 +720,9 @@ def convert_type(ty, default=None): if __debug__: try: if issubclass(ty, ParamType): - raise AssertionError('Attempted to use an uninstantiated ' - 'parameter type (%s).' 
% ty) + raise AssertionError( + "Attempted to use an uninstantiated parameter type ({}).".format(ty) + ) except TypeError: pass return FuncParamType(ty) diff --git a/pipenv/vendor/click/utils.py b/pipenv/vendor/click/utils.py index fc84369fc9..79265e732d 100644 --- a/pipenv/vendor/click/utils.py +++ b/pipenv/vendor/click/utils.py @@ -1,34 +1,47 @@ import os import sys +from ._compat import _default_text_stderr +from ._compat import _default_text_stdout +from ._compat import auto_wrap_for_ansi +from ._compat import binary_streams +from ._compat import filename_to_ui +from ._compat import get_filesystem_encoding +from ._compat import get_streerror +from ._compat import is_bytes +from ._compat import open_stream +from ._compat import PY2 +from ._compat import should_strip_ansi +from ._compat import string_types +from ._compat import strip_ansi +from ._compat import text_streams +from ._compat import text_type +from ._compat import WIN from .globals import resolve_color_default -from ._compat import text_type, open_stream, get_filesystem_encoding, \ - get_streerror, string_types, PY2, binary_streams, text_streams, \ - filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \ - _default_text_stdout, _default_text_stderr, is_bytes, WIN - if not PY2: from ._compat import _find_binary_writer elif WIN: - from ._winconsole import _get_windows_argv, \ - _hash_py_argv, _initial_argv_hash - + from ._winconsole import _get_windows_argv + from ._winconsole import _hash_py_argv + from ._winconsole import _initial_argv_hash echo_native_types = string_types + (bytes, bytearray) def _posixify(name): - return '-'.join(name.split()).lower() + return "-".join(name.split()).lower() def safecall(func): """Wraps a function so that it swallows exceptions.""" + def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception: pass + return wrapper @@ -38,7 +51,7 @@ def make_str(value): try: return value.decode(get_filesystem_encoding()) except UnicodeError: - return value.decode('utf-8', 'replace') + return value.decode("utf-8", "replace") return text_type(value) @@ -50,21 +63,21 @@ def make_default_short_help(help, max_length=45): done = False for word in words: - if word[-1:] == '.': + if word[-1:] == ".": done = True - new_length = result and 1 + len(word) or len(word) + new_length = 1 + len(word) if result else len(word) if total_length + new_length > max_length: - result.append('...') + result.append("...") done = True else: if result: - result.append(' ') + result.append(" ") result.append(word) if done: break total_length += new_length - return ''.join(result) + return "".join(result) class LazyFile(object): @@ -74,19 +87,19 @@ class LazyFile(object): files for writing. """ - def __init__(self, filename, mode='r', encoding=None, errors='strict', - atomic=False): + def __init__( + self, filename, mode="r", encoding=None, errors="strict", atomic=False + ): self.name = filename self.mode = mode self.encoding = encoding self.errors = errors self.atomic = atomic - if filename == '-': - self._f, self.should_close = open_stream(filename, mode, - encoding, errors) + if filename == "-": + self._f, self.should_close = open_stream(filename, mode, encoding, errors) else: - if 'r' in mode: + if "r" in mode: # Open and close the file in case we're opening it for # reading so that we can catch at least some errors in # some cases early. 
@@ -100,7 +113,7 @@ def __getattr__(self, name): def __repr__(self): if self._f is not None: return repr(self._f) - return '<unopened file %r %s>' % (self.name, self.mode) + return "<unopened file '{}' {}>".format(self.name, self.mode) def open(self): """Opens the file if it's not yet open. This call might fail with @@ -110,12 +123,12 @@ def open(self): if self._f is not None: return self._f try: - rv, self.should_close = open_stream(self.name, self.mode, - self.encoding, - self.errors, - atomic=self.atomic) - except (IOError, OSError) as e: + rv, self.should_close = open_stream( + self.name, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + except (IOError, OSError) as e: # noqa: E402 from .exceptions import FileError + raise FileError(self.name, hint=get_streerror(e)) self._f = rv return rv @@ -144,7 +157,6 @@ def __iter__(self): class KeepOpenFile(object): - def __init__(self, file): self._file = file @@ -222,11 +234,11 @@ def echo(message=None, file=None, nl=True, err=False, color=None): message = text_type(message) if nl: - message = message or u'' + message = message or u"" if isinstance(message, text_type): - message += u'\n' + message += u"\n" else: - message += b'\n' + message += b"\n" # If there is a message, and we're in Python 3, and the value looks # like bytes, we manually need to find the binary stream and write the @@ -273,11 +285,11 @@ def get_binary_stream(name): """ opener = binary_streams.get(name) if opener is None: - raise TypeError('Unknown standard stream %r' % name) + raise TypeError("Unknown standard stream '{}'".format(name)) return opener() -def get_text_stream(name, encoding=None, errors='strict'): +def get_text_stream(name, encoding=None, errors="strict"): """Returns a system stream for text processing. This usually returns a wrapped stream around a binary stream returned from :func:`get_binary_stream` but it also can take shortcuts on Python 3 @@ -290,12 +302,13 @@ def get_text_stream(name, encoding=None, errors='strict'): """ opener = text_streams.get(name) if opener is None: - raise TypeError('Unknown standard stream %r' % name) + raise TypeError("Unknown standard stream '{}'".format(name)) return opener(encoding, errors) -def open_file(filename, mode='r', encoding=None, errors='strict', - lazy=False, atomic=False): +def open_file( + filename, mode="r", encoding=None, errors="strict", lazy=False, atomic=False +): """This is similar to how the :class:`File` works but for manual usage. Files are opened non lazy by default. This can open regular files as well as stdin/stdout if ``'-'`` is passed. @@ -320,8 +333,7 @@ def open_file(filename, mode='r', encoding=None, errors='strict', """ if lazy: return LazyFile(filename, mode, encoding, errors, atomic=atomic) - f, should_close = open_stream(filename, mode, encoding, errors, - atomic=atomic) + f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic) if not should_close: f = KeepOpenFile(f) return f @@ -401,19 +413,21 @@ def get_app_dir(app_name, roaming=True, force_posix=False): application support folder. """ if WIN: - key = roaming and 'APPDATA' or 'LOCALAPPDATA' + key = "APPDATA" if roaming else "LOCALAPPDATA" folder = os.environ.get(key) if folder is None: - folder = os.path.expanduser('~') + folder = os.path.expanduser("~") return os.path.join(folder, app_name) if force_posix: - return os.path.join(os.path.expanduser('~/.' 
+ _posixify(app_name))) - if sys.platform == 'darwin': - return os.path.join(os.path.expanduser( - '~/Library/Application Support'), app_name) + return os.path.join(os.path.expanduser("~/.{}".format(_posixify(app_name)))) + if sys.platform == "darwin": + return os.path.join( + os.path.expanduser("~/Library/Application Support"), app_name + ) return os.path.join( - os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')), - _posixify(app_name)) + os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")), + _posixify(app_name), + ) class PacifyFlushWrapper(object): @@ -433,6 +447,7 @@ def flush(self): self.wrapped.flush() except IOError as e: import errno + if e.errno != errno.EPIPE: raise diff --git a/pipenv/vendor/colorama/__init__.py b/pipenv/vendor/colorama/__init__.py index 2a3bf47142..34c263cc8b 100644 --- a/pipenv/vendor/colorama/__init__.py +++ b/pipenv/vendor/colorama/__init__.py @@ -3,4 +3,4 @@ from .ansi import Fore, Back, Style, Cursor from .ansitowin32 import AnsiToWin32 -__version__ = '0.4.1' +__version__ = '0.4.3' diff --git a/pipenv/vendor/funcsigs/LICENSE b/pipenv/vendor/funcsigs/LICENSE new file mode 100644 index 0000000000..3e563d6fbd --- /dev/null +++ b/pipenv/vendor/funcsigs/LICENSE @@ -0,0 +1,13 @@ +Copyright 2013 Aaron Iles + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/pipenv/vendor/idna/LICENSE.rst b/pipenv/vendor/idna/LICENSE.rst index 3ee64fba29..63664b82e7 100644 --- a/pipenv/vendor/idna/LICENSE.rst +++ b/pipenv/vendor/idna/LICENSE.rst @@ -1,7 +1,9 @@ License ------- -Copyright (c) 2013-2018, Kim Davies. All rights reserved. +License: bsd-3-clause + +Copyright (c) 2013-2020, Kim Davies. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -30,51 +32,3 @@ modification, are permitted provided that the following conditions are met: (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Portions of the codec implementation and unit tests are derived from the -Python standard library, which carries the `Python Software Foundation -License <https://docs.python.org/2/license.html>`_: - - Copyright (c) 2001-2014 Python Software Foundation; All Rights Reserved - -Portions of the unit tests are derived from the Unicode standard, which -is subject to the Unicode, Inc. License Agreement: - - Copyright (c) 1991-2014 Unicode, Inc. All rights reserved. - Distributed under the Terms of Use in - <http://www.unicode.org/copyright.html>. 
- - Permission is hereby granted, free of charge, to any person obtaining - a copy of the Unicode data files and any associated documentation - (the "Data Files") or Unicode software and any associated documentation - (the "Software") to deal in the Data Files or Software - without restriction, including without limitation the rights to use, - copy, modify, merge, publish, distribute, and/or sell copies of - the Data Files or Software, and to permit persons to whom the Data Files - or Software are furnished to do so, provided that - - (a) this copyright and permission notice appear with all copies - of the Data Files or Software, - - (b) this copyright and permission notice appear in associated - documentation, and - - (c) there is clear notice in each modified Data File or in the Software - as well as in the documentation associated with the Data File(s) or - Software that the data or software has been modified. - - THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF - ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE - WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT OF THIRD PARTY RIGHTS. - IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS - NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL - DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, - DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER - TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - PERFORMANCE OF THE DATA FILES OR SOFTWARE. - - Except as contained in this notice, the name of a copyright holder - shall not be used in advertising or otherwise to promote the sale, - use or other dealings in these Data Files or Software without prior - written authorization of the copyright holder. 
diff --git a/pipenv/vendor/idna/core.py b/pipenv/vendor/idna/core.py index 104624ad2d..9c3bba2ad7 100644 --- a/pipenv/vendor/idna/core.py +++ b/pipenv/vendor/idna/core.py @@ -9,7 +9,7 @@ _alabel_prefix = b'xn--' _unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]') -if sys.version_info[0] == 3: +if sys.version_info[0] >= 3: unicode = str unichr = chr @@ -300,6 +300,8 @@ def ulabel(label): label = label.lower() if label.startswith(_alabel_prefix): label = label[len(_alabel_prefix):] + if label.decode('ascii')[-1] == '-': + raise IDNAError('A-label must not end with a hyphen') else: check_label(label) return label.decode('ascii') diff --git a/pipenv/vendor/idna/idnadata.py b/pipenv/vendor/idna/idnadata.py index a80c959d2a..2b81c522cf 100644 --- a/pipenv/vendor/idna/idnadata.py +++ b/pipenv/vendor/idna/idnadata.py @@ -1,6 +1,6 @@ # This file is automatically generated by tools/idna-data -__version__ = "11.0.0" +__version__ = "12.1.0" scripts = { 'Greek': ( 0x37000000374, @@ -74,6 +74,7 @@ 0x304100003097, 0x309d000030a0, 0x1b0010001b11f, + 0x1b1500001b153, 0x1f2000001f201, ), 'Katakana': ( @@ -85,6 +86,7 @@ 0xff660000ff70, 0xff710000ff9e, 0x1b0000001b001, + 0x1b1640001b168, ), } joining_types = { @@ -824,6 +826,7 @@ 0x1e941: 68, 0x1e942: 68, 0x1e943: 68, + 0x1e94b: 84, } codepoint_classes = { 'PVALID': ( @@ -1258,18 +1261,11 @@ 0xe5000000e5a, 0xe8100000e83, 0xe8400000e85, - 0xe8700000e89, - 0xe8a00000e8b, - 0xe8d00000e8e, - 0xe9400000e98, - 0xe9900000ea0, - 0xea100000ea4, + 0xe8600000e8b, + 0xe8c00000ea4, 0xea500000ea6, - 0xea700000ea8, - 0xeaa00000eac, - 0xead00000eb3, - 0xeb400000eba, - 0xebb00000ebe, + 0xea700000eb3, + 0xeb400000ebe, 0xec000000ec5, 0xec600000ec7, 0xec800000ece, @@ -1370,7 +1366,7 @@ 0x1c4000001c4a, 0x1c4d00001c7e, 0x1cd000001cd3, - 0x1cd400001cfa, + 0x1cd400001cfb, 0x1d0000001d2c, 0x1d2f00001d30, 0x1d3b00001d3c, @@ -1727,6 +1723,10 @@ 0xa7b50000a7b6, 0xa7b70000a7b8, 0xa7b90000a7ba, + 0xa7bb0000a7bc, + 0xa7bd0000a7be, + 0xa7bf0000a7c0, + 0xa7c30000a7c4, 0xa7f70000a7f8, 0xa7fa0000a828, 0xa8400000a874, @@ -1753,7 +1753,7 @@ 0xab200000ab27, 0xab280000ab2f, 0xab300000ab5b, - 0xab600000ab66, + 0xab600000ab68, 0xabc00000abeb, 0xabec0000abee, 0xabf00000abfa, @@ -1830,6 +1830,7 @@ 0x10f0000010f1d, 0x10f2700010f28, 0x10f3000010f51, + 0x10fe000010ff7, 0x1100000011047, 0x1106600011070, 0x1107f000110bb, @@ -1871,7 +1872,7 @@ 0x1137000011375, 0x114000001144b, 0x114500001145a, - 0x1145e0001145f, + 0x1145e00011460, 0x11480000114c6, 0x114c7000114c8, 0x114d0000114da, @@ -1881,7 +1882,7 @@ 0x1160000011641, 0x1164400011645, 0x116500001165a, - 0x11680000116b8, + 0x11680000116b9, 0x116c0000116ca, 0x117000001171b, 0x1171d0001172c, @@ -1889,10 +1890,13 @@ 0x118000001183b, 0x118c0000118ea, 0x118ff00011900, + 0x119a0000119a8, + 0x119aa000119d8, + 0x119da000119e2, + 0x119e3000119e5, 0x11a0000011a3f, 0x11a4700011a48, - 0x11a5000011a84, - 0x11a8600011a9a, + 0x11a5000011a9a, 0x11a9d00011a9e, 0x11ac000011af9, 0x11c0000011c09, @@ -1931,13 +1935,16 @@ 0x16b6300016b78, 0x16b7d00016b90, 0x16e6000016e80, - 0x16f0000016f45, - 0x16f5000016f7f, + 0x16f0000016f4b, + 0x16f4f00016f88, 0x16f8f00016fa0, 0x16fe000016fe2, - 0x17000000187f2, + 0x16fe300016fe4, + 0x17000000187f8, 0x1880000018af3, 0x1b0000001b11f, + 0x1b1500001b153, + 0x1b1640001b168, 0x1b1700001b2fc, 0x1bc000001bc6b, 0x1bc700001bc7d, @@ -1955,9 +1962,14 @@ 0x1e01b0001e022, 0x1e0230001e025, 0x1e0260001e02b, + 0x1e1000001e12d, + 0x1e1300001e13e, + 0x1e1400001e14a, + 0x1e14e0001e14f, + 0x1e2c00001e2fa, 0x1e8000001e8c5, 0x1e8d00001e8d7, - 
0x1e9220001e94b, + 0x1e9220001e94c, 0x1e9500001e95a, 0x200000002a6d7, 0x2a7000002b735, diff --git a/pipenv/vendor/idna/package_data.py b/pipenv/vendor/idna/package_data.py index 257e898939..b5d8216558 100644 --- a/pipenv/vendor/idna/package_data.py +++ b/pipenv/vendor/idna/package_data.py @@ -1,2 +1,2 @@ -__version__ = '2.8' +__version__ = '2.9' diff --git a/pipenv/vendor/idna/uts46data.py b/pipenv/vendor/idna/uts46data.py index a68ed4c0ec..2711136d7d 100644 --- a/pipenv/vendor/idna/uts46data.py +++ b/pipenv/vendor/idna/uts46data.py @@ -4,7 +4,7 @@ """IDNA Mapping Table from UTS46.""" -__version__ = "11.0.0" +__version__ = "12.1.0" def _seg_0(): return [ (0x0, '3'), @@ -1272,7 +1272,7 @@ def _seg_12(): (0xC64, 'X'), (0xC66, 'V'), (0xC70, 'X'), - (0xC78, 'V'), + (0xC77, 'V'), (0xC8D, 'X'), (0xC8E, 'V'), (0xC91, 'X'), @@ -1348,33 +1348,19 @@ def _seg_12(): (0xE83, 'X'), (0xE84, 'V'), (0xE85, 'X'), - (0xE87, 'V'), - (0xE89, 'X'), - (0xE8A, 'V'), + (0xE86, 'V'), (0xE8B, 'X'), - (0xE8D, 'V'), - (0xE8E, 'X'), - (0xE94, 'V'), - ] - -def _seg_13(): - return [ - (0xE98, 'X'), - (0xE99, 'V'), - (0xEA0, 'X'), - (0xEA1, 'V'), + (0xE8C, 'V'), (0xEA4, 'X'), (0xEA5, 'V'), (0xEA6, 'X'), (0xEA7, 'V'), - (0xEA8, 'X'), - (0xEAA, 'V'), - (0xEAC, 'X'), - (0xEAD, 'V'), + ] + +def _seg_13(): + return [ (0xEB3, 'M', u'ໍາ'), (0xEB4, 'V'), - (0xEBA, 'X'), - (0xEBB, 'V'), (0xEBE, 'X'), (0xEC0, 'V'), (0xEC5, 'X'), @@ -1459,10 +1445,6 @@ def _seg_13(): (0x1260, 'V'), (0x1289, 'X'), (0x128A, 'V'), - ] - -def _seg_14(): - return [ (0x128E, 'X'), (0x1290, 'V'), (0x12B1, 'X'), @@ -1477,6 +1459,10 @@ def _seg_14(): (0x12C8, 'V'), (0x12D7, 'X'), (0x12D8, 'V'), + ] + +def _seg_14(): + return [ (0x1311, 'X'), (0x1312, 'V'), (0x1316, 'X'), @@ -1563,10 +1549,6 @@ def _seg_14(): (0x1A7F, 'V'), (0x1A8A, 'X'), (0x1A90, 'V'), - ] - -def _seg_15(): - return [ (0x1A9A, 'X'), (0x1AA0, 'V'), (0x1AAE, 'X'), @@ -1581,6 +1563,10 @@ def _seg_15(): (0x1BFC, 'V'), (0x1C38, 'X'), (0x1C3B, 'V'), + ] + +def _seg_15(): + return [ (0x1C4A, 'X'), (0x1C4D, 'V'), (0x1C80, 'M', u'в'), @@ -1592,10 +1578,57 @@ def _seg_15(): (0x1C87, 'M', u'ѣ'), (0x1C88, 'M', u'ꙋ'), (0x1C89, 'X'), + (0x1C90, 'M', u'ა'), + (0x1C91, 'M', u'ბ'), + (0x1C92, 'M', u'გ'), + (0x1C93, 'M', u'დ'), + (0x1C94, 'M', u'ე'), + (0x1C95, 'M', u'ვ'), + (0x1C96, 'M', u'ზ'), + (0x1C97, 'M', u'თ'), + (0x1C98, 'M', u'ი'), + (0x1C99, 'M', u'კ'), + (0x1C9A, 'M', u'ლ'), + (0x1C9B, 'M', u'მ'), + (0x1C9C, 'M', u'ნ'), + (0x1C9D, 'M', u'ო'), + (0x1C9E, 'M', u'პ'), + (0x1C9F, 'M', u'ჟ'), + (0x1CA0, 'M', u'რ'), + (0x1CA1, 'M', u'ს'), + (0x1CA2, 'M', u'ტ'), + (0x1CA3, 'M', u'უ'), + (0x1CA4, 'M', u'ფ'), + (0x1CA5, 'M', u'ქ'), + (0x1CA6, 'M', u'ღ'), + (0x1CA7, 'M', u'ყ'), + (0x1CA8, 'M', u'შ'), + (0x1CA9, 'M', u'ჩ'), + (0x1CAA, 'M', u'ც'), + (0x1CAB, 'M', u'ძ'), + (0x1CAC, 'M', u'წ'), + (0x1CAD, 'M', u'ჭ'), + (0x1CAE, 'M', u'ხ'), + (0x1CAF, 'M', u'ჯ'), + (0x1CB0, 'M', u'ჰ'), + (0x1CB1, 'M', u'ჱ'), + (0x1CB2, 'M', u'ჲ'), + (0x1CB3, 'M', u'ჳ'), + (0x1CB4, 'M', u'ჴ'), + (0x1CB5, 'M', u'ჵ'), + (0x1CB6, 'M', u'ჶ'), + (0x1CB7, 'M', u'ჷ'), + (0x1CB8, 'M', u'ჸ'), + (0x1CB9, 'M', u'ჹ'), + (0x1CBA, 'M', u'ჺ'), + (0x1CBB, 'X'), + (0x1CBD, 'M', u'ჽ'), + (0x1CBE, 'M', u'ჾ'), + (0x1CBF, 'M', u'ჿ'), (0x1CC0, 'V'), (0x1CC8, 'X'), (0x1CD0, 'V'), - (0x1CFA, 'X'), + (0x1CFB, 'X'), (0x1D00, 'V'), (0x1D2C, 'M', u'a'), (0x1D2D, 'M', u'æ'), @@ -1634,6 +1667,10 @@ def _seg_15(): (0x1D4E, 'V'), (0x1D4F, 'M', u'k'), (0x1D50, 'M', u'm'), + ] + +def _seg_16(): + return [ (0x1D51, 'M', u'ŋ'), (0x1D52, 'M', u'o'), (0x1D53, 'M', 
u'ɔ'), @@ -1667,10 +1704,6 @@ def _seg_15(): (0x1D9C, 'M', u'c'), (0x1D9D, 'M', u'ɕ'), (0x1D9E, 'M', u'ð'), - ] - -def _seg_16(): - return [ (0x1D9F, 'M', u'ɜ'), (0x1DA0, 'M', u'f'), (0x1DA1, 'M', u'ɟ'), @@ -1738,6 +1771,10 @@ def _seg_16(): (0x1E1C, 'M', u'ḝ'), (0x1E1D, 'V'), (0x1E1E, 'M', u'ḟ'), + ] + +def _seg_17(): + return [ (0x1E1F, 'V'), (0x1E20, 'M', u'ḡ'), (0x1E21, 'V'), @@ -1771,10 +1808,6 @@ def _seg_16(): (0x1E3D, 'V'), (0x1E3E, 'M', u'ḿ'), (0x1E3F, 'V'), - ] - -def _seg_17(): - return [ (0x1E40, 'M', u'ṁ'), (0x1E41, 'V'), (0x1E42, 'M', u'ṃ'), @@ -1842,6 +1875,10 @@ def _seg_17(): (0x1E80, 'M', u'ẁ'), (0x1E81, 'V'), (0x1E82, 'M', u'ẃ'), + ] + +def _seg_18(): + return [ (0x1E83, 'V'), (0x1E84, 'M', u'ẅ'), (0x1E85, 'V'), @@ -1875,10 +1912,6 @@ def _seg_17(): (0x1EA6, 'M', u'ầ'), (0x1EA7, 'V'), (0x1EA8, 'M', u'ẩ'), - ] - -def _seg_18(): - return [ (0x1EA9, 'V'), (0x1EAA, 'M', u'ẫ'), (0x1EAB, 'V'), @@ -1946,6 +1979,10 @@ def _seg_18(): (0x1EE9, 'V'), (0x1EEA, 'M', u'ừ'), (0x1EEB, 'V'), + ] + +def _seg_19(): + return [ (0x1EEC, 'M', u'ử'), (0x1EED, 'V'), (0x1EEE, 'M', u'ữ'), @@ -1979,10 +2016,6 @@ def _seg_18(): (0x1F18, 'M', u'ἐ'), (0x1F19, 'M', u'ἑ'), (0x1F1A, 'M', u'ἒ'), - ] - -def _seg_19(): - return [ (0x1F1B, 'M', u'ἓ'), (0x1F1C, 'M', u'ἔ'), (0x1F1D, 'M', u'ἕ'), @@ -2050,6 +2083,10 @@ def _seg_19(): (0x1F80, 'M', u'ἀι'), (0x1F81, 'M', u'ἁι'), (0x1F82, 'M', u'ἂι'), + ] + +def _seg_20(): + return [ (0x1F83, 'M', u'ἃι'), (0x1F84, 'M', u'ἄι'), (0x1F85, 'M', u'ἅι'), @@ -2083,10 +2120,6 @@ def _seg_19(): (0x1FA1, 'M', u'ὡι'), (0x1FA2, 'M', u'ὢι'), (0x1FA3, 'M', u'ὣι'), - ] - -def _seg_20(): - return [ (0x1FA4, 'M', u'ὤι'), (0x1FA5, 'M', u'ὥι'), (0x1FA6, 'M', u'ὦι'), @@ -2154,6 +2187,10 @@ def _seg_20(): (0x1FEE, '3', u' ̈́'), (0x1FEF, '3', u'`'), (0x1FF0, 'X'), + ] + +def _seg_21(): + return [ (0x1FF2, 'M', u'ὼι'), (0x1FF3, 'M', u'ωι'), (0x1FF4, 'M', u'ώι'), @@ -2187,10 +2224,6 @@ def _seg_20(): (0x2035, 'V'), (0x2036, 'M', u'‵‵'), (0x2037, 'M', u'‵‵‵'), - ] - -def _seg_21(): - return [ (0x2038, 'V'), (0x203C, '3', u'!!'), (0x203D, 'V'), @@ -2258,6 +2291,10 @@ def _seg_21(): (0x20C0, 'X'), (0x20D0, 'V'), (0x20F1, 'X'), + ] + +def _seg_22(): + return [ (0x2100, '3', u'a/c'), (0x2101, '3', u'a/s'), (0x2102, 'M', u'c'), @@ -2291,10 +2328,6 @@ def _seg_21(): (0x2127, 'V'), (0x2128, 'M', u'z'), (0x2129, 'V'), - ] - -def _seg_22(): - return [ (0x212A, 'M', u'k'), (0x212B, 'M', u'å'), (0x212C, 'M', u'b'), @@ -2362,6 +2395,10 @@ def _seg_22(): (0x2175, 'M', u'vi'), (0x2176, 'M', u'vii'), (0x2177, 'M', u'viii'), + ] + +def _seg_23(): + return [ (0x2178, 'M', u'ix'), (0x2179, 'M', u'x'), (0x217A, 'M', u'xi'), @@ -2395,10 +2432,6 @@ def _seg_22(): (0x244B, 'X'), (0x2460, 'M', u'1'), (0x2461, 'M', u'2'), - ] - -def _seg_23(): - return [ (0x2462, 'M', u'3'), (0x2463, 'M', u'4'), (0x2464, 'M', u'5'), @@ -2466,6 +2499,10 @@ def _seg_23(): (0x24B5, '3', u'(z)'), (0x24B6, 'M', u'a'), (0x24B7, 'M', u'b'), + ] + +def _seg_24(): + return [ (0x24B8, 'M', u'c'), (0x24B9, 'M', u'd'), (0x24BA, 'M', u'e'), @@ -2499,10 +2536,6 @@ def _seg_23(): (0x24D6, 'M', u'g'), (0x24D7, 'M', u'h'), (0x24D8, 'M', u'i'), - ] - -def _seg_24(): - return [ (0x24D9, 'M', u'j'), (0x24DA, 'M', u'k'), (0x24DB, 'M', u'l'), @@ -2534,9 +2567,6 @@ def _seg_24(): (0x2B76, 'V'), (0x2B96, 'X'), (0x2B98, 'V'), - (0x2BC9, 'X'), - (0x2BCA, 'V'), - (0x2BFF, 'X'), (0x2C00, 'M', u'ⰰ'), (0x2C01, 'M', u'ⰱ'), (0x2C02, 'M', u'ⰲ'), @@ -2573,6 +2603,10 @@ def _seg_24(): (0x2C21, 'M', u'ⱑ'), (0x2C22, 'M', u'ⱒ'), (0x2C23, 'M', u'ⱓ'), + ] + +def 
_seg_25(): + return [ (0x2C24, 'M', u'ⱔ'), (0x2C25, 'M', u'ⱕ'), (0x2C26, 'M', u'ⱖ'), @@ -2603,10 +2637,6 @@ def _seg_24(): (0x2C6E, 'M', u'ɱ'), (0x2C6F, 'M', u'ɐ'), (0x2C70, 'M', u'ɒ'), - ] - -def _seg_25(): - return [ (0x2C71, 'V'), (0x2C72, 'M', u'ⱳ'), (0x2C73, 'V'), @@ -2677,6 +2707,10 @@ def _seg_25(): (0x2CBA, 'M', u'ⲻ'), (0x2CBB, 'V'), (0x2CBC, 'M', u'ⲽ'), + ] + +def _seg_26(): + return [ (0x2CBD, 'V'), (0x2CBE, 'M', u'ⲿ'), (0x2CBF, 'V'), @@ -2707,10 +2741,6 @@ def _seg_25(): (0x2CD8, 'M', u'ⳙ'), (0x2CD9, 'V'), (0x2CDA, 'M', u'ⳛ'), - ] - -def _seg_26(): - return [ (0x2CDB, 'V'), (0x2CDC, 'M', u'ⳝ'), (0x2CDD, 'V'), @@ -2757,7 +2787,7 @@ def _seg_26(): (0x2DD8, 'V'), (0x2DDF, 'X'), (0x2DE0, 'V'), - (0x2E4F, 'X'), + (0x2E50, 'X'), (0x2E80, 'V'), (0x2E9A, 'X'), (0x2E9B, 'V'), @@ -2781,6 +2811,10 @@ def _seg_26(): (0x2F0D, 'M', u'冖'), (0x2F0E, 'M', u'冫'), (0x2F0F, 'M', u'几'), + ] + +def _seg_27(): + return [ (0x2F10, 'M', u'凵'), (0x2F11, 'M', u'刀'), (0x2F12, 'M', u'力'), @@ -2811,10 +2845,6 @@ def _seg_26(): (0x2F2B, 'M', u'尸'), (0x2F2C, 'M', u'屮'), (0x2F2D, 'M', u'山'), - ] - -def _seg_27(): - return [ (0x2F2E, 'M', u'巛'), (0x2F2F, 'M', u'工'), (0x2F30, 'M', u'己'), @@ -2885,6 +2915,10 @@ def _seg_27(): (0x2F71, 'M', u'禸'), (0x2F72, 'M', u'禾'), (0x2F73, 'M', u'穴'), + ] + +def _seg_28(): + return [ (0x2F74, 'M', u'立'), (0x2F75, 'M', u'竹'), (0x2F76, 'M', u'米'), @@ -2915,10 +2949,6 @@ def _seg_27(): (0x2F8F, 'M', u'行'), (0x2F90, 'M', u'衣'), (0x2F91, 'M', u'襾'), - ] - -def _seg_28(): - return [ (0x2F92, 'M', u'見'), (0x2F93, 'M', u'角'), (0x2F94, 'M', u'言'), @@ -2989,6 +3019,10 @@ def _seg_28(): (0x2FD5, 'M', u'龠'), (0x2FD6, 'X'), (0x3000, '3', u' '), + ] + +def _seg_29(): + return [ (0x3001, 'V'), (0x3002, 'M', u'.'), (0x3003, 'V'), @@ -3019,10 +3053,6 @@ def _seg_28(): (0x3136, 'M', u'ᆭ'), (0x3137, 'M', u'ᄃ'), (0x3138, 'M', u'ᄄ'), - ] - -def _seg_29(): - return [ (0x3139, 'M', u'ᄅ'), (0x313A, 'M', u'ᆰ'), (0x313B, 'M', u'ᆱ'), @@ -3093,6 +3123,10 @@ def _seg_29(): (0x317C, 'M', u'ᄯ'), (0x317D, 'M', u'ᄲ'), (0x317E, 'M', u'ᄶ'), + ] + +def _seg_30(): + return [ (0x317F, 'M', u'ᅀ'), (0x3180, 'M', u'ᅇ'), (0x3181, 'M', u'ᅌ'), @@ -3123,10 +3157,6 @@ def _seg_29(): (0x319B, 'M', u'丙'), (0x319C, 'M', u'丁'), (0x319D, 'M', u'天'), - ] - -def _seg_30(): - return [ (0x319E, 'M', u'地'), (0x319F, 'M', u'人'), (0x31A0, 'V'), @@ -3197,6 +3227,10 @@ def _seg_30(): (0x323C, '3', u'(監)'), (0x323D, '3', u'(企)'), (0x323E, '3', u'(資)'), + ] + +def _seg_31(): + return [ (0x323F, '3', u'(協)'), (0x3240, '3', u'(祭)'), (0x3241, '3', u'(休)'), @@ -3227,10 +3261,6 @@ def _seg_30(): (0x3261, 'M', u'ᄂ'), (0x3262, 'M', u'ᄃ'), (0x3263, 'M', u'ᄅ'), - ] - -def _seg_31(): - return [ (0x3264, 'M', u'ᄆ'), (0x3265, 'M', u'ᄇ'), (0x3266, 'M', u'ᄉ'), @@ -3301,6 +3331,10 @@ def _seg_31(): (0x32A7, 'M', u'左'), (0x32A8, 'M', u'右'), (0x32A9, 'M', u'医'), + ] + +def _seg_32(): + return [ (0x32AA, 'M', u'宗'), (0x32AB, 'M', u'学'), (0x32AC, 'M', u'監'), @@ -3331,10 +3365,6 @@ def _seg_31(): (0x32C5, 'M', u'6月'), (0x32C6, 'M', u'7月'), (0x32C7, 'M', u'8月'), - ] - -def _seg_32(): - return [ (0x32C8, 'M', u'9月'), (0x32C9, 'M', u'10月'), (0x32CA, 'M', u'11月'), @@ -3390,7 +3420,7 @@ def _seg_32(): (0x32FC, 'M', u'ヰ'), (0x32FD, 'M', u'ヱ'), (0x32FE, 'M', u'ヲ'), - (0x32FF, 'X'), + (0x32FF, 'M', u'令和'), (0x3300, 'M', u'アパート'), (0x3301, 'M', u'アルファ'), (0x3302, 'M', u'アンペア'), @@ -3405,6 +3435,10 @@ def _seg_32(): (0x330B, 'M', u'カイリ'), (0x330C, 'M', u'カラット'), (0x330D, 'M', u'カロリー'), + ] + +def _seg_33(): + return [ (0x330E, 'M', u'ガロン'), (0x330F, 'M', u'ガンマ'), 
(0x3310, 'M', u'ギガ'), @@ -3435,10 +3469,6 @@ def _seg_32(): (0x3329, 'M', u'ノット'), (0x332A, 'M', u'ハイツ'), (0x332B, 'M', u'パーセント'), - ] - -def _seg_33(): - return [ (0x332C, 'M', u'パーツ'), (0x332D, 'M', u'バーレル'), (0x332E, 'M', u'ピアストル'), @@ -3509,6 +3539,10 @@ def _seg_33(): (0x336F, 'M', u'23点'), (0x3370, 'M', u'24点'), (0x3371, 'M', u'hpa'), + ] + +def _seg_34(): + return [ (0x3372, 'M', u'da'), (0x3373, 'M', u'au'), (0x3374, 'M', u'bar'), @@ -3539,10 +3573,6 @@ def _seg_33(): (0x338D, 'M', u'μg'), (0x338E, 'M', u'mg'), (0x338F, 'M', u'kg'), - ] - -def _seg_34(): - return [ (0x3390, 'M', u'hz'), (0x3391, 'M', u'khz'), (0x3392, 'M', u'mhz'), @@ -3613,6 +3643,10 @@ def _seg_34(): (0x33D3, 'M', u'lx'), (0x33D4, 'M', u'mb'), (0x33D5, 'M', u'mil'), + ] + +def _seg_35(): + return [ (0x33D6, 'M', u'mol'), (0x33D7, 'M', u'ph'), (0x33D8, 'X'), @@ -3643,10 +3677,6 @@ def _seg_34(): (0x33F1, 'M', u'18日'), (0x33F2, 'M', u'19日'), (0x33F3, 'M', u'20日'), - ] - -def _seg_35(): - return [ (0x33F4, 'M', u'21日'), (0x33F5, 'M', u'22日'), (0x33F6, 'M', u'23日'), @@ -3717,6 +3747,10 @@ def _seg_35(): (0xA66D, 'V'), (0xA680, 'M', u'ꚁ'), (0xA681, 'V'), + ] + +def _seg_36(): + return [ (0xA682, 'M', u'ꚃ'), (0xA683, 'V'), (0xA684, 'M', u'ꚅ'), @@ -3747,10 +3781,6 @@ def _seg_35(): (0xA69D, 'M', u'ь'), (0xA69E, 'V'), (0xA6F8, 'X'), - ] - -def _seg_36(): - return [ (0xA700, 'V'), (0xA722, 'M', u'ꜣ'), (0xA723, 'V'), @@ -3821,6 +3851,10 @@ def _seg_36(): (0xA766, 'M', u'ꝧ'), (0xA767, 'V'), (0xA768, 'M', u'ꝩ'), + ] + +def _seg_37(): + return [ (0xA769, 'V'), (0xA76A, 'M', u'ꝫ'), (0xA76B, 'V'), @@ -3851,10 +3885,6 @@ def _seg_36(): (0xA78E, 'V'), (0xA790, 'M', u'ꞑ'), (0xA791, 'V'), - ] - -def _seg_37(): - return [ (0xA792, 'M', u'ꞓ'), (0xA793, 'V'), (0xA796, 'M', u'ꞗ'), @@ -3891,9 +3921,21 @@ def _seg_37(): (0xA7B5, 'V'), (0xA7B6, 'M', u'ꞷ'), (0xA7B7, 'V'), - (0xA7B8, 'X'), + (0xA7B8, 'M', u'ꞹ'), (0xA7B9, 'V'), - (0xA7BA, 'X'), + (0xA7BA, 'M', u'ꞻ'), + (0xA7BB, 'V'), + (0xA7BC, 'M', u'ꞽ'), + (0xA7BD, 'V'), + (0xA7BE, 'M', u'ꞿ'), + (0xA7BF, 'V'), + (0xA7C0, 'X'), + (0xA7C2, 'M', u'ꟃ'), + (0xA7C3, 'V'), + (0xA7C4, 'M', u'ꞔ'), + (0xA7C5, 'M', u'ʂ'), + (0xA7C6, 'M', u'ᶎ'), + (0xA7C7, 'X'), (0xA7F7, 'V'), (0xA7F8, 'M', u'ħ'), (0xA7F9, 'M', u'œ'), @@ -3913,6 +3955,10 @@ def _seg_37(): (0xA97D, 'X'), (0xA980, 'V'), (0xA9CE, 'X'), + ] + +def _seg_38(): + return [ (0xA9CF, 'V'), (0xA9DA, 'X'), (0xA9DE, 'V'), @@ -3943,7 +3989,7 @@ def _seg_37(): (0xAB5E, 'M', u'ɫ'), (0xAB5F, 'M', u'ꭒ'), (0xAB60, 'V'), - (0xAB66, 'X'), + (0xAB68, 'X'), (0xAB70, 'M', u'Ꭰ'), (0xAB71, 'M', u'Ꭱ'), (0xAB72, 'M', u'Ꭲ'), @@ -3955,10 +4001,6 @@ def _seg_37(): (0xAB78, 'M', u'Ꭸ'), (0xAB79, 'M', u'Ꭹ'), (0xAB7A, 'M', u'Ꭺ'), - ] - -def _seg_38(): - return [ (0xAB7B, 'M', u'Ꭻ'), (0xAB7C, 'M', u'Ꭼ'), (0xAB7D, 'M', u'Ꭽ'), @@ -4017,6 +4059,10 @@ def _seg_38(): (0xABB2, 'M', u'Ꮲ'), (0xABB3, 'M', u'Ꮳ'), (0xABB4, 'M', u'Ꮴ'), + ] + +def _seg_39(): + return [ (0xABB5, 'M', u'Ꮵ'), (0xABB6, 'M', u'Ꮶ'), (0xABB7, 'M', u'Ꮷ'), @@ -4059,10 +4105,6 @@ def _seg_38(): (0xF913, 'M', u'邏'), (0xF914, 'M', u'樂'), (0xF915, 'M', u'洛'), - ] - -def _seg_39(): - return [ (0xF916, 'M', u'烙'), (0xF917, 'M', u'珞'), (0xF918, 'M', u'落'), @@ -4121,6 +4163,10 @@ def _seg_39(): (0xF94D, 'M', u'淚'), (0xF94E, 'M', u'漏'), (0xF94F, 'M', u'累'), + ] + +def _seg_40(): + return [ (0xF950, 'M', u'縷'), (0xF951, 'M', u'陋'), (0xF952, 'M', u'勒'), @@ -4163,10 +4209,6 @@ def _seg_39(): (0xF977, 'M', u'亮'), (0xF978, 'M', u'兩'), (0xF979, 'M', u'凉'), - ] - -def _seg_40(): - return [ (0xF97A, 'M', u'梁'), (0xF97B, 'M', 
u'糧'), (0xF97C, 'M', u'良'), @@ -4225,6 +4267,10 @@ def _seg_40(): (0xF9B1, 'M', u'鈴'), (0xF9B2, 'M', u'零'), (0xF9B3, 'M', u'靈'), + ] + +def _seg_41(): + return [ (0xF9B4, 'M', u'領'), (0xF9B5, 'M', u'例'), (0xF9B6, 'M', u'禮'), @@ -4267,10 +4313,6 @@ def _seg_40(): (0xF9DB, 'M', u'率'), (0xF9DC, 'M', u'隆'), (0xF9DD, 'M', u'利'), - ] - -def _seg_41(): - return [ (0xF9DE, 'M', u'吏'), (0xF9DF, 'M', u'履'), (0xF9E0, 'M', u'易'), @@ -4329,6 +4371,10 @@ def _seg_41(): (0xFA17, 'M', u'益'), (0xFA18, 'M', u'礼'), (0xFA19, 'M', u'神'), + ] + +def _seg_42(): + return [ (0xFA1A, 'M', u'祥'), (0xFA1B, 'M', u'福'), (0xFA1C, 'M', u'靖'), @@ -4371,10 +4417,6 @@ def _seg_41(): (0xFA44, 'M', u'梅'), (0xFA45, 'M', u'海'), (0xFA46, 'M', u'渚'), - ] - -def _seg_42(): - return [ (0xFA47, 'M', u'漢'), (0xFA48, 'M', u'煮'), (0xFA49, 'M', u'爫'), @@ -4433,6 +4475,10 @@ def _seg_42(): (0xFA80, 'M', u'婢'), (0xFA81, 'M', u'嬨'), (0xFA82, 'M', u'廒'), + ] + +def _seg_43(): + return [ (0xFA83, 'M', u'廙'), (0xFA84, 'M', u'彩'), (0xFA85, 'M', u'徭'), @@ -4475,10 +4521,6 @@ def _seg_42(): (0xFAAA, 'M', u'着'), (0xFAAB, 'M', u'磌'), (0xFAAC, 'M', u'窱'), - ] - -def _seg_43(): - return [ (0xFAAD, 'M', u'節'), (0xFAAE, 'M', u'类'), (0xFAAF, 'M', u'絛'), @@ -4537,6 +4579,10 @@ def _seg_43(): (0xFB15, 'M', u'մի'), (0xFB16, 'M', u'վն'), (0xFB17, 'M', u'մխ'), + ] + +def _seg_44(): + return [ (0xFB18, 'X'), (0xFB1D, 'M', u'יִ'), (0xFB1E, 'V'), @@ -4579,10 +4625,6 @@ def _seg_43(): (0xFB43, 'M', u'ףּ'), (0xFB44, 'M', u'פּ'), (0xFB45, 'X'), - ] - -def _seg_44(): - return [ (0xFB46, 'M', u'צּ'), (0xFB47, 'M', u'קּ'), (0xFB48, 'M', u'רּ'), @@ -4641,6 +4683,10 @@ def _seg_44(): (0xFBF0, 'M', u'ئۇ'), (0xFBF2, 'M', u'ئۆ'), (0xFBF4, 'M', u'ئۈ'), + ] + +def _seg_45(): + return [ (0xFBF6, 'M', u'ئې'), (0xFBF9, 'M', u'ئى'), (0xFBFC, 'M', u'ی'), @@ -4683,10 +4729,6 @@ def _seg_44(): (0xFC24, 'M', u'ضخ'), (0xFC25, 'M', u'ضم'), (0xFC26, 'M', u'طح'), - ] - -def _seg_45(): - return [ (0xFC27, 'M', u'طم'), (0xFC28, 'M', u'ظم'), (0xFC29, 'M', u'عج'), @@ -4745,6 +4787,10 @@ def _seg_45(): (0xFC5E, '3', u' ٌّ'), (0xFC5F, '3', u' ٍّ'), (0xFC60, '3', u' َّ'), + ] + +def _seg_46(): + return [ (0xFC61, '3', u' ُّ'), (0xFC62, '3', u' ِّ'), (0xFC63, '3', u' ّٰ'), @@ -4787,10 +4833,6 @@ def _seg_45(): (0xFC88, 'M', u'ما'), (0xFC89, 'M', u'مم'), (0xFC8A, 'M', u'نر'), - ] - -def _seg_46(): - return [ (0xFC8B, 'M', u'نز'), (0xFC8C, 'M', u'نم'), (0xFC8D, 'M', u'نن'), @@ -4849,6 +4891,10 @@ def _seg_46(): (0xFCC2, 'M', u'قح'), (0xFCC3, 'M', u'قم'), (0xFCC4, 'M', u'كج'), + ] + +def _seg_47(): + return [ (0xFCC5, 'M', u'كح'), (0xFCC6, 'M', u'كخ'), (0xFCC7, 'M', u'كل'), @@ -4891,10 +4937,6 @@ def _seg_46(): (0xFCEC, 'M', u'كم'), (0xFCED, 'M', u'لم'), (0xFCEE, 'M', u'نم'), - ] - -def _seg_47(): - return [ (0xFCEF, 'M', u'نه'), (0xFCF0, 'M', u'يم'), (0xFCF1, 'M', u'يه'), @@ -4953,6 +4995,10 @@ def _seg_47(): (0xFD26, 'M', u'شح'), (0xFD27, 'M', u'شخ'), (0xFD28, 'M', u'شم'), + ] + +def _seg_48(): + return [ (0xFD29, 'M', u'شر'), (0xFD2A, 'M', u'سر'), (0xFD2B, 'M', u'صر'), @@ -4995,10 +5041,6 @@ def _seg_47(): (0xFD66, 'M', u'صمم'), (0xFD67, 'M', u'شحم'), (0xFD69, 'M', u'شجي'), - ] - -def _seg_48(): - return [ (0xFD6A, 'M', u'شمخ'), (0xFD6C, 'M', u'شمم'), (0xFD6E, 'M', u'ضحى'), @@ -5057,6 +5099,10 @@ def _seg_48(): (0xFDAD, 'M', u'لمي'), (0xFDAE, 'M', u'يحي'), (0xFDAF, 'M', u'يجي'), + ] + +def _seg_49(): + return [ (0xFDB0, 'M', u'يمي'), (0xFDB1, 'M', u'ممي'), (0xFDB2, 'M', u'قمي'), @@ -5099,10 +5145,6 @@ def _seg_48(): (0xFDFE, 'X'), (0xFE00, 'I'), (0xFE10, '3', u','), - ] - -def _seg_49(): - 
return [ (0xFE11, 'M', u'、'), (0xFE12, 'X'), (0xFE13, '3', u':'), @@ -5161,6 +5203,10 @@ def _seg_49(): (0xFE65, '3', u'>'), (0xFE66, '3', u'='), (0xFE67, 'X'), + ] + +def _seg_50(): + return [ (0xFE68, '3', u'\\'), (0xFE69, '3', u'$'), (0xFE6A, '3', u'%'), @@ -5203,10 +5249,6 @@ def _seg_49(): (0xFEB1, 'M', u'س'), (0xFEB5, 'M', u'ش'), (0xFEB9, 'M', u'ص'), - ] - -def _seg_50(): - return [ (0xFEBD, 'M', u'ض'), (0xFEC1, 'M', u'ط'), (0xFEC5, 'M', u'ظ'), @@ -5265,6 +5307,10 @@ def _seg_50(): (0xFF22, 'M', u'b'), (0xFF23, 'M', u'c'), (0xFF24, 'M', u'd'), + ] + +def _seg_51(): + return [ (0xFF25, 'M', u'e'), (0xFF26, 'M', u'f'), (0xFF27, 'M', u'g'), @@ -5307,10 +5353,6 @@ def _seg_50(): (0xFF4C, 'M', u'l'), (0xFF4D, 'M', u'm'), (0xFF4E, 'M', u'n'), - ] - -def _seg_51(): - return [ (0xFF4F, 'M', u'o'), (0xFF50, 'M', u'p'), (0xFF51, 'M', u'q'), @@ -5369,6 +5411,10 @@ def _seg_51(): (0xFF86, 'M', u'ニ'), (0xFF87, 'M', u'ヌ'), (0xFF88, 'M', u'ネ'), + ] + +def _seg_52(): + return [ (0xFF89, 'M', u'ノ'), (0xFF8A, 'M', u'ハ'), (0xFF8B, 'M', u'ヒ'), @@ -5411,10 +5457,6 @@ def _seg_51(): (0xFFB0, 'M', u'ᄚ'), (0xFFB1, 'M', u'ᄆ'), (0xFFB2, 'M', u'ᄇ'), - ] - -def _seg_52(): - return [ (0xFFB3, 'M', u'ᄈ'), (0xFFB4, 'M', u'ᄡ'), (0xFFB5, 'M', u'ᄉ'), @@ -5473,6 +5515,10 @@ def _seg_52(): (0x1000C, 'X'), (0x1000D, 'V'), (0x10027, 'X'), + ] + +def _seg_53(): + return [ (0x10028, 'V'), (0x1003B, 'X'), (0x1003C, 'V'), @@ -5515,10 +5561,6 @@ def _seg_52(): (0x103D6, 'X'), (0x10400, 'M', u'𐐨'), (0x10401, 'M', u'𐐩'), - ] - -def _seg_53(): - return [ (0x10402, 'M', u'𐐪'), (0x10403, 'M', u'𐐫'), (0x10404, 'M', u'𐐬'), @@ -5577,6 +5619,10 @@ def _seg_53(): (0x104BD, 'M', u'𐓥'), (0x104BE, 'M', u'𐓦'), (0x104BF, 'M', u'𐓧'), + ] + +def _seg_54(): + return [ (0x104C0, 'M', u'𐓨'), (0x104C1, 'M', u'𐓩'), (0x104C2, 'M', u'𐓪'), @@ -5619,10 +5665,6 @@ def _seg_53(): (0x1080A, 'V'), (0x10836, 'X'), (0x10837, 'V'), - ] - -def _seg_54(): - return [ (0x10839, 'X'), (0x1083C, 'V'), (0x1083D, 'X'), @@ -5681,6 +5723,10 @@ def _seg_54(): (0x10BA9, 'V'), (0x10BB0, 'X'), (0x10C00, 'V'), + ] + +def _seg_55(): + return [ (0x10C49, 'X'), (0x10C80, 'M', u'𐳀'), (0x10C81, 'M', u'𐳁'), @@ -5723,10 +5769,6 @@ def _seg_54(): (0x10CA6, 'M', u'𐳦'), (0x10CA7, 'M', u'𐳧'), (0x10CA8, 'M', u'𐳨'), - ] - -def _seg_55(): - return [ (0x10CA9, 'M', u'𐳩'), (0x10CAA, 'M', u'𐳪'), (0x10CAB, 'M', u'𐳫'), @@ -5750,6 +5792,8 @@ def _seg_55(): (0x10F28, 'X'), (0x10F30, 'V'), (0x10F5A, 'X'), + (0x10FE0, 'V'), + (0x10FF7, 'X'), (0x11000, 'V'), (0x1104E, 'X'), (0x11052, 'V'), @@ -5783,6 +5827,10 @@ def _seg_55(): (0x11288, 'V'), (0x11289, 'X'), (0x1128A, 'V'), + ] + +def _seg_56(): + return [ (0x1128E, 'X'), (0x1128F, 'V'), (0x1129E, 'X'), @@ -5827,11 +5875,7 @@ def _seg_55(): (0x1145B, 'V'), (0x1145C, 'X'), (0x1145D, 'V'), - ] - -def _seg_56(): - return [ - (0x1145F, 'X'), + (0x11460, 'X'), (0x11480, 'V'), (0x114C8, 'X'), (0x114D0, 'V'), @@ -5847,7 +5891,7 @@ def _seg_56(): (0x11660, 'V'), (0x1166D, 'X'), (0x11680, 'V'), - (0x116B8, 'X'), + (0x116B9, 'X'), (0x116C0, 'V'), (0x116CA, 'X'), (0x11700, 'V'), @@ -5887,6 +5931,10 @@ def _seg_56(): (0x118BA, 'M', u'𑣚'), (0x118BB, 'M', u'𑣛'), (0x118BC, 'M', u'𑣜'), + ] + +def _seg_57(): + return [ (0x118BD, 'M', u'𑣝'), (0x118BE, 'M', u'𑣞'), (0x118BF, 'M', u'𑣟'), @@ -5894,11 +5942,15 @@ def _seg_56(): (0x118F3, 'X'), (0x118FF, 'V'), (0x11900, 'X'), + (0x119A0, 'V'), + (0x119A8, 'X'), + (0x119AA, 'V'), + (0x119D8, 'X'), + (0x119DA, 'V'), + (0x119E5, 'X'), (0x11A00, 'V'), (0x11A48, 'X'), (0x11A50, 'V'), - (0x11A84, 'X'), - (0x11A86, 'V'), 
(0x11AA3, 'X'), (0x11AC0, 'V'), (0x11AF9, 'X'), @@ -5931,10 +5983,6 @@ def _seg_56(): (0x11D50, 'V'), (0x11D5A, 'X'), (0x11D60, 'V'), - ] - -def _seg_57(): - return [ (0x11D66, 'X'), (0x11D67, 'V'), (0x11D69, 'X'), @@ -5948,7 +5996,9 @@ def _seg_57(): (0x11DAA, 'X'), (0x11EE0, 'V'), (0x11EF9, 'X'), - (0x12000, 'V'), + (0x11FC0, 'V'), + (0x11FF2, 'X'), + (0x11FFF, 'V'), (0x1239A, 'X'), (0x12400, 'V'), (0x1246F, 'X'), @@ -5982,22 +6032,62 @@ def _seg_57(): (0x16B78, 'X'), (0x16B7D, 'V'), (0x16B90, 'X'), + (0x16E40, 'M', u'𖹠'), + (0x16E41, 'M', u'𖹡'), + (0x16E42, 'M', u'𖹢'), + ] + +def _seg_58(): + return [ + (0x16E43, 'M', u'𖹣'), + (0x16E44, 'M', u'𖹤'), + (0x16E45, 'M', u'𖹥'), + (0x16E46, 'M', u'𖹦'), + (0x16E47, 'M', u'𖹧'), + (0x16E48, 'M', u'𖹨'), + (0x16E49, 'M', u'𖹩'), + (0x16E4A, 'M', u'𖹪'), + (0x16E4B, 'M', u'𖹫'), + (0x16E4C, 'M', u'𖹬'), + (0x16E4D, 'M', u'𖹭'), + (0x16E4E, 'M', u'𖹮'), + (0x16E4F, 'M', u'𖹯'), + (0x16E50, 'M', u'𖹰'), + (0x16E51, 'M', u'𖹱'), + (0x16E52, 'M', u'𖹲'), + (0x16E53, 'M', u'𖹳'), + (0x16E54, 'M', u'𖹴'), + (0x16E55, 'M', u'𖹵'), + (0x16E56, 'M', u'𖹶'), + (0x16E57, 'M', u'𖹷'), + (0x16E58, 'M', u'𖹸'), + (0x16E59, 'M', u'𖹹'), + (0x16E5A, 'M', u'𖹺'), + (0x16E5B, 'M', u'𖹻'), + (0x16E5C, 'M', u'𖹼'), + (0x16E5D, 'M', u'𖹽'), + (0x16E5E, 'M', u'𖹾'), + (0x16E5F, 'M', u'𖹿'), (0x16E60, 'V'), (0x16E9B, 'X'), (0x16F00, 'V'), - (0x16F45, 'X'), - (0x16F50, 'V'), - (0x16F7F, 'X'), + (0x16F4B, 'X'), + (0x16F4F, 'V'), + (0x16F88, 'X'), (0x16F8F, 'V'), (0x16FA0, 'X'), (0x16FE0, 'V'), - (0x16FE2, 'X'), + (0x16FE4, 'X'), (0x17000, 'V'), - (0x187F2, 'X'), + (0x187F8, 'X'), (0x18800, 'V'), (0x18AF3, 'X'), (0x1B000, 'V'), (0x1B11F, 'X'), + (0x1B150, 'V'), + (0x1B153, 'X'), + (0x1B164, 'V'), + (0x1B168, 'X'), (0x1B170, 'V'), (0x1B2FC, 'X'), (0x1BC00, 'V'), @@ -6035,10 +6125,6 @@ def _seg_57(): (0x1D1C1, 'V'), (0x1D1E9, 'X'), (0x1D200, 'V'), - ] - -def _seg_58(): - return [ (0x1D246, 'X'), (0x1D2E0, 'V'), (0x1D2F4, 'X'), @@ -6053,6 +6139,10 @@ def _seg_58(): (0x1D404, 'M', u'e'), (0x1D405, 'M', u'f'), (0x1D406, 'M', u'g'), + ] + +def _seg_59(): + return [ (0x1D407, 'M', u'h'), (0x1D408, 'M', u'i'), (0x1D409, 'M', u'j'), @@ -6139,10 +6229,6 @@ def _seg_58(): (0x1D45A, 'M', u'm'), (0x1D45B, 'M', u'n'), (0x1D45C, 'M', u'o'), - ] - -def _seg_59(): - return [ (0x1D45D, 'M', u'p'), (0x1D45E, 'M', u'q'), (0x1D45F, 'M', u'r'), @@ -6157,6 +6243,10 @@ def _seg_59(): (0x1D468, 'M', u'a'), (0x1D469, 'M', u'b'), (0x1D46A, 'M', u'c'), + ] + +def _seg_60(): + return [ (0x1D46B, 'M', u'd'), (0x1D46C, 'M', u'e'), (0x1D46D, 'M', u'f'), @@ -6243,10 +6333,6 @@ def _seg_59(): (0x1D4C1, 'M', u'l'), (0x1D4C2, 'M', u'm'), (0x1D4C3, 'M', u'n'), - ] - -def _seg_60(): - return [ (0x1D4C4, 'X'), (0x1D4C5, 'M', u'p'), (0x1D4C6, 'M', u'q'), @@ -6261,6 +6347,10 @@ def _seg_60(): (0x1D4CF, 'M', u'z'), (0x1D4D0, 'M', u'a'), (0x1D4D1, 'M', u'b'), + ] + +def _seg_61(): + return [ (0x1D4D2, 'M', u'c'), (0x1D4D3, 'M', u'd'), (0x1D4D4, 'M', u'e'), @@ -6347,10 +6437,6 @@ def _seg_60(): (0x1D526, 'M', u'i'), (0x1D527, 'M', u'j'), (0x1D528, 'M', u'k'), - ] - -def _seg_61(): - return [ (0x1D529, 'M', u'l'), (0x1D52A, 'M', u'm'), (0x1D52B, 'M', u'n'), @@ -6365,6 +6451,10 @@ def _seg_61(): (0x1D534, 'M', u'w'), (0x1D535, 'M', u'x'), (0x1D536, 'M', u'y'), + ] + +def _seg_62(): + return [ (0x1D537, 'M', u'z'), (0x1D538, 'M', u'a'), (0x1D539, 'M', u'b'), @@ -6451,10 +6541,6 @@ def _seg_61(): (0x1D58C, 'M', u'g'), (0x1D58D, 'M', u'h'), (0x1D58E, 'M', u'i'), - ] - -def _seg_62(): - return [ (0x1D58F, 'M', u'j'), (0x1D590, 'M', u'k'), 
(0x1D591, 'M', u'l'), @@ -6469,6 +6555,10 @@ def _seg_62(): (0x1D59A, 'M', u'u'), (0x1D59B, 'M', u'v'), (0x1D59C, 'M', u'w'), + ] + +def _seg_63(): + return [ (0x1D59D, 'M', u'x'), (0x1D59E, 'M', u'y'), (0x1D59F, 'M', u'z'), @@ -6555,10 +6645,6 @@ def _seg_62(): (0x1D5F0, 'M', u'c'), (0x1D5F1, 'M', u'd'), (0x1D5F2, 'M', u'e'), - ] - -def _seg_63(): - return [ (0x1D5F3, 'M', u'f'), (0x1D5F4, 'M', u'g'), (0x1D5F5, 'M', u'h'), @@ -6573,6 +6659,10 @@ def _seg_63(): (0x1D5FE, 'M', u'q'), (0x1D5FF, 'M', u'r'), (0x1D600, 'M', u's'), + ] + +def _seg_64(): + return [ (0x1D601, 'M', u't'), (0x1D602, 'M', u'u'), (0x1D603, 'M', u'v'), @@ -6659,10 +6749,6 @@ def _seg_63(): (0x1D654, 'M', u'y'), (0x1D655, 'M', u'z'), (0x1D656, 'M', u'a'), - ] - -def _seg_64(): - return [ (0x1D657, 'M', u'b'), (0x1D658, 'M', u'c'), (0x1D659, 'M', u'd'), @@ -6677,6 +6763,10 @@ def _seg_64(): (0x1D662, 'M', u'm'), (0x1D663, 'M', u'n'), (0x1D664, 'M', u'o'), + ] + +def _seg_65(): + return [ (0x1D665, 'M', u'p'), (0x1D666, 'M', u'q'), (0x1D667, 'M', u'r'), @@ -6763,10 +6853,6 @@ def _seg_64(): (0x1D6B9, 'M', u'θ'), (0x1D6BA, 'M', u'σ'), (0x1D6BB, 'M', u'τ'), - ] - -def _seg_65(): - return [ (0x1D6BC, 'M', u'υ'), (0x1D6BD, 'M', u'φ'), (0x1D6BE, 'M', u'χ'), @@ -6781,6 +6867,10 @@ def _seg_65(): (0x1D6C7, 'M', u'ζ'), (0x1D6C8, 'M', u'η'), (0x1D6C9, 'M', u'θ'), + ] + +def _seg_66(): + return [ (0x1D6CA, 'M', u'ι'), (0x1D6CB, 'M', u'κ'), (0x1D6CC, 'M', u'λ'), @@ -6867,10 +6957,6 @@ def _seg_65(): (0x1D71F, 'M', u'δ'), (0x1D720, 'M', u'ε'), (0x1D721, 'M', u'ζ'), - ] - -def _seg_66(): - return [ (0x1D722, 'M', u'η'), (0x1D723, 'M', u'θ'), (0x1D724, 'M', u'ι'), @@ -6885,6 +6971,10 @@ def _seg_66(): (0x1D72D, 'M', u'θ'), (0x1D72E, 'M', u'σ'), (0x1D72F, 'M', u'τ'), + ] + +def _seg_67(): + return [ (0x1D730, 'M', u'υ'), (0x1D731, 'M', u'φ'), (0x1D732, 'M', u'χ'), @@ -6971,10 +7061,6 @@ def _seg_66(): (0x1D785, 'M', u'φ'), (0x1D786, 'M', u'χ'), (0x1D787, 'M', u'ψ'), - ] - -def _seg_67(): - return [ (0x1D788, 'M', u'ω'), (0x1D789, 'M', u'∂'), (0x1D78A, 'M', u'ε'), @@ -6989,6 +7075,10 @@ def _seg_67(): (0x1D793, 'M', u'δ'), (0x1D794, 'M', u'ε'), (0x1D795, 'M', u'ζ'), + ] + +def _seg_68(): + return [ (0x1D796, 'M', u'η'), (0x1D797, 'M', u'θ'), (0x1D798, 'M', u'ι'), @@ -7075,10 +7165,6 @@ def _seg_67(): (0x1D7EC, 'M', u'0'), (0x1D7ED, 'M', u'1'), (0x1D7EE, 'M', u'2'), - ] - -def _seg_68(): - return [ (0x1D7EF, 'M', u'3'), (0x1D7F0, 'M', u'4'), (0x1D7F1, 'M', u'5'), @@ -7093,6 +7179,10 @@ def _seg_68(): (0x1D7FA, 'M', u'4'), (0x1D7FB, 'M', u'5'), (0x1D7FC, 'M', u'6'), + ] + +def _seg_69(): + return [ (0x1D7FD, 'M', u'7'), (0x1D7FE, 'M', u'8'), (0x1D7FF, 'M', u'9'), @@ -7112,6 +7202,18 @@ def _seg_68(): (0x1E025, 'X'), (0x1E026, 'V'), (0x1E02B, 'X'), + (0x1E100, 'V'), + (0x1E12D, 'X'), + (0x1E130, 'V'), + (0x1E13E, 'X'), + (0x1E140, 'V'), + (0x1E14A, 'X'), + (0x1E14E, 'V'), + (0x1E150, 'X'), + (0x1E2C0, 'V'), + (0x1E2FA, 'X'), + (0x1E2FF, 'V'), + (0x1E300, 'X'), (0x1E800, 'V'), (0x1E8C5, 'X'), (0x1E8C7, 'V'), @@ -7151,13 +7253,15 @@ def _seg_68(): (0x1E920, 'M', u'𞥂'), (0x1E921, 'M', u'𞥃'), (0x1E922, 'V'), - (0x1E94B, 'X'), + (0x1E94C, 'X'), (0x1E950, 'V'), (0x1E95A, 'X'), (0x1E95E, 'V'), (0x1E960, 'X'), (0x1EC71, 'V'), (0x1ECB5, 'X'), + (0x1ED01, 'V'), + (0x1ED3E, 'X'), (0x1EE00, 'M', u'ا'), (0x1EE01, 'M', u'ب'), (0x1EE02, 'M', u'ج'), @@ -7181,7 +7285,7 @@ def _seg_68(): (0x1EE14, 'M', u'ش'), ] -def _seg_69(): +def _seg_70(): return [ (0x1EE15, 'M', u'ت'), (0x1EE16, 'M', u'ث'), @@ -7285,7 +7389,7 @@ def _seg_69(): (0x1EE83, 'M', u'د'), ] 
-def _seg_70(): +def _seg_71(): return [ (0x1EE84, 'M', u'ه'), (0x1EE85, 'M', u'و'), @@ -7389,7 +7493,7 @@ def _seg_70(): (0x1F124, '3', u'(u)'), ] -def _seg_71(): +def _seg_72(): return [ (0x1F125, '3', u'(v)'), (0x1F126, '3', u'(w)'), @@ -7437,7 +7541,8 @@ def _seg_71(): (0x1F150, 'V'), (0x1F16A, 'M', u'mc'), (0x1F16B, 'M', u'md'), - (0x1F16C, 'X'), + (0x1F16C, 'M', u'mr'), + (0x1F16D, 'X'), (0x1F170, 'V'), (0x1F190, 'M', u'dj'), (0x1F191, 'V'), @@ -7490,11 +7595,11 @@ def _seg_71(): (0x1F238, 'M', u'申'), (0x1F239, 'M', u'割'), (0x1F23A, 'M', u'営'), - (0x1F23B, 'M', u'配'), ] -def _seg_72(): +def _seg_73(): return [ + (0x1F23B, 'M', u'配'), (0x1F23C, 'X'), (0x1F240, 'M', u'〔本〕'), (0x1F241, 'M', u'〔三〕'), @@ -7512,15 +7617,17 @@ def _seg_72(): (0x1F260, 'V'), (0x1F266, 'X'), (0x1F300, 'V'), - (0x1F6D5, 'X'), + (0x1F6D6, 'X'), (0x1F6E0, 'V'), (0x1F6ED, 'X'), (0x1F6F0, 'V'), - (0x1F6FA, 'X'), + (0x1F6FB, 'X'), (0x1F700, 'V'), (0x1F774, 'X'), (0x1F780, 'V'), (0x1F7D9, 'X'), + (0x1F7E0, 'V'), + (0x1F7EC, 'X'), (0x1F800, 'V'), (0x1F80C, 'X'), (0x1F810, 'V'), @@ -7533,24 +7640,28 @@ def _seg_72(): (0x1F8AE, 'X'), (0x1F900, 'V'), (0x1F90C, 'X'), - (0x1F910, 'V'), - (0x1F93F, 'X'), - (0x1F940, 'V'), - (0x1F971, 'X'), + (0x1F90D, 'V'), + (0x1F972, 'X'), (0x1F973, 'V'), (0x1F977, 'X'), (0x1F97A, 'V'), - (0x1F97B, 'X'), - (0x1F97C, 'V'), (0x1F9A3, 'X'), - (0x1F9B0, 'V'), - (0x1F9BA, 'X'), - (0x1F9C0, 'V'), - (0x1F9C3, 'X'), - (0x1F9D0, 'V'), - (0x1FA00, 'X'), + (0x1F9A5, 'V'), + (0x1F9AB, 'X'), + (0x1F9AE, 'V'), + (0x1F9CB, 'X'), + (0x1F9CD, 'V'), + (0x1FA54, 'X'), (0x1FA60, 'V'), (0x1FA6E, 'X'), + (0x1FA70, 'V'), + (0x1FA74, 'X'), + (0x1FA78, 'V'), + (0x1FA7B, 'X'), + (0x1FA80, 'V'), + (0x1FA83, 'X'), + (0x1FA90, 'V'), + (0x1FA96, 'X'), (0x20000, 'V'), (0x2A6D7, 'X'), (0x2A700, 'V'), @@ -7588,6 +7699,10 @@ def _seg_72(): (0x2F818, 'M', u'冤'), (0x2F819, 'M', u'仌'), (0x2F81A, 'M', u'冬'), + ] + +def _seg_74(): + return [ (0x2F81B, 'M', u'况'), (0x2F81C, 'M', u'𩇟'), (0x2F81D, 'M', u'凵'), @@ -7595,10 +7710,6 @@ def _seg_72(): (0x2F81F, 'M', u'㓟'), (0x2F820, 'M', u'刻'), (0x2F821, 'M', u'剆'), - ] - -def _seg_73(): - return [ (0x2F822, 'M', u'割'), (0x2F823, 'M', u'剷'), (0x2F824, 'M', u'㔕'), @@ -7692,6 +7803,10 @@ def _seg_73(): (0x2F880, 'M', u'嵼'), (0x2F881, 'M', u'巡'), (0x2F882, 'M', u'巢'), + ] + +def _seg_75(): + return [ (0x2F883, 'M', u'㠯'), (0x2F884, 'M', u'巽'), (0x2F885, 'M', u'帨'), @@ -7699,10 +7814,6 @@ def _seg_73(): (0x2F887, 'M', u'幩'), (0x2F888, 'M', u'㡢'), (0x2F889, 'M', u'𢆃'), - ] - -def _seg_74(): - return [ (0x2F88A, 'M', u'㡼'), (0x2F88B, 'M', u'庰'), (0x2F88C, 'M', u'庳'), @@ -7796,6 +7907,10 @@ def _seg_74(): (0x2F8E6, 'M', u'椔'), (0x2F8E7, 'M', u'㮝'), (0x2F8E8, 'M', u'楂'), + ] + +def _seg_76(): + return [ (0x2F8E9, 'M', u'榣'), (0x2F8EA, 'M', u'槪'), (0x2F8EB, 'M', u'檨'), @@ -7803,10 +7918,6 @@ def _seg_74(): (0x2F8ED, 'M', u'櫛'), (0x2F8EE, 'M', u'㰘'), (0x2F8EF, 'M', u'次'), - ] - -def _seg_75(): - return [ (0x2F8F0, 'M', u'𣢧'), (0x2F8F1, 'M', u'歔'), (0x2F8F2, 'M', u'㱎'), @@ -7900,6 +8011,10 @@ def _seg_75(): (0x2F94C, 'M', u'䂖'), (0x2F94D, 'M', u'𥐝'), (0x2F94E, 'M', u'硎'), + ] + +def _seg_77(): + return [ (0x2F94F, 'M', u'碌'), (0x2F950, 'M', u'磌'), (0x2F951, 'M', u'䃣'), @@ -7907,10 +8022,6 @@ def _seg_75(): (0x2F953, 'M', u'祖'), (0x2F954, 'M', u'𥚚'), (0x2F955, 'M', u'𥛅'), - ] - -def _seg_76(): - return [ (0x2F956, 'M', u'福'), (0x2F957, 'M', u'秫'), (0x2F958, 'M', u'䄯'), @@ -8004,6 +8115,10 @@ def _seg_76(): (0x2F9B1, 'M', u'𧃒'), (0x2F9B2, 'M', u'䕫'), (0x2F9B3, 'M', u'虐'), + ] + +def _seg_78(): + 
return [ (0x2F9B4, 'M', u'虜'), (0x2F9B5, 'M', u'虧'), (0x2F9B6, 'M', u'虩'), @@ -8011,10 +8126,6 @@ def _seg_76(): (0x2F9B8, 'M', u'蚈'), (0x2F9B9, 'M', u'蜎'), (0x2F9BA, 'M', u'蛢'), - ] - -def _seg_77(): - return [ (0x2F9BB, 'M', u'蝹'), (0x2F9BC, 'M', u'蜨'), (0x2F9BD, 'M', u'蝫'), @@ -8108,6 +8219,10 @@ def _seg_77(): (0x2FA16, 'M', u'䵖'), (0x2FA17, 'M', u'黹'), (0x2FA18, 'M', u'黾'), + ] + +def _seg_79(): + return [ (0x2FA19, 'M', u'鼅'), (0x2FA1A, 'M', u'鼏'), (0x2FA1B, 'M', u'鼖'), @@ -8115,10 +8230,6 @@ def _seg_77(): (0x2FA1D, 'M', u'𪘀'), (0x2FA1E, 'X'), (0xE0100, 'I'), - ] - -def _seg_78(): - return [ (0xE01F0, 'X'), ] @@ -8202,4 +8313,5 @@ def _seg_78(): + _seg_76() + _seg_77() + _seg_78() + + _seg_79() ) diff --git a/pipenv/vendor/importlib_metadata/__init__.py b/pipenv/vendor/importlib_metadata/__init__.py index 6da7fd2cd4..089fca97de 100644 --- a/pipenv/vendor/importlib_metadata/__init__.py +++ b/pipenv/vendor/importlib_metadata/__init__.py @@ -10,6 +10,7 @@ import operator import functools import itertools +import posixpath import collections from ._compat import ( @@ -23,7 +24,6 @@ NotADirectoryError, PermissionError, pathlib, - PYPY_OPEN_BUG, ModuleNotFoundError, MetaPathFinder, email_message_from_string, @@ -389,10 +389,6 @@ def path(self): """ return vars(self).get('path', sys.path) - @property - def pattern(self): - return '.*' if self.name is None else re.escape(self.name) - @abc.abstractmethod def find_distributions(self, context=Context()): """ @@ -404,6 +400,75 @@ def find_distributions(self, context=Context()): """ +class FastPath: + """ + Micro-optimized class for searching a path for + children. + """ + + def __init__(self, root): + self.root = root + self.base = os.path.basename(root).lower() + + def joinpath(self, child): + return pathlib.Path(self.root, child) + + def children(self): + with suppress(Exception): + return os.listdir(self.root or '') + with suppress(Exception): + return self.zip_children() + return [] + + def zip_children(self): + zip_path = zipp.Path(self.root) + names = zip_path.root.namelist() + self.joinpath = zip_path.joinpath + + return ( + posixpath.split(child)[0] + for child in names + ) + + def is_egg(self, search): + base = self.base + return ( + base == search.versionless_egg_name + or base.startswith(search.prefix) + and base.endswith('.egg')) + + def search(self, name): + for child in self.children(): + n_low = child.lower() + if (n_low in name.exact_matches + or n_low.startswith(name.prefix) + and n_low.endswith(name.suffixes) + # legacy case: + or self.is_egg(name) and n_low == 'egg-info'): + yield self.joinpath(child) + + +class Prepared: + """ + A prepared search for metadata on a possibly-named package. + """ + normalized = '' + prefix = '' + suffixes = '.dist-info', '.egg-info' + exact_matches = [''][:0] + versionless_egg_name = '' + + def __init__(self, name): + self.name = name + if name is None: + return + self.normalized = name.lower().replace('-', '_') + self.prefix = self.normalized + '-' + self.exact_matches = [ + self.normalized + suffix for suffix in self.suffixes] + self.versionless_egg_name = self.normalized + '.egg' + + @install class MetadataPathFinder(NullFinder, DistributionFinder): """A degenerate finder for distribution packages on the file system. @@ -421,45 +486,17 @@ def find_distributions(self, context=DistributionFinder.Context()): (or all names if ``None`` indicated) along the paths in the list of directories ``context.path``. 
""" - found = self._search_paths(context.pattern, context.path) + found = self._search_paths(context.name, context.path) return map(PathDistribution, found) @classmethod - def _search_paths(cls, pattern, paths): + def _search_paths(cls, name, paths): """Find metadata directories in paths heuristically.""" return itertools.chain.from_iterable( - cls._search_path(path, pattern) - for path in map(cls._switch_path, paths) + path.search(Prepared(name)) + for path in map(FastPath, paths) ) - @staticmethod - def _switch_path(path): - if not PYPY_OPEN_BUG or os.path.isfile(path): # pragma: no branch - with suppress(Exception): - return zipp.Path(path) - return pathlib.Path(path) - - @classmethod - def _matches_info(cls, normalized, item): - template = r'{pattern}(-.*)?\.(dist|egg)-info' - manifest = template.format(pattern=normalized) - return re.match(manifest, item.name, flags=re.IGNORECASE) - - @classmethod - def _matches_legacy(cls, normalized, item): - template = r'{pattern}-.*\.egg[\\/]EGG-INFO' - manifest = template.format(pattern=normalized) - return re.search(manifest, str(item), flags=re.IGNORECASE) - - @classmethod - def _search_path(cls, root, pattern): - if not root.is_dir(): - return () - normalized = pattern.replace('-', '_') - return (item for item in root.iterdir() - if cls._matches_info(normalized, item) - or cls._matches_legacy(normalized, item)) - class PathDistribution(Distribution): def __init__(self, path): diff --git a/pipenv/vendor/importlib_metadata/_compat.py b/pipenv/vendor/importlib_metadata/_compat.py index 3fd65ffddd..99b4005eee 100644 --- a/pipenv/vendor/importlib_metadata/_compat.py +++ b/pipenv/vendor/importlib_metadata/_compat.py @@ -9,7 +9,7 @@ if sys.version_info > (3,): # pragma: nocover import builtins from configparser import ConfigParser - from contextlib import suppress + import contextlib FileNotFoundError = builtins.FileNotFoundError IsADirectoryError = builtins.IsADirectoryError NotADirectoryError = builtins.NotADirectoryError @@ -18,12 +18,14 @@ else: # pragma: nocover from backports.configparser import ConfigParser from itertools import imap as map # type: ignore - from contextlib2 import suppress # noqa + import contextlib2 as contextlib FileNotFoundError = IOError, OSError IsADirectoryError = IOError, OSError NotADirectoryError = IOError, OSError PermissionError = IOError, OSError +suppress = contextlib.suppress + if sys.version_info > (3, 5): # pragma: nocover import pathlib else: # pragma: nocover @@ -73,7 +75,7 @@ def disable_stdlib_finder(): """ def matches(finder): return ( - finder.__module__ == '_frozen_importlib_external' + getattr(finder, '__module__', None) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions') ) for finder in filter(matches, sys.meta_path): # pragma: nocover @@ -111,9 +113,6 @@ def py2_message_from_string(text): # nocoverpy3 email.message_from_string ) -# https://bitbucket.org/pypy/pypy/issues/3021/ioopen-directory-leaks-a-file-descriptor -PYPY_OPEN_BUG = getattr(sys, 'pypy_version_info', (9, 9, 9))[:3] <= (7, 1, 1) - class PyPy_repr: """ diff --git a/pipenv/vendor/importlib_metadata/docs/changelog.rst b/pipenv/vendor/importlib_metadata/docs/changelog.rst index d38b36f21f..9dbaf96af0 100644 --- a/pipenv/vendor/importlib_metadata/docs/changelog.rst +++ b/pipenv/vendor/importlib_metadata/docs/changelog.rst @@ -2,6 +2,33 @@ importlib_metadata NEWS ========================= +v1.5.1 +====== + +* Improve reliability and consistency of compatibility + imports for contextlib and pathlib when running tests. 
+ Closes #116. + +v1.5.0 +====== + +* Additional performance optimizations in FastPath now + save an additional 20% on a typical call. +* Correct for issue where PyOxidizer finder has no + ``__module__`` attribute. Closes #110. + +v1.4.0 +====== + +* Through careful optimization, ``distribution()`` is + 3-4x faster. Thanks to Antony Lee for the + contribution. Closes #95. + +* When searching through ``sys.path``, if any error + occurs attempting to list a path entry, that entry + is skipped, making the system much more lenient + to errors. Closes #94. + v1.3.0 ====== diff --git a/pipenv/vendor/importlib_metadata/docs/conf.py b/pipenv/vendor/importlib_metadata/docs/conf.py index af9f0e2667..129a7a4eae 100644 --- a/pipenv/vendor/importlib_metadata/docs/conf.py +++ b/pipenv/vendor/importlib_metadata/docs/conf.py @@ -166,6 +166,9 @@ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), + 'importlib_resources': ( + 'https://importlib-resources.readthedocs.io/en/latest/', None + ), } diff --git a/pipenv/vendor/importlib_metadata/docs/index.rst b/pipenv/vendor/importlib_metadata/docs/index.rst index 91e815c0c4..530197cf18 100644 --- a/pipenv/vendor/importlib_metadata/docs/index.rst +++ b/pipenv/vendor/importlib_metadata/docs/index.rst @@ -3,15 +3,15 @@ =============================== ``importlib_metadata`` is a library which provides an API for accessing an -installed package's `metadata`_, such as its entry points or its top-level +installed package's metadata (see :pep:`566`), such as its entry points or its top-level name. This functionality intends to replace most uses of ``pkg_resources`` -`entry point API`_ and `metadata API`_. Along with ``importlib.resources`` in -`Python 3.7 and newer`_ (backported as `importlib_resources`_ for older +`entry point API`_ and `metadata API`_. Along with :mod:`importlib.resources` in +Python 3.7 and newer (backported as :doc:`importlib_resources <importlib_resources:index>` for older versions of Python), this can eliminate the need to use the older and less efficient ``pkg_resources`` package. ``importlib_metadata`` is a backport of Python 3.8's standard library -`importlib.metadata`_ module for Python 2.7, and 3.4 through 3.7. Users of +:doc:`importlib.metadata <library/importlib.metadata>` module for Python 2.7, and 3.4 through 3.7. Users of Python 3.8 and beyond are encouraged to use the standard library module. When imported on Python 3.8 and later, ``importlib_metadata`` replaces the DistributionFinder behavior from the stdlib, but leaves the API intact. @@ -46,9 +46,5 @@ Indices and tables * :ref:`search` -.. _`metadata`: https://www.python.org/dev/peps/pep-0566/ .. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points .. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api -.. _`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources -.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html -.. _`importlib.metadata`: https://docs.python.org/3/library/importlib.metadata.html diff --git a/pipenv/vendor/importlib_metadata/docs/using.rst b/pipenv/vendor/importlib_metadata/docs/using.rst index bd73339489..6da2bb1c21 100644 --- a/pipenv/vendor/importlib_metadata/docs/using.rst +++ b/pipenv/vendor/importlib_metadata/docs/using.rst @@ -1,15 +1,15 @@ ..
_using: -========================== - Using importlib_metadata -========================== +================================= + Using :mod:`!importlib_metadata` +================================= ``importlib_metadata`` is a library that provides for access to installed package metadata. Built in part on Python's import system, this library intends to replace similar functionality in the `entry point API`_ and `metadata API`_ of ``pkg_resources``. Along with -``importlib.resources`` in `Python 3.7 -and newer`_ (backported as `importlib_resources`_ for older versions of +:mod:`importlib.resources` in Python 3.7 +and newer (backported as :doc:`importlib_resources <importlib_resources:index>` for older versions of Python), this can eliminate the need to use the older and less efficient ``pkg_resources`` package. @@ -17,9 +17,9 @@ By "installed package" we generally mean a third-party package installed into Python's ``site-packages`` directory via tools such as `pip <https://pypi.org/project/pip/>`_. Specifically, it means a package with either a discoverable ``dist-info`` or ``egg-info`` -directory, and metadata defined by `PEP 566`_ or its older specifications. +directory, and metadata defined by :pep:`566` or its older specifications. By default, package metadata can live on the file system or in zip archives on -``sys.path``. Through an extension mechanism, the metadata can live almost +:data:`sys.path`. Through an extension mechanism, the metadata can live almost anywhere. @@ -127,7 +127,7 @@ Distribution files You can also get the full set of files contained within a distribution. The ``files()`` function takes a distribution package name and returns all of the files installed by this distribution. Each file object returned is a -``PackagePath``, a `pathlib.Path`_ derived object with additional ``dist``, +``PackagePath``, a :class:`pathlib.Path` derived object with additional ``dist``, ``size``, and ``hash`` properties as indicated by the metadata. For example:: >>> util = [p for p in files('wheel') if 'util.py' in str(p)][0] @@ -196,18 +196,18 @@ instance:: >>> d.metadata['License'] 'MIT' -The full set of available metadata is not described here. See `PEP 566 -<https://www.python.org/dev/peps/pep-0566/>`_ for additional details. +The full set of available metadata is not described here. See :pep:`566` +for additional details. Extending the search algorithm ============================== -Because package metadata is not available through ``sys.path`` searches, or +Because package metadata is not available through :data:`sys.path` searches, or package loaders directly, the metadata for a package is found through import system `finders`_. To find a distribution package's metadata, -``importlib_metadata`` queries the list of `meta path finders`_ on -`sys.meta_path`_. +``importlib.metadata`` queries the list of :term:`meta path finders <meta path finder>` on +:data:`sys.meta_path`. By default ``importlib_metadata`` installs a finder for distribution packages found on the file system. This finder doesn't actually find any *packages*, @@ -217,7 +217,7 @@ The abstract class :py:class:`importlib.abc.MetaPathFinder` defines the interface expected of finders by Python's import system. 
``importlib_metadata`` extends this protocol by looking for an optional ``find_distributions`` callable on the finders from -``sys.meta_path`` and presents this extended interface as the +:data:`sys.meta_path` and presents this extended interface as the ``DistributionFinder`` abstract base class, which defines this abstract method:: @@ -240,20 +240,13 @@ a custom finder, return instances of this derived ``Distribution`` in the .. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points .. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api -.. _`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources -.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html -.. _`PEP 566`: https://www.python.org/dev/peps/pep-0566/ .. _`finders`: https://docs.python.org/3/reference/import.html#finders-and-loaders -.. _`meta path finders`: https://docs.python.org/3/glossary.html#term-meta-path-finder -.. _`sys.meta_path`: https://docs.python.org/3/library/sys.html#sys.meta_path -.. _`pathlib.Path`: https://docs.python.org/3/library/pathlib.html#pathlib.Path .. rubric:: Footnotes .. [#f1] Technically, the returned distribution metadata object is an - `email.message.Message - <https://docs.python.org/3/library/email.message.html#email.message.EmailMessage>`_ + :class:`email.message.EmailMessage` instance, but this is an implementation detail, and not part of the stable API. You should only use dictionary-like methods and syntax to access the metadata contents. diff --git a/pipenv/vendor/importlib_metadata/tests/fixtures.py b/pipenv/vendor/importlib_metadata/tests/fixtures.py index 0b4ce18d5a..218b699c0a 100644 --- a/pipenv/vendor/importlib_metadata/tests/fixtures.py +++ b/pipenv/vendor/importlib_metadata/tests/fixtures.py @@ -5,17 +5,8 @@ import shutil import tempfile import textwrap -import contextlib -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack - -try: - import pathlib -except ImportError: - import pathlib2 as pathlib +from .._compat import pathlib, contextlib __metaclass__ = type @@ -47,14 +38,28 @@ def tempdir_as_cwd(): yield tmp -class SiteDir: +@contextlib.contextmanager +def install_finder(finder): + sys.meta_path.append(finder) + try: + yield + finally: + sys.meta_path.remove(finder) + + +class Fixtures: def setUp(self): - self.fixtures = ExitStack() + self.fixtures = contextlib.ExitStack() self.addCleanup(self.fixtures.close) + + +class SiteDir(Fixtures): + def setUp(self): + super(SiteDir, self).setUp() self.site_dir = self.fixtures.enter_context(tempdir()) -class OnSysPath: +class OnSysPath(Fixtures): @staticmethod @contextlib.contextmanager def add_sys_path(dir): @@ -198,3 +203,8 @@ def build_files(file_defs, prefix=pathlib.Path()): def DALS(str): "Dedent and left-strip" return textwrap.dedent(str).lstrip() + + +class NullFinder: + def find_module(self, name): + pass diff --git a/pipenv/vendor/importlib_metadata/tests/test_integration.py b/pipenv/vendor/importlib_metadata/tests/test_integration.py index 11ed7dc865..c881927d55 100644 --- a/pipenv/vendor/importlib_metadata/tests/test_integration.py +++ b/pipenv/vendor/importlib_metadata/tests/test_integration.py @@ -3,7 +3,10 @@ import packaging.version from . import fixtures -from .. import version +from .. 
import ( + _compat, + version, + ) class IntegrationTests(fixtures.DistInfoPkg, unittest.TestCase): @@ -20,3 +23,20 @@ def is_installed(package_spec): assert is_installed('distinfo-pkg==1.0') assert is_installed('distinfo-pkg>=1.0,<2.0') assert not is_installed('distinfo-pkg<1.0') + + +class FinderTests(fixtures.Fixtures, unittest.TestCase): + + def test_finder_without_module(self): + class ModuleFreeFinder(fixtures.NullFinder): + """ + A finder without an __module__ attribute + """ + def __getattribute__(self, name): + if name == '__module__': + raise AttributeError(name) + return super().__getattribute__(name) + + self.fixtures.enter_context( + fixtures.install_finder(ModuleFreeFinder())) + _compat.disable_stdlib_finder() diff --git a/pipenv/vendor/importlib_metadata/tests/test_main.py b/pipenv/vendor/importlib_metadata/tests/test_main.py index cc2efdace1..131edceab0 100644 --- a/pipenv/vendor/importlib_metadata/tests/test_main.py +++ b/pipenv/vendor/importlib_metadata/tests/test_main.py @@ -8,6 +8,7 @@ import unittest import importlib import importlib_metadata +import pyfakefs.fake_filesystem_unittest as ffs from . import fixtures from .. import ( @@ -193,6 +194,33 @@ def test_egg(self): version('foo') +class MissingSysPath(fixtures.OnSysPath, unittest.TestCase): + site_dir = '/does-not-exist' + + def test_discovery(self): + """ + Discovering distributions should succeed even if + there is an invalid path on sys.path. + """ + importlib_metadata.distributions() + + +class InaccessibleSysPath(fixtures.OnSysPath, ffs.TestCase): + site_dir = '/access-denied' + + def setUp(self): + super(InaccessibleSysPath, self).setUp() + self.setUpPyfakefs() + self.fs.create_dir(self.site_dir, perm_bits=000) + + def test_discovery(self): + """ + Discovering distributions should succeed even if + there is an invalid path on sys.path. + """ + list(importlib_metadata.distributions()) + + class TestEntryPoints(unittest.TestCase): def __init__(self, *args): super(TestEntryPoints, self).__init__(*args) diff --git a/pipenv/vendor/jinja2/__init__.py b/pipenv/vendor/jinja2/__init__.py index 0eaf721499..7f4a1c55a8 100644 --- a/pipenv/vendor/jinja2/__init__.py +++ b/pipenv/vendor/jinja2/__init__.py @@ -1,83 +1,44 @@ # -*- coding: utf-8 -*- +"""Jinja is a template engine written in pure Python. It provides a +non-XML syntax that supports inline expressions and an optional +sandboxed environment. """ - jinja2 - ~~~~~~ - - Jinja2 is a template engine written in pure Python. It provides a - Django inspired non-XML syntax but supports inline expressions and - an optional sandboxed environment. - - Nutshell - -------- - - Here a small example of a Jinja2 template:: - - {% extends 'base.html' %} - {% block title %}Memberlist{% endblock %} - {% block content %} - <ul> - {% for user in users %} - <li><a href="{{ user.url }}">{{ user.username }}</a></li> - {% endfor %} - </ul> - {% endblock %} - - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. 
-""" -__docformat__ = 'restructuredtext en' -__version__ = "2.10.3" - -# high level interface -from jinja2.environment import Environment, Template - -# loaders -from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \ - DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \ - ModuleLoader - -# bytecode caches -from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \ - MemcachedBytecodeCache - -# undefined types -from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \ - make_logging_undefined - -# exceptions -from jinja2.exceptions import TemplateError, UndefinedError, \ - TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \ - TemplateAssertionError, TemplateRuntimeError - -# decorators and public utilities -from jinja2.filters import environmentfilter, contextfilter, \ - evalcontextfilter -from jinja2.utils import Markup, escape, clear_caches, \ - environmentfunction, evalcontextfunction, contextfunction, \ - is_undefined, select_autoescape - -__all__ = [ - 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader', - 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader', - 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache', - 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined', - 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound', - 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError', - 'TemplateRuntimeError', - 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape', - 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined', - 'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined', - 'select_autoescape', -] - - -def _patch_async(): - from jinja2.utils import have_async_gen - if have_async_gen: - from jinja2.asyncsupport import patch_all - patch_all() - - -_patch_async() -del _patch_async +from markupsafe import escape +from markupsafe import Markup + +from .bccache import BytecodeCache +from .bccache import FileSystemBytecodeCache +from .bccache import MemcachedBytecodeCache +from .environment import Environment +from .environment import Template +from .exceptions import TemplateAssertionError +from .exceptions import TemplateError +from .exceptions import TemplateNotFound +from .exceptions import TemplateRuntimeError +from .exceptions import TemplatesNotFound +from .exceptions import TemplateSyntaxError +from .exceptions import UndefinedError +from .filters import contextfilter +from .filters import environmentfilter +from .filters import evalcontextfilter +from .loaders import BaseLoader +from .loaders import ChoiceLoader +from .loaders import DictLoader +from .loaders import FileSystemLoader +from .loaders import FunctionLoader +from .loaders import ModuleLoader +from .loaders import PackageLoader +from .loaders import PrefixLoader +from .runtime import ChainableUndefined +from .runtime import DebugUndefined +from .runtime import make_logging_undefined +from .runtime import StrictUndefined +from .runtime import Undefined +from .utils import clear_caches +from .utils import contextfunction +from .utils import environmentfunction +from .utils import evalcontextfunction +from .utils import is_undefined +from .utils import select_autoescape + +__version__ = "2.11.1" diff --git a/pipenv/vendor/jinja2/_compat.py b/pipenv/vendor/jinja2/_compat.py index 4dbf6ea039..1f044954a0 100644 --- a/pipenv/vendor/jinja2/_compat.py +++ b/pipenv/vendor/jinja2/_compat.py @@ -1,22 +1,12 @@ # -*- coding: utf-8 -*- -""" - jinja2._compat - 
~~~~~~~~~~~~~~ - - Some py2/py3 compatibility support based on a stripped down - version of six so we don't have to depend on a specific version - of it. - - :copyright: Copyright 2013 by the Jinja team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" +# flake8: noqa +import marshal import sys PY2 = sys.version_info[0] == 2 -PYPY = hasattr(sys, 'pypy_translation_info') +PYPY = hasattr(sys, "pypy_translation_info") _identity = lambda x: x - if not PY2: unichr = chr range_type = range @@ -30,6 +20,7 @@ import pickle from io import BytesIO, StringIO + NativeStringIO = StringIO def reraise(tp, value, tb=None): @@ -46,6 +37,9 @@ def reraise(tp, value, tb=None): implements_to_string = _identity encode_filename = _identity + marshal_dump = marshal.dump + marshal_load = marshal.load + else: unichr = unichr text_type = unicode @@ -59,11 +53,13 @@ def reraise(tp, value, tb=None): import cPickle as pickle from cStringIO import StringIO as BytesIO, StringIO + NativeStringIO = BytesIO - exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') + exec("def reraise(tp, value, tb=None):\n raise tp, value, tb") from itertools import imap, izip, ifilter + intern = intern def implements_iterator(cls): @@ -73,14 +69,25 @@ def implements_iterator(cls): def implements_to_string(cls): cls.__unicode__ = cls.__str__ - cls.__str__ = lambda x: x.__unicode__().encode('utf-8') + cls.__str__ = lambda x: x.__unicode__().encode("utf-8") return cls def encode_filename(filename): if isinstance(filename, unicode): - return filename.encode('utf-8') + return filename.encode("utf-8") return filename + def marshal_dump(code, f): + if isinstance(f, file): + marshal.dump(code, f) + else: + f.write(marshal.dumps(code)) + + def marshal_load(f): + if isinstance(f, file): + return marshal.load(f) + return marshal.loads(f.read()) + def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" @@ -90,7 +97,8 @@ def with_metaclass(meta, *bases): class metaclass(type): def __new__(cls, name, this_bases, d): return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) + + return type.__new__(metaclass, "temporary_class", (), {}) try: @@ -103,3 +111,22 @@ def __new__(cls, name, this_bases, d): from collections import abc except ImportError: import collections as abc + + +try: + from os import fspath +except ImportError: + try: + from pathlib import PurePath + except ImportError: + PurePath = None + + def fspath(path): + if hasattr(path, "__fspath__"): + return path.__fspath__() + + # Python 3.5 doesn't have __fspath__ yet, use str. 
+ if PurePath is not None and isinstance(path, PurePath): + return str(path) + + return path diff --git a/pipenv/vendor/jinja2/_identifier.py b/pipenv/vendor/jinja2/_identifier.py index 2eac35d5c3..224d5449d1 100644 --- a/pipenv/vendor/jinja2/_identifier.py +++ b/pipenv/vendor/jinja2/_identifier.py @@ -1,2 +1,6 @@ +import re + # generated by scripts/generate_identifier_pattern.py -pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯' +pattern = re.compile( + r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950 +) diff --git a/pipenv/vendor/jinja2/asyncfilters.py b/pipenv/vendor/jinja2/asyncfilters.py index 5c1f46d7fa..d29f6c62d2 100644 --- a/pipenv/vendor/jinja2/asyncfilters.py +++ b/pipenv/vendor/jinja2/asyncfilters.py @@ -1,12 +1,13 @@ from functools import wraps -from jinja2.asyncsupport import auto_aiter -from jinja2 import filters +from . 
import filters +from .asyncsupport import auto_aiter +from .asyncsupport import auto_await async def auto_to_seq(value): seq = [] - if hasattr(value, '__aiter__'): + if hasattr(value, "__aiter__"): async for item in value: seq.append(item) else: @@ -16,8 +17,7 @@ async def auto_to_seq(value): async def async_select_or_reject(args, kwargs, modfunc, lookup_attr): - seq, func = filters.prepare_select_or_reject( - args, kwargs, modfunc, lookup_attr) + seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr) if seq: async for item in auto_aiter(seq): if func(item): @@ -26,14 +26,20 @@ async def async_select_or_reject(args, kwargs, modfunc, lookup_attr): def dualfilter(normal_filter, async_filter): wrap_evalctx = False - if getattr(normal_filter, 'environmentfilter', False): - is_async = lambda args: args[0].is_async + if getattr(normal_filter, "environmentfilter", False): + + def is_async(args): + return args[0].is_async + wrap_evalctx = False else: - if not getattr(normal_filter, 'evalcontextfilter', False) and \ - not getattr(normal_filter, 'contextfilter', False): + if not getattr(normal_filter, "evalcontextfilter", False) and not getattr( + normal_filter, "contextfilter", False + ): wrap_evalctx = True - is_async = lambda args: args[0].environment.is_async + + def is_async(args): + return args[0].environment.is_async @wraps(normal_filter) def wrapper(*args, **kwargs): @@ -55,6 +61,7 @@ def wrapper(*args, **kwargs): def asyncfiltervariant(original): def decorator(f): return dualfilter(original, f) + return decorator @@ -63,19 +70,22 @@ async def do_first(environment, seq): try: return await auto_aiter(seq).__anext__() except StopAsyncIteration: - return environment.undefined('No first item, sequence was empty.') + return environment.undefined("No first item, sequence was empty.") @asyncfiltervariant(filters.do_groupby) async def do_groupby(environment, value, attribute): expr = filters.make_attrgetter(environment, attribute) - return [filters._GroupTuple(key, await auto_to_seq(values)) - for key, values in filters.groupby(sorted( - await auto_to_seq(value), key=expr), expr)] + return [ + filters._GroupTuple(key, await auto_to_seq(values)) + for key, values in filters.groupby( + sorted(await auto_to_seq(value), key=expr), expr + ) + ] @asyncfiltervariant(filters.do_join) -async def do_join(eval_ctx, value, d=u'', attribute=None): +async def do_join(eval_ctx, value, d=u"", attribute=None): return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute) @@ -109,7 +119,7 @@ async def do_map(*args, **kwargs): seq, func = filters.prepare_map(args, kwargs) if seq: async for item in auto_aiter(seq): - yield func(item) + yield await auto_await(func(item)) @asyncfiltervariant(filters.do_sum) @@ -118,7 +128,10 @@ async def do_sum(environment, iterable, attribute=None, start=0): if attribute is not None: func = filters.make_attrgetter(environment, attribute) else: - func = lambda x: x + + def func(x): + return x + async for item in auto_aiter(iterable): rv += func(item) return rv @@ -130,17 +143,17 @@ async def do_slice(value, slices, fill_with=None): ASYNC_FILTERS = { - 'first': do_first, - 'groupby': do_groupby, - 'join': do_join, - 'list': do_list, + "first": do_first, + "groupby": do_groupby, + "join": do_join, + "list": do_list, # we intentionally do not support do_last because that would be # ridiculous - 'reject': do_reject, - 'rejectattr': do_rejectattr, - 'map': do_map, - 'select': do_select, - 'selectattr': do_selectattr, - 'sum': do_sum, - 'slice': do_slice, + 
"reject": do_reject, + "rejectattr": do_rejectattr, + "map": do_map, + "select": do_select, + "selectattr": do_selectattr, + "sum": do_sum, + "slice": do_slice, } diff --git a/pipenv/vendor/jinja2/asyncsupport.py b/pipenv/vendor/jinja2/asyncsupport.py index b1e7b5ce9a..78ba3739d8 100644 --- a/pipenv/vendor/jinja2/asyncsupport.py +++ b/pipenv/vendor/jinja2/asyncsupport.py @@ -1,29 +1,27 @@ # -*- coding: utf-8 -*- +"""The code for async support. Importing this patches Jinja on supported +Python versions. """ - jinja2.asyncsupport - ~~~~~~~~~~~~~~~~~~~ - - Has all the code for async support which is implemented as a patch - for supported Python versions. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" -import sys import asyncio import inspect from functools import update_wrapper -from jinja2.utils import concat, internalcode, Markup -from jinja2.environment import TemplateModule -from jinja2.runtime import LoopContextBase, _last_iteration +from markupsafe import Markup + +from .environment import TemplateModule +from .runtime import LoopContext +from .utils import concat +from .utils import internalcode +from .utils import missing async def concat_async(async_gen): rv = [] + async def collect(): async for event in async_gen: rv.append(event) + await collect() return concat(rv) @@ -34,10 +32,7 @@ async def generate_async(self, *args, **kwargs): async for event in self.root_render_func(self.new_context(vars)): yield event except Exception: - exc_info = sys.exc_info() - else: - return - yield self.environment.handle_exception(exc_info, True) + yield self.environment.handle_exception() def wrap_generate_func(original_generate): @@ -48,17 +43,18 @@ def _convert_generator(self, loop, args, kwargs): yield loop.run_until_complete(async_gen.__anext__()) except StopAsyncIteration: pass + def generate(self, *args, **kwargs): if not self.environment.is_async: return original_generate(self, *args, **kwargs) return _convert_generator(self, asyncio.get_event_loop(), args, kwargs) + return update_wrapper(generate, original_generate) async def render_async(self, *args, **kwargs): if not self.environment.is_async: - raise RuntimeError('The environment was not created with async mode ' - 'enabled.') + raise RuntimeError("The environment was not created with async mode enabled.") vars = dict(*args, **kwargs) ctx = self.new_context(vars) @@ -66,8 +62,7 @@ async def render_async(self, *args, **kwargs): try: return await concat_async(self.root_render_func(ctx)) except Exception: - exc_info = sys.exc_info() - return self.environment.handle_exception(exc_info, True) + return self.environment.handle_exception() def wrap_render_func(original_render): @@ -76,6 +71,7 @@ def render(self, *args, **kwargs): return original_render(self, *args, **kwargs) loop = asyncio.get_event_loop() return loop.run_until_complete(self.render_async(*args, **kwargs)) + return update_wrapper(render, original_render) @@ -109,6 +105,7 @@ def _invoke(self, arguments, autoescape): if not self._environment.is_async: return original_invoke(self, arguments, autoescape) return async_invoke(self, arguments, autoescape) + return update_wrapper(_invoke, original_invoke) @@ -124,9 +121,9 @@ def wrap_default_module(original_default_module): @internalcode def _get_default_module(self): if self.environment.is_async: - raise RuntimeError('Template module attribute is unavailable ' - 'in async mode') + raise RuntimeError("Template module attribute is unavailable in async mode") return 
original_default_module(self) + return _get_default_module @@ -139,30 +136,30 @@ async def make_module_async(self, vars=None, shared=False, locals=None): def patch_template(): - from jinja2 import Template + from . import Template + Template.generate = wrap_generate_func(Template.generate) - Template.generate_async = update_wrapper( - generate_async, Template.generate_async) - Template.render_async = update_wrapper( - render_async, Template.render_async) + Template.generate_async = update_wrapper(generate_async, Template.generate_async) + Template.render_async = update_wrapper(render_async, Template.render_async) Template.render = wrap_render_func(Template.render) - Template._get_default_module = wrap_default_module( - Template._get_default_module) + Template._get_default_module = wrap_default_module(Template._get_default_module) Template._get_default_module_async = get_default_module_async Template.make_module_async = update_wrapper( - make_module_async, Template.make_module_async) + make_module_async, Template.make_module_async + ) def patch_runtime(): - from jinja2.runtime import BlockReference, Macro - BlockReference.__call__ = wrap_block_reference_call( - BlockReference.__call__) + from .runtime import BlockReference, Macro + + BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__) Macro._invoke = wrap_macro_invoke(Macro._invoke) def patch_filters(): - from jinja2.filters import FILTERS - from jinja2.asyncfilters import ASYNC_FILTERS + from .filters import FILTERS + from .asyncfilters import ASYNC_FILTERS + FILTERS.update(ASYNC_FILTERS) @@ -179,7 +176,7 @@ async def auto_await(value): async def auto_aiter(iterable): - if hasattr(iterable, '__aiter__'): + if hasattr(iterable, "__aiter__"): async for item in iterable: yield item return @@ -187,70 +184,81 @@ async def auto_aiter(iterable): yield item -class AsyncLoopContext(LoopContextBase): - - def __init__(self, async_iterator, undefined, after, length, recurse=None, - depth0=0): - LoopContextBase.__init__(self, undefined, recurse, depth0) - self._async_iterator = async_iterator - self._after = after - self._length = length +class AsyncLoopContext(LoopContext): + _to_iterator = staticmethod(auto_aiter) @property - def length(self): - if self._length is None: - raise TypeError('Loop length for some iterators cannot be ' - 'lazily calculated in async mode') + async def length(self): + if self._length is not None: + return self._length + + try: + self._length = len(self._iterable) + except TypeError: + iterable = [x async for x in self._iterator] + self._iterator = self._to_iterator(iterable) + self._length = len(iterable) + self.index + (self._after is not missing) + return self._length - def __aiter__(self): - return AsyncLoopContextIterator(self) + @property + async def revindex0(self): + return await self.length - self.index + @property + async def revindex(self): + return await self.length - self.index0 + + async def _peek_next(self): + if self._after is not missing: + return self._after + + try: + self._after = await self._iterator.__anext__() + except StopAsyncIteration: + self._after = missing -class AsyncLoopContextIterator(object): - __slots__ = ('context',) + return self._after - def __init__(self, context): - self.context = context + @property + async def last(self): + return await self._peek_next() is missing + + @property + async def nextitem(self): + rv = await self._peek_next() + + if rv is missing: + return self._undefined("there is no next item") + + return rv def __aiter__(self): return self async 
def __anext__(self): - ctx = self.context - ctx.index0 += 1 - if ctx._after is _last_iteration: - raise StopAsyncIteration() - ctx._before = ctx._current - ctx._current = ctx._after - try: - ctx._after = await ctx._async_iterator.__anext__() - except StopAsyncIteration: - ctx._after = _last_iteration - return ctx._current, ctx + if self._after is not missing: + rv = self._after + self._after = missing + else: + rv = await self._iterator.__anext__() + + self.index0 += 1 + self._before = self._current + self._current = rv + return rv, self async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0): - # Length is more complicated and less efficient in async mode. The - # reason for this is that we cannot know if length will be used - # upfront but because length is a property we cannot lazily execute it - # later. This means that we need to buffer it up and measure :( - # - # We however only do this for actual iterators, not for async - # iterators as blocking here does not seem like the best idea in the - # world. - try: - length = len(iterable) - except (TypeError, AttributeError): - if not hasattr(iterable, '__aiter__'): - iterable = tuple(iterable) - length = len(iterable) - else: - length = None - async_iterator = auto_aiter(iterable) - try: - after = await async_iterator.__anext__() - except StopAsyncIteration: - after = _last_iteration - return AsyncLoopContext(async_iterator, undefined, after, length, recurse, - depth0) + import warnings + + warnings.warn( + "This template must be recompiled with at least Jinja 2.11, or" + " it will fail in 3.0.", + DeprecationWarning, + stacklevel=2, + ) + return AsyncLoopContext(iterable, undefined, recurse, depth0) + + +patch_all() diff --git a/pipenv/vendor/jinja2/bccache.py b/pipenv/vendor/jinja2/bccache.py index 507a9b3dee..9c0661030f 100644 --- a/pipenv/vendor/jinja2/bccache.py +++ b/pipenv/vendor/jinja2/bccache.py @@ -1,60 +1,37 @@ # -*- coding: utf-8 -*- -""" - jinja2.bccache - ~~~~~~~~~~~~~~ - - This module implements the bytecode cache system Jinja is optionally - using. This is useful if you have very complex template situations and - the compiliation of all those templates slow down your application too - much. - - Situations where this is useful are often forking web applications that - are initialized on the first request. +"""The optional bytecode cache system. This is useful if you have very +complex template situations and the compilation of all those templates +slows down your application too much. - :copyright: (c) 2017 by the Jinja Team. - :license: BSD. +Situations where this is useful are often forking web applications that +are initialized on the first request. """ -from os import path, listdir +import errno +import fnmatch import os -import sys import stat -import errno -import marshal +import sys import tempfile -import fnmatch from hashlib import sha1 -from jinja2.utils import open_if_exists -from jinja2._compat import BytesIO, pickle, PY2, text_type - - -# marshal works better on 3.x, one hack less required -if not PY2: - marshal_dump = marshal.dump - marshal_load = marshal.load -else: - - def marshal_dump(code, f): - if isinstance(f, file): - marshal.dump(code, f) - else: - f.write(marshal.dumps(code)) - - def marshal_load(f): - if isinstance(f, file): - return marshal.load(f) - return marshal.loads(f.read()) - - -bc_version = 3 - -# magic version used to only change with new jinja versions. With 2.6 -# we change this to also take Python version changes into account. 
The -# reason for this is that Python tends to segfault if fed earlier bytecode -# versions because someone thought it would be a good idea to reuse opcodes -# or make Python incompatible with earlier versions. -bc_magic = 'j2'.encode('ascii') + \ - pickle.dumps(bc_version, 2) + \ - pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1]) +from os import listdir +from os import path + +from ._compat import BytesIO +from ._compat import marshal_dump +from ._compat import marshal_load +from ._compat import pickle +from ._compat import text_type +from .utils import open_if_exists + +bc_version = 4 +# Magic bytes to identify Jinja bytecode cache files. Contains the +# Python major and minor version to avoid loading incompatible bytecode +# if a project upgrades its Python version. +bc_magic = ( + b"j2" + + pickle.dumps(bc_version, 2) + + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2) +) class Bucket(object): @@ -98,7 +75,7 @@ def load_bytecode(self, f): def write_bytecode(self, f): """Dump the bytecode into the file or file like object passed.""" if self.code is None: - raise TypeError('can\'t write empty bucket') + raise TypeError("can't write empty bucket") f.write(bc_magic) pickle.dump(self.checksum, f, 2) marshal_dump(self.code, f) @@ -140,7 +117,7 @@ def dump_bytecode(self, bucket): bucket.write_bytecode(f) A more advanced version of a filesystem based bytecode cache is part of - Jinja2. + Jinja. """ def load_bytecode(self, bucket): @@ -158,24 +135,24 @@ def dump_bytecode(self, bucket): raise NotImplementedError() def clear(self): - """Clears the cache. This method is not used by Jinja2 but should be + """Clears the cache. This method is not used by Jinja but should be implemented to allow applications to clear the bytecode cache used by a particular environment. """ def get_cache_key(self, name, filename=None): """Returns the unique hash key for this template name.""" - hash = sha1(name.encode('utf-8')) + hash = sha1(name.encode("utf-8")) if filename is not None: - filename = '|' + filename + filename = "|" + filename if isinstance(filename, text_type): - filename = filename.encode('utf-8') + filename = filename.encode("utf-8") hash.update(filename) return hash.hexdigest() def get_source_checksum(self, source): """Returns a checksum for the source.""" - return sha1(source.encode('utf-8')).hexdigest() + return sha1(source.encode("utf-8")).hexdigest() def get_bucket(self, environment, name, filename, source): """Return a cache bucket for the given template. All arguments are @@ -210,7 +187,7 @@ class FileSystemBytecodeCache(BytecodeCache): This bytecode cache supports clearing of the cache using the clear method. """ - def __init__(self, directory=None, pattern='__jinja2_%s.cache'): + def __init__(self, directory=None, pattern="__jinja2_%s.cache"): if directory is None: directory = self._get_default_cache_dir() self.directory = directory @@ -218,19 +195,21 @@ def __init__(self, directory=None, pattern='__jinja2_%s.cache'): def _get_default_cache_dir(self): def _unsafe_dir(): - raise RuntimeError('Cannot determine safe temp directory. You ' - 'need to explicitly provide one.') + raise RuntimeError( + "Cannot determine safe temp directory. You " + "need to explicitly provide one." + ) tmpdir = tempfile.gettempdir() # On windows the temporary directory is used specific unless # explicitly forced otherwise. We can just use that. 
- if os.name == 'nt': + if os.name == "nt": return tmpdir - if not hasattr(os, 'getuid'): + if not hasattr(os, "getuid"): _unsafe_dir() - dirname = '_jinja2-cache-%d' % os.getuid() + dirname = "_jinja2-cache-%d" % os.getuid() actual_dir = os.path.join(tmpdir, dirname) try: @@ -241,18 +220,22 @@ def _unsafe_dir(): try: os.chmod(actual_dir, stat.S_IRWXU) actual_dir_stat = os.lstat(actual_dir) - if actual_dir_stat.st_uid != os.getuid() \ - or not stat.S_ISDIR(actual_dir_stat.st_mode) \ - or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU: + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): _unsafe_dir() except OSError as e: if e.errno != errno.EEXIST: raise actual_dir_stat = os.lstat(actual_dir) - if actual_dir_stat.st_uid != os.getuid() \ - or not stat.S_ISDIR(actual_dir_stat.st_mode) \ - or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU: + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): _unsafe_dir() return actual_dir @@ -261,7 +244,7 @@ def _get_cache_filename(self, bucket): return path.join(self.directory, self.pattern % bucket.key) def load_bytecode(self, bucket): - f = open_if_exists(self._get_cache_filename(bucket), 'rb') + f = open_if_exists(self._get_cache_filename(bucket), "rb") if f is not None: try: bucket.load_bytecode(f) @@ -269,7 +252,7 @@ def load_bytecode(self, bucket): f.close() def dump_bytecode(self, bucket): - f = open(self._get_cache_filename(bucket), 'wb') + f = open(self._get_cache_filename(bucket), "wb") try: bucket.write_bytecode(f) finally: @@ -280,7 +263,8 @@ def clear(self): # write access on the file system and the function does not exist # normally. from os import remove - files = fnmatch.filter(listdir(self.directory), self.pattern % '*') + + files = fnmatch.filter(listdir(self.directory), self.pattern % "*") for filename in files: try: remove(path.join(self.directory, filename)) @@ -333,8 +317,13 @@ class MemcachedBytecodeCache(BytecodeCache): `ignore_memcache_errors` parameter. """ - def __init__(self, client, prefix='jinja2/bytecode/', timeout=None, - ignore_memcache_errors=True): + def __init__( + self, + client, + prefix="jinja2/bytecode/", + timeout=None, + ignore_memcache_errors=True, + ): self.client = client self.prefix = prefix self.timeout = timeout diff --git a/pipenv/vendor/jinja2/compiler.py b/pipenv/vendor/jinja2/compiler.py index d534a82739..f450ec6e31 100644 --- a/pipenv/vendor/jinja2/compiler.py +++ b/pipenv/vendor/jinja2/compiler.py @@ -1,59 +1,62 @@ # -*- coding: utf-8 -*- -""" - jinja2.compiler - ~~~~~~~~~~~~~~~ - - Compiles nodes into python code. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. 
-""" +"""Compiles nodes from the parser into Python code.""" +from collections import namedtuple +from functools import update_wrapper from itertools import chain -from copy import deepcopy from keyword import iskeyword as is_python_keyword -from functools import update_wrapper -from jinja2 import nodes -from jinja2.nodes import EvalContext -from jinja2.visitor import NodeVisitor -from jinja2.optimizer import Optimizer -from jinja2.exceptions import TemplateAssertionError -from jinja2.utils import Markup, concat, escape -from jinja2._compat import range_type, text_type, string_types, \ - iteritems, NativeStringIO, imap, izip -from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \ - VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED +from markupsafe import escape +from markupsafe import Markup + +from . import nodes +from ._compat import imap +from ._compat import iteritems +from ._compat import izip +from ._compat import NativeStringIO +from ._compat import range_type +from ._compat import string_types +from ._compat import text_type +from .exceptions import TemplateAssertionError +from .idtracking import Symbols +from .idtracking import VAR_LOAD_ALIAS +from .idtracking import VAR_LOAD_PARAMETER +from .idtracking import VAR_LOAD_RESOLVE +from .idtracking import VAR_LOAD_UNDEFINED +from .nodes import EvalContext +from .optimizer import Optimizer +from .utils import concat +from .visitor import NodeVisitor operators = { - 'eq': '==', - 'ne': '!=', - 'gt': '>', - 'gteq': '>=', - 'lt': '<', - 'lteq': '<=', - 'in': 'in', - 'notin': 'not in' + "eq": "==", + "ne": "!=", + "gt": ">", + "gteq": ">=", + "lt": "<", + "lteq": "<=", + "in": "in", + "notin": "not in", } # what method to iterate over items do we want to use for dict iteration # in generated code? on 2.x let's go with iteritems, on 3.x with items -if hasattr(dict, 'iteritems'): - dict_item_iter = 'iteritems' +if hasattr(dict, "iteritems"): + dict_item_iter = "iteritems" else: - dict_item_iter = 'items' + dict_item_iter = "items" -code_features = ['division'] +code_features = ["division"] # does this python version support generator stops? (PEP 0479) try: - exec('from __future__ import generator_stop') - code_features.append('generator_stop') + exec("from __future__ import generator_stop") + code_features.append("generator_stop") except SyntaxError: pass # does this python version support yield from? 
try: - exec('def f(): yield from x()') + exec("def f(): yield from x()") except SyntaxError: supports_yield_from = False else: @@ -68,17 +71,19 @@ def new_func(self, node, frame, **kwargs): if new_node != node: return self.visit(new_node, frame) return f(self, node, frame, **kwargs) + return update_wrapper(new_func, f) -def generate(node, environment, name, filename, stream=None, - defer_init=False, optimized=True): +def generate( + node, environment, name, filename, stream=None, defer_init=False, optimized=True +): """Generate the python source for a node tree.""" if not isinstance(node, nodes.Template): - raise TypeError('Can\'t compile non template nodes') - generator = environment.code_generator_class(environment, name, filename, - stream, defer_init, - optimized) + raise TypeError("Can't compile non template nodes") + generator = environment.code_generator_class( + environment, name, filename, stream, defer_init, optimized + ) generator.visit(node) if stream is None: return generator.stream.getvalue() @@ -119,7 +124,6 @@ def find_undeclared(nodes, names): class MacroRef(object): - def __init__(self, node): self.node = node self.accesses_caller = False @@ -132,8 +136,7 @@ class Frame(object): def __init__(self, eval_ctx, parent=None, level=None): self.eval_ctx = eval_ctx - self.symbols = Symbols(parent and parent.symbols or None, - level=level) + self.symbols = Symbols(parent and parent.symbols or None, level=level) # a toplevel frame is the root + soft frames such as if conditions. self.toplevel = False @@ -223,7 +226,7 @@ def __init__(self, names): self.undeclared = set() def visit_Name(self, node): - if node.ctx == 'load' and node.name in self.names: + if node.ctx == "load" and node.name in self.names: self.undeclared.add(node.name) if self.undeclared == self.names: raise VisitorExit() @@ -242,9 +245,9 @@ class CompilerExit(Exception): class CodeGenerator(NodeVisitor): - - def __init__(self, environment, name, filename, stream=None, - defer_init=False, optimized=True): + def __init__( + self, environment, name, filename, stream=None, defer_init=False, optimized=True + ): if stream is None: stream = NativeStringIO() self.environment = environment @@ -306,7 +309,7 @@ def __init__(self, environment, name, filename, stream=None, self._param_def_block = [] # Tracks the current context. 
- self._context_reference_stack = ['context'] + self._context_reference_stack = ["context"] # -- Various compilation helpers @@ -317,30 +320,30 @@ def fail(self, msg, lineno): def temporary_identifier(self): """Get a new unique identifier.""" self._last_identifier += 1 - return 't_%d' % self._last_identifier + return "t_%d" % self._last_identifier def buffer(self, frame): """Enable buffering for the frame from that point onwards.""" frame.buffer = self.temporary_identifier() - self.writeline('%s = []' % frame.buffer) + self.writeline("%s = []" % frame.buffer) def return_buffer_contents(self, frame, force_unescaped=False): """Return the buffer contents of the frame.""" if not force_unescaped: if frame.eval_ctx.volatile: - self.writeline('if context.eval_ctx.autoescape:') + self.writeline("if context.eval_ctx.autoescape:") self.indent() - self.writeline('return Markup(concat(%s))' % frame.buffer) + self.writeline("return Markup(concat(%s))" % frame.buffer) self.outdent() - self.writeline('else:') + self.writeline("else:") self.indent() - self.writeline('return concat(%s)' % frame.buffer) + self.writeline("return concat(%s)" % frame.buffer) self.outdent() return elif frame.eval_ctx.autoescape: - self.writeline('return Markup(concat(%s))' % frame.buffer) + self.writeline("return Markup(concat(%s))" % frame.buffer) return - self.writeline('return concat(%s)' % frame.buffer) + self.writeline("return concat(%s)" % frame.buffer) def indent(self): """Indent by one.""" @@ -353,14 +356,14 @@ def outdent(self, step=1): def start_write(self, frame, node=None): """Yield or write into the frame buffer.""" if frame.buffer is None: - self.writeline('yield ', node) + self.writeline("yield ", node) else: - self.writeline('%s.append(' % frame.buffer, node) + self.writeline("%s.append(" % frame.buffer, node) def end_write(self, frame): """End the writing process started by `start_write`.""" if frame.buffer is not None: - self.write(')') + self.write(")") def simple_write(self, s, frame, node=None): """Simple shortcut for start_write + write + end_write.""" @@ -373,7 +376,7 @@ def blockvisit(self, nodes, frame): is no buffer a dummy ``if 0: yield None`` is written automatically. """ try: - self.writeline('pass') + self.writeline("pass") for node in nodes: self.visit(node, frame) except CompilerExit: @@ -383,14 +386,13 @@ def write(self, x): """Write a string into the output stream.""" if self._new_lines: if not self._first_write: - self.stream.write('\n' * self._new_lines) + self.stream.write("\n" * self._new_lines) self.code_lineno += self._new_lines if self._write_debug_info is not None: - self.debug_info.append((self._write_debug_info, - self.code_lineno)) + self.debug_info.append((self._write_debug_info, self.code_lineno)) self._write_debug_info = None self._first_write = False - self.stream.write(' ' * self._indentation) + self.stream.write(" " * self._indentation) self._new_lines = 0 self.stream.write(x) @@ -410,7 +412,7 @@ def signature(self, node, frame, extra_kwargs=None): """Writes a function call to the stream for the current node. A leading comma is added automatically. The extra keyword arguments may not include python keywords otherwise a syntax - error could occour. The extra keyword arguments should be given + error could occur. The extra keyword arguments should be given as python dict. 
""" # if any of the given keyword arguments is a python keyword @@ -422,41 +424,41 @@ def signature(self, node, frame, extra_kwargs=None): break for arg in node.args: - self.write(', ') + self.write(", ") self.visit(arg, frame) if not kwarg_workaround: for kwarg in node.kwargs: - self.write(', ') + self.write(", ") self.visit(kwarg, frame) if extra_kwargs is not None: for key, value in iteritems(extra_kwargs): - self.write(', %s=%s' % (key, value)) + self.write(", %s=%s" % (key, value)) if node.dyn_args: - self.write(', *') + self.write(", *") self.visit(node.dyn_args, frame) if kwarg_workaround: if node.dyn_kwargs is not None: - self.write(', **dict({') + self.write(", **dict({") else: - self.write(', **{') + self.write(", **{") for kwarg in node.kwargs: - self.write('%r: ' % kwarg.key) + self.write("%r: " % kwarg.key) self.visit(kwarg.value, frame) - self.write(', ') + self.write(", ") if extra_kwargs is not None: for key, value in iteritems(extra_kwargs): - self.write('%r: %s, ' % (key, value)) + self.write("%r: %s, " % (key, value)) if node.dyn_kwargs is not None: - self.write('}, **') + self.write("}, **") self.visit(node.dyn_kwargs, frame) - self.write(')') + self.write(")") else: - self.write('}') + self.write("}") elif node.dyn_kwargs is not None: - self.write(', **') + self.write(", **") self.visit(node.dyn_kwargs, frame) def pull_dependencies(self, nodes): @@ -464,13 +466,14 @@ def pull_dependencies(self, nodes): visitor = DependencyFinderVisitor() for node in nodes: visitor.visit(node) - for dependency in 'filters', 'tests': + for dependency in "filters", "tests": mapping = getattr(self, dependency) for name in getattr(visitor, dependency): if name not in mapping: mapping[name] = self.temporary_identifier() - self.writeline('%s = environment.%s[%r]' % - (mapping[name], dependency, name)) + self.writeline( + "%s = environment.%s[%r]" % (mapping[name], dependency, name) + ) def enter_frame(self, frame): undefs = [] @@ -478,16 +481,15 @@ def enter_frame(self, frame): if action == VAR_LOAD_PARAMETER: pass elif action == VAR_LOAD_RESOLVE: - self.writeline('%s = %s(%r)' % - (target, self.get_resolve_func(), param)) + self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param)) elif action == VAR_LOAD_ALIAS: - self.writeline('%s = %s' % (target, param)) + self.writeline("%s = %s" % (target, param)) elif action == VAR_LOAD_UNDEFINED: undefs.append(target) else: - raise NotImplementedError('unknown load instruction') + raise NotImplementedError("unknown load instruction") if undefs: - self.writeline('%s = missing' % ' = '.join(undefs)) + self.writeline("%s = missing" % " = ".join(undefs)) def leave_frame(self, frame, with_python_scope=False): if not with_python_scope: @@ -495,12 +497,12 @@ def leave_frame(self, frame, with_python_scope=False): for target, _ in iteritems(frame.symbols.loads): undefs.append(target) if undefs: - self.writeline('%s = missing' % ' = '.join(undefs)) + self.writeline("%s = missing" % " = ".join(undefs)) def func(self, name): if self.environment.is_async: - return 'async def %s' % name - return 'def %s' % name + return "async def %s" % name + return "def %s" % name def macro_body(self, node, frame): """Dump the function def of a macro or call block.""" @@ -512,16 +514,16 @@ def macro_body(self, node, frame): skip_special_params = set() args = [] for idx, arg in enumerate(node.args): - if arg.name == 'caller': + if arg.name == "caller": explicit_caller = idx - if arg.name in ('kwargs', 'varargs'): + if arg.name in ("kwargs", "varargs"): 
skip_special_params.add(arg.name) args.append(frame.symbols.ref(arg.name)) - undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs')) + undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs")) - if 'caller' in undeclared: - # In older Jinja2 versions there was a bug that allowed caller + if "caller" in undeclared: + # In older Jinja versions there was a bug that allowed caller # to retain the special behavior even if it was mentioned in # the argument list. However thankfully this was only really # working if it was the last argument. So we are explicitly @@ -531,23 +533,26 @@ def macro_body(self, node, frame): try: node.defaults[explicit_caller - len(node.args)] except IndexError: - self.fail('When defining macros or call blocks the ' - 'special "caller" argument must be omitted ' - 'or be given a default.', node.lineno) + self.fail( + "When defining macros or call blocks the " + 'special "caller" argument must be omitted ' + "or be given a default.", + node.lineno, + ) else: - args.append(frame.symbols.declare_parameter('caller')) + args.append(frame.symbols.declare_parameter("caller")) macro_ref.accesses_caller = True - if 'kwargs' in undeclared and not 'kwargs' in skip_special_params: - args.append(frame.symbols.declare_parameter('kwargs')) + if "kwargs" in undeclared and "kwargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("kwargs")) macro_ref.accesses_kwargs = True - if 'varargs' in undeclared and not 'varargs' in skip_special_params: - args.append(frame.symbols.declare_parameter('varargs')) + if "varargs" in undeclared and "varargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("varargs")) macro_ref.accesses_varargs = True # macros are delayed, they never require output checks frame.require_output_check = False frame.symbols.analyze_node(node) - self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node) + self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node) self.indent() self.buffer(frame) @@ -556,17 +561,17 @@ def macro_body(self, node, frame): self.push_parameter_definitions(frame) for idx, arg in enumerate(node.args): ref = frame.symbols.ref(arg.name) - self.writeline('if %s is missing:' % ref) + self.writeline("if %s is missing:" % ref) self.indent() try: default = node.defaults[idx - len(node.args)] except IndexError: - self.writeline('%s = undefined(%r, name=%r)' % ( - ref, - 'parameter %r was not provided' % arg.name, - arg.name)) + self.writeline( + "%s = undefined(%r, name=%r)" + % (ref, "parameter %r was not provided" % arg.name, arg.name) + ) else: - self.writeline('%s = ' % ref) + self.writeline("%s = " % ref) self.visit(default, frame) self.mark_parameter_stored(ref) self.outdent() @@ -581,35 +586,46 @@ def macro_body(self, node, frame): def macro_def(self, macro_ref, frame): """Dump the macro definition for the def created by macro_body.""" - arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args) - name = getattr(macro_ref.node, 'name', None) + arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args) + name = getattr(macro_ref.node, "name", None) if len(macro_ref.node.args) == 1: - arg_tuple += ',' - self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, ' - 'context.eval_ctx.autoescape)' % - (name, arg_tuple, macro_ref.accesses_kwargs, - macro_ref.accesses_varargs, macro_ref.accesses_caller)) + arg_tuple += "," + self.write( + "Macro(environment, macro, %r, (%s), %r, %r, %r, " + "context.eval_ctx.autoescape)" + % ( + name, + 
arg_tuple, + macro_ref.accesses_kwargs, + macro_ref.accesses_varargs, + macro_ref.accesses_caller, + ) + ) def position(self, node): """Return a human readable position for the node.""" - rv = 'line %d' % node.lineno + rv = "line %d" % node.lineno if self.name is not None: - rv += ' in ' + repr(self.name) + rv += " in " + repr(self.name) return rv def dump_local_context(self, frame): - return '{%s}' % ', '.join( - '%r: %s' % (name, target) for name, target - in iteritems(frame.symbols.dump_stores())) + return "{%s}" % ", ".join( + "%r: %s" % (name, target) + for name, target in iteritems(frame.symbols.dump_stores()) + ) def write_commons(self): """Writes a common preamble that is used by root and block functions. Primarily this sets up common local helpers and enforces a generator through a dead branch. """ - self.writeline('resolve = context.resolve_or_missing') - self.writeline('undefined = environment.undefined') - self.writeline('if 0: yield None') + self.writeline("resolve = context.resolve_or_missing") + self.writeline("undefined = environment.undefined") + # always use the standard Undefined class for the implicit else of + # conditional expressions + self.writeline("cond_expr_undefined = Undefined") + self.writeline("if 0: yield None") def push_parameter_definitions(self, frame): """Pushes all parameter targets from the given frame into a local @@ -642,12 +658,12 @@ def get_context_ref(self): def get_resolve_func(self): target = self._context_reference_stack[-1] - if target == 'context': - return 'resolve' - return '%s.resolve' % target + if target == "context": + return "resolve" + return "%s.resolve" % target def derive_context(self, frame): - return '%s.derived(%s)' % ( + return "%s.derived(%s)" % ( self.get_context_ref(), self.dump_local_context(frame), ) @@ -669,44 +685,48 @@ def pop_assign_tracking(self, frame): vars = self._assign_stack.pop() if not frame.toplevel or not vars: return - public_names = [x for x in vars if x[:1] != '_'] + public_names = [x for x in vars if x[:1] != "_"] if len(vars) == 1: name = next(iter(vars)) ref = frame.symbols.ref(name) - self.writeline('context.vars[%r] = %s' % (name, ref)) + self.writeline("context.vars[%r] = %s" % (name, ref)) else: - self.writeline('context.vars.update({') + self.writeline("context.vars.update({") for idx, name in enumerate(vars): if idx: - self.write(', ') + self.write(", ") ref = frame.symbols.ref(name) - self.write('%r: %s' % (name, ref)) - self.write('})') + self.write("%r: %s" % (name, ref)) + self.write("})") if public_names: if len(public_names) == 1: - self.writeline('context.exported_vars.add(%r)' % - public_names[0]) + self.writeline("context.exported_vars.add(%r)" % public_names[0]) else: - self.writeline('context.exported_vars.update((%s))' % - ', '.join(imap(repr, public_names))) + self.writeline( + "context.exported_vars.update((%s))" + % ", ".join(imap(repr, public_names)) + ) # -- Statement Visitors def visit_Template(self, node, frame=None): - assert frame is None, 'no root frame allowed' + assert frame is None, "no root frame allowed" eval_ctx = EvalContext(self.environment, self.name) - from jinja2.runtime import __all__ as exported - self.writeline('from __future__ import %s' % ', '.join(code_features)) - self.writeline('from jinja2.runtime import ' + ', '.join(exported)) + from .runtime import exported + + self.writeline("from __future__ import %s" % ", ".join(code_features)) + self.writeline("from jinja2.runtime import " + ", ".join(exported)) if self.environment.is_async: - self.writeline('from 
jinja2.asyncsupport import auto_await, ' - 'auto_aiter, make_async_loop_context') + self.writeline( + "from jinja2.asyncsupport import auto_await, " + "auto_aiter, AsyncLoopContext" + ) # if we want a deferred initialization we cannot move the # environment into a local name - envenv = not self.defer_init and ', environment=environment' or '' + envenv = not self.defer_init and ", environment=environment" or "" # do we have an extends tag at all? If not, we can save some # overhead by just not processing any inheritance code. @@ -715,7 +735,7 @@ def visit_Template(self, node, frame=None): # find all blocks for block in node.find_all(nodes.Block): if block.name in self.blocks: - self.fail('block %r defined twice' % block.name, block.lineno) + self.fail("block %r defined twice" % block.name, block.lineno) self.blocks[block.name] = block # find all imports and import them @@ -723,32 +743,32 @@ def visit_Template(self, node, frame=None): if import_.importname not in self.import_aliases: imp = import_.importname self.import_aliases[imp] = alias = self.temporary_identifier() - if '.' in imp: - module, obj = imp.rsplit('.', 1) - self.writeline('from %s import %s as %s' % - (module, obj, alias)) + if "." in imp: + module, obj = imp.rsplit(".", 1) + self.writeline("from %s import %s as %s" % (module, obj, alias)) else: - self.writeline('import %s as %s' % (imp, alias)) + self.writeline("import %s as %s" % (imp, alias)) # add the load name - self.writeline('name = %r' % self.name) + self.writeline("name = %r" % self.name) # generate the root render function. - self.writeline('%s(context, missing=missing%s):' % - (self.func('root'), envenv), extra=1) + self.writeline( + "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1 + ) self.indent() self.write_commons() # process the root frame = Frame(eval_ctx) - if 'self' in find_undeclared(node.body, ('self',)): - ref = frame.symbols.declare_parameter('self') - self.writeline('%s = TemplateReference(context)' % ref) + if "self" in find_undeclared(node.body, ("self",)): + ref = frame.symbols.declare_parameter("self") + self.writeline("%s = TemplateReference(context)" % ref) frame.symbols.analyze_node(node) frame.toplevel = frame.rootlevel = True frame.require_output_check = have_extends and not self.has_known_extends if have_extends: - self.writeline('parent_template = None') + self.writeline("parent_template = None") self.enter_frame(frame) self.pull_dependencies(node.body) self.blockvisit(node.body, frame) @@ -759,39 +779,42 @@ def visit_Template(self, node, frame=None): if have_extends: if not self.has_known_extends: self.indent() - self.writeline('if parent_template is not None:') + self.writeline("if parent_template is not None:") self.indent() if supports_yield_from and not self.environment.is_async: - self.writeline('yield from parent_template.' - 'root_render_func(context)') + self.writeline("yield from parent_template.root_render_func(context)") else: - self.writeline('%sfor event in parent_template.' - 'root_render_func(context):' % - (self.environment.is_async and 'async ' or '')) + self.writeline( + "%sfor event in parent_template." + "root_render_func(context):" + % (self.environment.is_async and "async " or "") + ) self.indent() - self.writeline('yield event') + self.writeline("yield event") self.outdent() self.outdent(1 + (not self.has_known_extends)) # at this point we now have the blocks collected and can visit them too. 
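An aside for readers following visit_Template: the module this visitor emits can be inspected through the public API. A minimal sketch, assuming only a stock jinja2 install; the exact output varies between releases:

from jinja2 import Environment

env = Environment()
# raw=True asks Environment.compile for the generated Python source
# instead of a compiled code object.
print(env.compile("Hello {{ name }}!", raw=True))
# Expect, roughly: the jinja2.runtime imports, `name = None`, a
# root(context, missing=missing, environment=environment) generator
# that opens with the commons preamble written above, then
# `blocks = {}` and a packed `debug_info` string.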
for name, block in iteritems(self.blocks): - self.writeline('%s(context, missing=missing%s):' % - (self.func('block_' + name), envenv), - block, 1) + self.writeline( + "%s(context, missing=missing%s):" + % (self.func("block_" + name), envenv), + block, + 1, + ) self.indent() self.write_commons() # It's important that we do not make this frame a child of the # toplevel template. This would cause a variety of # interesting issues with identifier tracking. block_frame = Frame(eval_ctx) - undeclared = find_undeclared(block.body, ('self', 'super')) - if 'self' in undeclared: - ref = block_frame.symbols.declare_parameter('self') - self.writeline('%s = TemplateReference(context)' % ref) - if 'super' in undeclared: - ref = block_frame.symbols.declare_parameter('super') - self.writeline('%s = context.super(%r, ' - 'block_%s)' % (ref, name, name)) + undeclared = find_undeclared(block.body, ("self", "super")) + if "self" in undeclared: + ref = block_frame.symbols.declare_parameter("self") + self.writeline("%s = TemplateReference(context)" % ref) + if "super" in undeclared: + ref = block_frame.symbols.declare_parameter("super") + self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name)) block_frame.symbols.analyze_node(block) block_frame.block = name self.enter_frame(block_frame) @@ -800,13 +823,15 @@ def visit_Template(self, node, frame=None): self.leave_frame(block_frame, with_python_scope=True) self.outdent() - self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x) - for x in self.blocks), - extra=1) + self.writeline( + "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks), + extra=1, + ) # add a function that returns the debug info - self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x - in self.debug_info)) + self.writeline( + "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info) + ) def visit_Block(self, node, frame): """Call a block and register it for the template.""" @@ -817,7 +842,7 @@ def visit_Block(self, node, frame): if self.has_known_extends: return if self.extends_so_far > 0: - self.writeline('if parent_template is None:') + self.writeline("if parent_template is None:") self.indent() level += 1 @@ -826,16 +851,22 @@ def visit_Block(self, node, frame): else: context = self.get_context_ref() - if supports_yield_from and not self.environment.is_async and \ - frame.buffer is None: - self.writeline('yield from context.blocks[%r][0](%s)' % ( - node.name, context), node) + if ( + supports_yield_from + and not self.environment.is_async + and frame.buffer is None + ): + self.writeline( + "yield from context.blocks[%r][0](%s)" % (node.name, context), node + ) else: - loop = self.environment.is_async and 'async for' or 'for' - self.writeline('%s event in context.blocks[%r][0](%s):' % ( - loop, node.name, context), node) + loop = self.environment.is_async and "async for" or "for" + self.writeline( + "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context), + node, + ) self.indent() - self.simple_write('event', frame) + self.simple_write("event", frame) self.outdent() self.outdent(level) @@ -843,8 +874,7 @@ def visit_Block(self, node, frame): def visit_Extends(self, node, frame): """Calls the extender.""" if not frame.toplevel: - self.fail('cannot use extend from a non top-level scope', - node.lineno) + self.fail("cannot use extend from a non top-level scope", node.lineno) # if the number of extends statements in general is zero so # far, we don't have to add a check if something extended @@ -856,10 +886,9 @@ def 
visit_Extends(self, node, frame): # time too, but i welcome it not to confuse users by throwing the # same error at different times just "because we can". if not self.has_known_extends: - self.writeline('if parent_template is not None:') + self.writeline("if parent_template is not None:") self.indent() - self.writeline('raise TemplateRuntimeError(%r)' % - 'extended multiple times') + self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times") # if we have a known extends already we don't need that code here # as we know that the template execution will end here. @@ -868,14 +897,14 @@ def visit_Extends(self, node, frame): else: self.outdent() - self.writeline('parent_template = environment.get_template(', node) + self.writeline("parent_template = environment.get_template(", node) self.visit(node.template, frame) - self.write(', %r)' % self.name) - self.writeline('for name, parent_block in parent_template.' - 'blocks.%s():' % dict_item_iter) + self.write(", %r)" % self.name) + self.writeline( + "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter + ) self.indent() - self.writeline('context.blocks.setdefault(name, []).' - 'append(parent_block)') + self.writeline("context.blocks.setdefault(name, []).append(parent_block)") self.outdent() # if this extends statement was in the root level we can take @@ -890,52 +919,56 @@ def visit_Extends(self, node, frame): def visit_Include(self, node, frame): """Handles includes.""" if node.ignore_missing: - self.writeline('try:') + self.writeline("try:") self.indent() - func_name = 'get_or_select_template' + func_name = "get_or_select_template" if isinstance(node.template, nodes.Const): if isinstance(node.template.value, string_types): - func_name = 'get_template' + func_name = "get_template" elif isinstance(node.template.value, (tuple, list)): - func_name = 'select_template' + func_name = "select_template" elif isinstance(node.template, (nodes.Tuple, nodes.List)): - func_name = 'select_template' + func_name = "select_template" - self.writeline('template = environment.%s(' % func_name, node) + self.writeline("template = environment.%s(" % func_name, node) self.visit(node.template, frame) - self.write(', %r)' % self.name) + self.write(", %r)" % self.name) if node.ignore_missing: self.outdent() - self.writeline('except TemplateNotFound:') + self.writeline("except TemplateNotFound:") self.indent() - self.writeline('pass') + self.writeline("pass") self.outdent() - self.writeline('else:') + self.writeline("else:") self.indent() skip_event_yield = False if node.with_context: - loop = self.environment.is_async and 'async for' or 'for' - self.writeline('%s event in template.root_render_func(' - 'template.new_context(context.get_all(), True, ' - '%s)):' % (loop, self.dump_local_context(frame))) + loop = self.environment.is_async and "async for" or "for" + self.writeline( + "%s event in template.root_render_func(" + "template.new_context(context.get_all(), True, " + "%s)):" % (loop, self.dump_local_context(frame)) + ) elif self.environment.is_async: - self.writeline('for event in (await ' - 'template._get_default_module_async())' - '._body_stream:') + self.writeline( + "for event in (await " + "template._get_default_module_async())" + "._body_stream:" + ) else: if supports_yield_from: - self.writeline('yield from template._get_default_module()' - '._body_stream') + self.writeline("yield from template._get_default_module()._body_stream") skip_event_yield = True else: - self.writeline('for event in template._get_default_module()' - 
'._body_stream:') + self.writeline( + "for event in template._get_default_module()._body_stream:" + ) if not skip_event_yield: self.indent() - self.simple_write('event', frame) + self.simple_write("event", frame) self.outdent() if node.ignore_missing: @@ -943,40 +976,50 @@ def visit_Include(self, node, frame): def visit_Import(self, node, frame): """Visit regular imports.""" - self.writeline('%s = ' % frame.symbols.ref(node.target), node) + self.writeline("%s = " % frame.symbols.ref(node.target), node) if frame.toplevel: - self.write('context.vars[%r] = ' % node.target) + self.write("context.vars[%r] = " % node.target) if self.environment.is_async: - self.write('await ') - self.write('environment.get_template(') + self.write("await ") + self.write("environment.get_template(") self.visit(node.template, frame) - self.write(', %r).' % self.name) + self.write(", %r)." % self.name) if node.with_context: - self.write('make_module%s(context.get_all(), True, %s)' - % (self.environment.is_async and '_async' or '', - self.dump_local_context(frame))) + self.write( + "make_module%s(context.get_all(), True, %s)" + % ( + self.environment.is_async and "_async" or "", + self.dump_local_context(frame), + ) + ) elif self.environment.is_async: - self.write('_get_default_module_async()') + self.write("_get_default_module_async()") else: - self.write('_get_default_module()') - if frame.toplevel and not node.target.startswith('_'): - self.writeline('context.exported_vars.discard(%r)' % node.target) + self.write("_get_default_module()") + if frame.toplevel and not node.target.startswith("_"): + self.writeline("context.exported_vars.discard(%r)" % node.target) def visit_FromImport(self, node, frame): """Visit named imports.""" self.newline(node) - self.write('included_template = %senvironment.get_template(' - % (self.environment.is_async and 'await ' or '')) + self.write( + "included_template = %senvironment.get_template(" + % (self.environment.is_async and "await " or "") + ) self.visit(node.template, frame) - self.write(', %r).' % self.name) + self.write(", %r)." 
% self.name) if node.with_context: - self.write('make_module%s(context.get_all(), True, %s)' - % (self.environment.is_async and '_async' or '', - self.dump_local_context(frame))) + self.write( + "make_module%s(context.get_all(), True, %s)" + % ( + self.environment.is_async and "_async" or "", + self.dump_local_context(frame), + ) + ) elif self.environment.is_async: - self.write('_get_default_module_async()') + self.write("_get_default_module_async()") else: - self.write('_get_default_module()') + self.write("_get_default_module()") var_names = [] discarded_names = [] @@ -985,41 +1028,51 @@ def visit_FromImport(self, node, frame): name, alias = name else: alias = name - self.writeline('%s = getattr(included_template, ' - '%r, missing)' % (frame.symbols.ref(alias), name)) - self.writeline('if %s is missing:' % frame.symbols.ref(alias)) + self.writeline( + "%s = getattr(included_template, " + "%r, missing)" % (frame.symbols.ref(alias), name) + ) + self.writeline("if %s is missing:" % frame.symbols.ref(alias)) self.indent() - self.writeline('%s = undefined(%r %% ' - 'included_template.__name__, ' - 'name=%r)' % - (frame.symbols.ref(alias), - 'the template %%r (imported on %s) does ' - 'not export the requested name %s' % ( - self.position(node), - repr(name) - ), name)) + self.writeline( + "%s = undefined(%r %% " + "included_template.__name__, " + "name=%r)" + % ( + frame.symbols.ref(alias), + "the template %%r (imported on %s) does " + "not export the requested name %s" + % (self.position(node), repr(name)), + name, + ) + ) self.outdent() if frame.toplevel: var_names.append(alias) - if not alias.startswith('_'): + if not alias.startswith("_"): discarded_names.append(alias) if var_names: if len(var_names) == 1: name = var_names[0] - self.writeline('context.vars[%r] = %s' % - (name, frame.symbols.ref(name))) + self.writeline( + "context.vars[%r] = %s" % (name, frame.symbols.ref(name)) + ) else: - self.writeline('context.vars.update({%s})' % ', '.join( - '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names - )) + self.writeline( + "context.vars.update({%s})" + % ", ".join( + "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names + ) + ) if discarded_names: if len(discarded_names) == 1: - self.writeline('context.exported_vars.discard(%r)' % - discarded_names[0]) + self.writeline("context.exported_vars.discard(%r)" % discarded_names[0]) else: - self.writeline('context.exported_vars.difference_' - 'update((%s))' % ', '.join(imap(repr, discarded_names))) + self.writeline( + "context.exported_vars.difference_" + "update((%s))" % ", ".join(imap(repr, discarded_names)) + ) def visit_For(self, node, frame): loop_frame = frame.inner() @@ -1029,35 +1082,35 @@ def visit_For(self, node, frame): # try to figure out if we have an extended loop. An extended loop # is necessary if the loop is in recursive mode if the special loop # variable is accessed in the body. 
- extended_loop = node.recursive or 'loop' in \ - find_undeclared(node.iter_child_nodes( - only=('body',)), ('loop',)) + extended_loop = node.recursive or "loop" in find_undeclared( + node.iter_child_nodes(only=("body",)), ("loop",) + ) loop_ref = None if extended_loop: - loop_ref = loop_frame.symbols.declare_parameter('loop') + loop_ref = loop_frame.symbols.declare_parameter("loop") - loop_frame.symbols.analyze_node(node, for_branch='body') + loop_frame.symbols.analyze_node(node, for_branch="body") if node.else_: - else_frame.symbols.analyze_node(node, for_branch='else') + else_frame.symbols.analyze_node(node, for_branch="else") if node.test: loop_filter_func = self.temporary_identifier() - test_frame.symbols.analyze_node(node, for_branch='test') - self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test) + test_frame.symbols.analyze_node(node, for_branch="test") + self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test) self.indent() self.enter_frame(test_frame) - self.writeline(self.environment.is_async and 'async for ' or 'for ') + self.writeline(self.environment.is_async and "async for " or "for ") self.visit(node.target, loop_frame) - self.write(' in ') - self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter') - self.write(':') + self.write(" in ") + self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter") + self.write(":") self.indent() - self.writeline('if ', node.test) + self.writeline("if ", node.test) self.visit(node.test, test_frame) - self.write(':') + self.write(":") self.indent() - self.writeline('yield ') + self.writeline("yield ") self.visit(node.target, loop_frame) self.outdent(3) self.leave_frame(test_frame, with_python_scope=True) @@ -1066,8 +1119,9 @@ def visit_For(self, node, frame): # variables at that point. Because loops can be nested but the loop # variable is a special one we have to enforce aliasing for it. 
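The effect of this extended-loop detection is observable from outside. A small illustrative sketch (public API only; the template snippets and the substring check against the generated source are assumptions tied to this vendored version):

from jinja2 import Environment

env = Environment()
plain = env.compile("{% for x in xs %}{{ x }}{% endfor %}", raw=True)
extended = env.compile("{% for x in xs %}{{ loop.index }}{% endfor %}", raw=True)
# The LoopContext wrapper is only instantiated when `loop` is used.
assert "LoopContext(" not in plain
assert "LoopContext(" in extended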
if node.recursive: - self.writeline('%s(reciter, loop_render_func, depth=0):' % - self.func('loop'), node) + self.writeline( + "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node + ) self.indent() self.buffer(loop_frame) @@ -1077,57 +1131,60 @@ def visit_For(self, node, frame): # make sure the loop variable is a special one and raise a template # assertion error if a loop tries to write to loop if extended_loop: - self.writeline('%s = missing' % loop_ref) + self.writeline("%s = missing" % loop_ref) for name in node.find_all(nodes.Name): - if name.ctx == 'store' and name.name == 'loop': - self.fail('Can\'t assign to special loop variable ' - 'in for-loop target', name.lineno) + if name.ctx == "store" and name.name == "loop": + self.fail( + "Can't assign to special loop variable in for-loop target", + name.lineno, + ) if node.else_: iteration_indicator = self.temporary_identifier() - self.writeline('%s = 1' % iteration_indicator) + self.writeline("%s = 1" % iteration_indicator) - self.writeline(self.environment.is_async and 'async for ' or 'for ', node) + self.writeline(self.environment.is_async and "async for " or "for ", node) self.visit(node.target, loop_frame) if extended_loop: if self.environment.is_async: - self.write(', %s in await make_async_loop_context(' % loop_ref) + self.write(", %s in AsyncLoopContext(" % loop_ref) else: - self.write(', %s in LoopContext(' % loop_ref) + self.write(", %s in LoopContext(" % loop_ref) else: - self.write(' in ') + self.write(" in ") if node.test: - self.write('%s(' % loop_filter_func) + self.write("%s(" % loop_filter_func) if node.recursive: - self.write('reciter') + self.write("reciter") else: if self.environment.is_async and not extended_loop: - self.write('auto_aiter(') + self.write("auto_aiter(") self.visit(node.iter, frame) if self.environment.is_async and not extended_loop: - self.write(')') + self.write(")") if node.test: - self.write(')') + self.write(")") if node.recursive: - self.write(', undefined, loop_render_func, depth):') + self.write(", undefined, loop_render_func, depth):") else: - self.write(extended_loop and ', undefined):' or ':') + self.write(extended_loop and ", undefined):" or ":") self.indent() self.enter_frame(loop_frame) self.blockvisit(node.body, loop_frame) if node.else_: - self.writeline('%s = 0' % iteration_indicator) + self.writeline("%s = 0" % iteration_indicator) self.outdent() - self.leave_frame(loop_frame, with_python_scope=node.recursive - and not node.else_) + self.leave_frame( + loop_frame, with_python_scope=node.recursive and not node.else_ + ) if node.else_: - self.writeline('if %s:' % iteration_indicator) + self.writeline("if %s:" % iteration_indicator) self.indent() self.enter_frame(else_frame) self.blockvisit(node.else_, else_frame) @@ -1141,33 +1198,33 @@ def visit_For(self, node, frame): self.outdent() self.start_write(frame, node) if self.environment.is_async: - self.write('await ') - self.write('loop(') + self.write("await ") + self.write("loop(") if self.environment.is_async: - self.write('auto_aiter(') + self.write("auto_aiter(") self.visit(node.iter, frame) if self.environment.is_async: - self.write(')') - self.write(', loop)') + self.write(")") + self.write(", loop)") self.end_write(frame) def visit_If(self, node, frame): if_frame = frame.soft() - self.writeline('if ', node) + self.writeline("if ", node) self.visit(node.test, if_frame) - self.write(':') + self.write(":") self.indent() self.blockvisit(node.body, if_frame) self.outdent() for elif_ in node.elif_: - self.writeline('elif 
', elif_) + self.writeline("elif ", elif_) self.visit(elif_.test, if_frame) - self.write(':') + self.write(":") self.indent() self.blockvisit(elif_.body, if_frame) self.outdent() if node.else_: - self.writeline('else:') + self.writeline("else:") self.indent() self.blockvisit(node.else_, if_frame) self.outdent() @@ -1176,16 +1233,15 @@ def visit_Macro(self, node, frame): macro_frame, macro_ref = self.macro_body(node, frame) self.newline() if frame.toplevel: - if not node.name.startswith('_'): - self.write('context.exported_vars.add(%r)' % node.name) - ref = frame.symbols.ref(node.name) - self.writeline('context.vars[%r] = ' % node.name) - self.write('%s = ' % frame.symbols.ref(node.name)) + if not node.name.startswith("_"): + self.write("context.exported_vars.add(%r)" % node.name) + self.writeline("context.vars[%r] = " % node.name) + self.write("%s = " % frame.symbols.ref(node.name)) self.macro_def(macro_ref, macro_frame) def visit_CallBlock(self, node, frame): call_frame, macro_ref = self.macro_body(node, frame) - self.writeline('caller = ') + self.writeline("caller = ") self.macro_def(macro_ref, call_frame) self.start_write(frame, node) self.visit_Call(node.call, frame, forward_caller=True) @@ -1206,10 +1262,10 @@ def visit_With(self, node, frame): with_frame = frame.inner() with_frame.symbols.analyze_node(node) self.enter_frame(with_frame) - for idx, (target, expr) in enumerate(izip(node.targets, node.values)): + for target, expr in izip(node.targets, node.values): self.newline() self.visit(target, with_frame) - self.write(' = ') + self.write(" = ") self.visit(expr, frame) self.blockvisit(node.body, with_frame) self.leave_frame(with_frame) @@ -1218,156 +1274,187 @@ def visit_ExprStmt(self, node, frame): self.newline(node) self.visit(node.node, frame) - def visit_Output(self, node, frame): - # if we have a known extends statement, we don't output anything - # if we are in a require_output_check section - if self.has_known_extends and frame.require_output_check: - return + _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src")) + #: The default finalize function if the environment isn't configured + #: with one. Or if the environment has one, this is called on that + #: function's output for constants. + _default_finalize = text_type + _finalize = None + + def _make_finalize(self): + """Build the finalize function to be used on constants and at + runtime. Cached so it's only created once for all output nodes. + + Returns a ``namedtuple`` with the following attributes: + + ``const`` + A function to finalize constant data at compile time. + + ``src`` + Source code to output around nodes to be evaluated at + runtime. 
+ """ + if self._finalize is not None: + return self._finalize + + finalize = default = self._default_finalize + src = None - allow_constant_finalize = True if self.environment.finalize: - func = self.environment.finalize - if getattr(func, 'contextfunction', False) or \ - getattr(func, 'evalcontextfunction', False): - allow_constant_finalize = False - elif getattr(func, 'environmentfunction', False): - finalize = lambda x: text_type( - self.environment.finalize(self.environment, x)) - else: - finalize = lambda x: text_type(self.environment.finalize(x)) + src = "environment.finalize(" + env_finalize = self.environment.finalize + + def finalize(value): + return default(env_finalize(value)) + + if getattr(env_finalize, "contextfunction", False): + src += "context, " + finalize = None # noqa: F811 + elif getattr(env_finalize, "evalcontextfunction", False): + src += "context.eval_ctx, " + finalize = None + elif getattr(env_finalize, "environmentfunction", False): + src += "environment, " + + def finalize(value): + return default(env_finalize(self.environment, value)) + + self._finalize = self._FinalizeInfo(finalize, src) + return self._finalize + + def _output_const_repr(self, group): + """Given a group of constant values converted from ``Output`` + child nodes, produce a string to write to the template module + source. + """ + return repr(concat(group)) + + def _output_child_to_const(self, node, frame, finalize): + """Try to optimize a child of an ``Output`` node by trying to + convert it to constant, finalized data at compile time. + + If :exc:`Impossible` is raised, the node is not constant and + will be evaluated at runtime. Any other exception will also be + evaluated at runtime for easier debugging. + """ + const = node.as_const(frame.eval_ctx) + + if frame.eval_ctx.autoescape: + const = escape(const) + + # Template data doesn't go through finalize. + if isinstance(node, nodes.TemplateData): + return text_type(const) + + return finalize.const(const) + + def _output_child_pre(self, node, frame, finalize): + """Output extra source code before visiting a child of an + ``Output`` node. + """ + if frame.eval_ctx.volatile: + self.write("(escape if context.eval_ctx.autoescape else to_string)(") + elif frame.eval_ctx.autoescape: + self.write("escape(") else: - finalize = text_type + self.write("to_string(") + + if finalize.src is not None: + self.write(finalize.src) + + def _output_child_post(self, node, frame, finalize): + """Output extra source code after visiting a child of an + ``Output`` node. + """ + self.write(")") + + if finalize.src is not None: + self.write(")") - # if we are inside a frame that requires output checking, we do so - outdent_later = False + def visit_Output(self, node, frame): + # If an extends is active, don't render outside a block. if frame.require_output_check: - self.writeline('if parent_template is None:') + # A top-level extends is known to exist at compile time. + if self.has_known_extends: + return + + self.writeline("if parent_template is None:") self.indent() - outdent_later = True - # try to evaluate as many chunks as possible into a static - # string at compile time. + finalize = self._make_finalize() body = [] + + # Evaluate constants at compile time if possible. Each item in + # body will be either a list of static data or a node to be + # evaluated at runtime. for child in node.nodes: try: - if not allow_constant_finalize: + if not ( + # If the finalize function requires runtime context, + # constants can't be evaluated at compile time. 
+ finalize.const + # Unless it's basic template data that won't be + # finalized anyway. + or isinstance(child, nodes.TemplateData) + ): raise nodes.Impossible() - const = child.as_const(frame.eval_ctx) - except nodes.Impossible: - body.append(child) - continue - # the frame can't be volatile here, becaus otherwise the - # as_const() function would raise an Impossible exception - # at that point. - try: - if frame.eval_ctx.autoescape: - if hasattr(const, '__html__'): - const = const.__html__() - else: - const = escape(const) - const = finalize(const) - except Exception: - # if something goes wrong here we evaluate the node - # at runtime for easier debugging + + const = self._output_child_to_const(child, frame, finalize) + except (nodes.Impossible, Exception): + # The node was not constant and needs to be evaluated at + # runtime. Or another error was raised, which is easier + # to debug at runtime. body.append(child) continue + if body and isinstance(body[-1], list): body[-1].append(const) else: body.append([const]) - # if we have less than 3 nodes or a buffer we yield or extend/append - if len(body) < 3 or frame.buffer is not None: - if frame.buffer is not None: - # for one item we append, for more we extend - if len(body) == 1: - self.writeline('%s.append(' % frame.buffer) + if frame.buffer is not None: + if len(body) == 1: + self.writeline("%s.append(" % frame.buffer) + else: + self.writeline("%s.extend((" % frame.buffer) + + self.indent() + + for item in body: + if isinstance(item, list): + # A group of constant data to join and output. + val = self._output_const_repr(item) + + if frame.buffer is None: + self.writeline("yield " + val) else: - self.writeline('%s.extend((' % frame.buffer) - self.indent() - for item in body: - if isinstance(item, list): - val = repr(concat(item)) - if frame.buffer is None: - self.writeline('yield ' + val) - else: - self.writeline(val + ',') + self.writeline(val + ",") + else: + if frame.buffer is None: + self.writeline("yield ", item) else: - if frame.buffer is None: - self.writeline('yield ', item) - else: - self.newline(item) - close = 1 - if frame.eval_ctx.volatile: - self.write('(escape if context.eval_ctx.autoescape' - ' else to_string)(') - elif frame.eval_ctx.autoescape: - self.write('escape(') - else: - self.write('to_string(') - if self.environment.finalize is not None: - self.write('environment.finalize(') - if getattr(self.environment.finalize, - "contextfunction", False): - self.write('context, ') - close += 1 - self.visit(item, frame) - self.write(')' * close) - if frame.buffer is not None: - self.write(',') - if frame.buffer is not None: - # close the open parentheses - self.outdent() - self.writeline(len(body) == 1 and ')' or '))') + self.newline(item) - # otherwise we create a format string as this is faster in that case - else: - format = [] - arguments = [] - for item in body: - if isinstance(item, list): - format.append(concat(item).replace('%', '%%')) - else: - format.append('%s') - arguments.append(item) - self.writeline('yield ') - self.write(repr(concat(format)) + ' % (') - self.indent() - for argument in arguments: - self.newline(argument) - close = 0 - if frame.eval_ctx.volatile: - self.write('(escape if context.eval_ctx.autoescape else' - ' to_string)(') - close += 1 - elif frame.eval_ctx.autoescape: - self.write('escape(') - close += 1 - if self.environment.finalize is not None: - self.write('environment.finalize(') - if getattr(self.environment.finalize, - 'contextfunction', False): - self.write('context, ') - elif 
getattr(self.environment.finalize, - 'evalcontextfunction', False): - self.write('context.eval_ctx, ') - elif getattr(self.environment.finalize, - 'environmentfunction', False): - self.write('environment, ') - close += 1 - self.visit(argument, frame) - self.write(')' * close + ', ') + # A node to be evaluated at runtime. + self._output_child_pre(item, frame, finalize) + self.visit(item, frame) + self._output_child_post(item, frame, finalize) + + if frame.buffer is not None: + self.write(",") + + if frame.buffer is not None: self.outdent() - self.writeline(')') + self.writeline(")" if len(body) == 1 else "))") - if outdent_later: + if frame.require_output_check: self.outdent() def visit_Assign(self, node, frame): self.push_assign_tracking() self.newline(node) self.visit(node.target, frame) - self.write(' = ') + self.write(" = ") self.visit(node.node, frame) self.pop_assign_tracking(frame) @@ -1384,20 +1471,19 @@ def visit_AssignBlock(self, node, frame): self.blockvisit(node.body, block_frame) self.newline(node) self.visit(node.target, frame) - self.write(' = (Markup if context.eval_ctx.autoescape ' - 'else identity)(') + self.write(" = (Markup if context.eval_ctx.autoescape else identity)(") if node.filter is not None: self.visit_Filter(node.filter, block_frame) else: - self.write('concat(%s)' % block_frame.buffer) - self.write(')') + self.write("concat(%s)" % block_frame.buffer) + self.write(")") self.pop_assign_tracking(frame) self.leave_frame(block_frame) # -- Expression Visitors def visit_Name(self, node, frame): - if node.ctx == 'store' and frame.toplevel: + if node.ctx == "store" and frame.toplevel: if self._assign_stack: self._assign_stack[-1].add(node.name) ref = frame.symbols.ref(node.name) @@ -1405,12 +1491,17 @@ def visit_Name(self, node, frame): # If we are looking up a variable we might have to deal with the # case where it's undefined. We can skip that case if the load # instruction indicates a parameter which are always defined. 
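Both the format-string path removed here and the new _make_finalize machinery serve the same user-facing contract. A brief hedged sketch of that contract, using only the documented Environment(finalize=...) argument:

from jinja2 import Environment

# finalize runs on every output expression before it is written.
env = Environment(finalize=lambda value: "" if value is None else value)
print(env.from_string("{{ x }}|{{ 'y' }}").render(x=None))  # -> "|y"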
- if node.ctx == 'load': + if node.ctx == "load": load = frame.symbols.find_load(ref) - if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \ - not self.parameter_is_undeclared(ref)): - self.write('(undefined(name=%r) if %s is missing else %s)' % - (node.name, ref, ref)) + if not ( + load is not None + and load[0] == VAR_LOAD_PARAMETER + and not self.parameter_is_undeclared(ref) + ): + self.write( + "(undefined(name=%r) if %s is missing else %s)" + % (node.name, ref, ref) + ) return self.write(ref) @@ -1420,12 +1511,14 @@ def visit_NSRef(self, node, frame): # `foo.bar` notation they will be parsed as a normal attribute access # when used anywhere but in a `set` context ref = frame.symbols.ref(node.name) - self.writeline('if not isinstance(%s, Namespace):' % ref) + self.writeline("if not isinstance(%s, Namespace):" % ref) self.indent() - self.writeline('raise TemplateRuntimeError(%r)' % - 'cannot assign attribute on non-namespace object') + self.writeline( + "raise TemplateRuntimeError(%r)" + % "cannot assign attribute on non-namespace object" + ) self.outdent() - self.writeline('%s[%r]' % (ref, node.attr)) + self.writeline("%s[%r]" % (ref, node.attr)) def visit_Const(self, node, frame): val = node.as_const(frame.eval_ctx) @@ -1438,230 +1531,256 @@ def visit_TemplateData(self, node, frame): try: self.write(repr(node.as_const(frame.eval_ctx))) except nodes.Impossible: - self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)' - % node.data) + self.write( + "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data + ) def visit_Tuple(self, node, frame): - self.write('(') + self.write("(") idx = -1 for idx, item in enumerate(node.items): if idx: - self.write(', ') + self.write(", ") self.visit(item, frame) - self.write(idx == 0 and ',)' or ')') + self.write(idx == 0 and ",)" or ")") def visit_List(self, node, frame): - self.write('[') + self.write("[") for idx, item in enumerate(node.items): if idx: - self.write(', ') + self.write(", ") self.visit(item, frame) - self.write(']') + self.write("]") def visit_Dict(self, node, frame): - self.write('{') + self.write("{") for idx, item in enumerate(node.items): if idx: - self.write(', ') + self.write(", ") self.visit(item.key, frame) - self.write(': ') + self.write(": ") self.visit(item.value, frame) - self.write('}') + self.write("}") - def binop(operator, interceptable=True): + def binop(operator, interceptable=True): # noqa: B902 @optimizeconst def visitor(self, node, frame): - if self.environment.sandboxed and \ - operator in self.environment.intercepted_binops: - self.write('environment.call_binop(context, %r, ' % operator) + if ( + self.environment.sandboxed + and operator in self.environment.intercepted_binops + ): + self.write("environment.call_binop(context, %r, " % operator) self.visit(node.left, frame) - self.write(', ') + self.write(", ") self.visit(node.right, frame) else: - self.write('(') + self.write("(") self.visit(node.left, frame) - self.write(' %s ' % operator) + self.write(" %s " % operator) self.visit(node.right, frame) - self.write(')') + self.write(")") + return visitor - def uaop(operator, interceptable=True): + def uaop(operator, interceptable=True): # noqa: B902 @optimizeconst def visitor(self, node, frame): - if self.environment.sandboxed and \ - operator in self.environment.intercepted_unops: - self.write('environment.call_unop(context, %r, ' % operator) + if ( + self.environment.sandboxed + and operator in self.environment.intercepted_unops + ): + 
self.write("environment.call_unop(context, %r, " % operator) self.visit(node.node, frame) else: - self.write('(' + operator) + self.write("(" + operator) self.visit(node.node, frame) - self.write(')') + self.write(")") + return visitor - visit_Add = binop('+') - visit_Sub = binop('-') - visit_Mul = binop('*') - visit_Div = binop('/') - visit_FloorDiv = binop('//') - visit_Pow = binop('**') - visit_Mod = binop('%') - visit_And = binop('and', interceptable=False) - visit_Or = binop('or', interceptable=False) - visit_Pos = uaop('+') - visit_Neg = uaop('-') - visit_Not = uaop('not ', interceptable=False) + visit_Add = binop("+") + visit_Sub = binop("-") + visit_Mul = binop("*") + visit_Div = binop("/") + visit_FloorDiv = binop("//") + visit_Pow = binop("**") + visit_Mod = binop("%") + visit_And = binop("and", interceptable=False) + visit_Or = binop("or", interceptable=False) + visit_Pos = uaop("+") + visit_Neg = uaop("-") + visit_Not = uaop("not ", interceptable=False) del binop, uaop @optimizeconst def visit_Concat(self, node, frame): if frame.eval_ctx.volatile: - func_name = '(context.eval_ctx.volatile and' \ - ' markup_join or unicode_join)' + func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)" elif frame.eval_ctx.autoescape: - func_name = 'markup_join' + func_name = "markup_join" else: - func_name = 'unicode_join' - self.write('%s((' % func_name) + func_name = "unicode_join" + self.write("%s((" % func_name) for arg in node.nodes: self.visit(arg, frame) - self.write(', ') - self.write('))') + self.write(", ") + self.write("))") @optimizeconst def visit_Compare(self, node, frame): + self.write("(") self.visit(node.expr, frame) for op in node.ops: self.visit(op, frame) + self.write(")") def visit_Operand(self, node, frame): - self.write(' %s ' % operators[node.op]) + self.write(" %s " % operators[node.op]) self.visit(node.expr, frame) @optimizeconst def visit_Getattr(self, node, frame): - self.write('environment.getattr(') + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getattr(") self.visit(node.node, frame) - self.write(', %r)' % node.attr) + self.write(", %r)" % node.attr) + + if self.environment.is_async: + self.write("))") @optimizeconst def visit_Getitem(self, node, frame): # slices bypass the environment getitem method. 
if isinstance(node.arg, nodes.Slice): self.visit(node.node, frame) - self.write('[') + self.write("[") self.visit(node.arg, frame) - self.write(']') + self.write("]") else: - self.write('environment.getitem(') + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getitem(") self.visit(node.node, frame) - self.write(', ') + self.write(", ") self.visit(node.arg, frame) - self.write(')') + self.write(")") + + if self.environment.is_async: + self.write("))") def visit_Slice(self, node, frame): if node.start is not None: self.visit(node.start, frame) - self.write(':') + self.write(":") if node.stop is not None: self.visit(node.stop, frame) if node.step is not None: - self.write(':') + self.write(":") self.visit(node.step, frame) @optimizeconst def visit_Filter(self, node, frame): if self.environment.is_async: - self.write('await auto_await(') - self.write(self.filters[node.name] + '(') + self.write("await auto_await(") + self.write(self.filters[node.name] + "(") func = self.environment.filters.get(node.name) if func is None: - self.fail('no filter named %r' % node.name, node.lineno) - if getattr(func, 'contextfilter', False): - self.write('context, ') - elif getattr(func, 'evalcontextfilter', False): - self.write('context.eval_ctx, ') - elif getattr(func, 'environmentfilter', False): - self.write('environment, ') + self.fail("no filter named %r" % node.name, node.lineno) + if getattr(func, "contextfilter", False): + self.write("context, ") + elif getattr(func, "evalcontextfilter", False): + self.write("context.eval_ctx, ") + elif getattr(func, "environmentfilter", False): + self.write("environment, ") # if the filter node is None we are inside a filter block # and want to write to the current buffer if node.node is not None: self.visit(node.node, frame) elif frame.eval_ctx.volatile: - self.write('(context.eval_ctx.autoescape and' - ' Markup(concat(%s)) or concat(%s))' % - (frame.buffer, frame.buffer)) + self.write( + "(context.eval_ctx.autoescape and" + " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer) + ) elif frame.eval_ctx.autoescape: - self.write('Markup(concat(%s))' % frame.buffer) + self.write("Markup(concat(%s))" % frame.buffer) else: - self.write('concat(%s)' % frame.buffer) + self.write("concat(%s)" % frame.buffer) self.signature(node, frame) - self.write(')') + self.write(")") if self.environment.is_async: - self.write(')') + self.write(")") @optimizeconst def visit_Test(self, node, frame): - self.write(self.tests[node.name] + '(') + self.write(self.tests[node.name] + "(") if node.name not in self.environment.tests: - self.fail('no test named %r' % node.name, node.lineno) + self.fail("no test named %r" % node.name, node.lineno) self.visit(node.node, frame) self.signature(node, frame) - self.write(')') + self.write(")") @optimizeconst def visit_CondExpr(self, node, frame): def write_expr2(): if node.expr2 is not None: return self.visit(node.expr2, frame) - self.write('undefined(%r)' % ('the inline if-' - 'expression on %s evaluated to false and ' - 'no else section was defined.' % self.position(node))) - - self.write('(') + self.write( + "cond_expr_undefined(%r)" + % ( + "the inline if-" + "expression on %s evaluated to false and " + "no else section was defined." 
% self.position(node) + ) + ) + + self.write("(") self.visit(node.expr1, frame) - self.write(' if ') + self.write(" if ") self.visit(node.test, frame) - self.write(' else ') + self.write(" else ") write_expr2() - self.write(')') + self.write(")") @optimizeconst def visit_Call(self, node, frame, forward_caller=False): if self.environment.is_async: - self.write('await auto_await(') + self.write("await auto_await(") if self.environment.sandboxed: - self.write('environment.call(context, ') + self.write("environment.call(context, ") else: - self.write('context.call(') + self.write("context.call(") self.visit(node.node, frame) - extra_kwargs = forward_caller and {'caller': 'caller'} or None + extra_kwargs = forward_caller and {"caller": "caller"} or None self.signature(node, frame, extra_kwargs) - self.write(')') + self.write(")") if self.environment.is_async: - self.write(')') + self.write(")") def visit_Keyword(self, node, frame): - self.write(node.key + '=') + self.write(node.key + "=") self.visit(node.value, frame) # -- Unused nodes for extensions def visit_MarkSafe(self, node, frame): - self.write('Markup(') + self.write("Markup(") self.visit(node.expr, frame) - self.write(')') + self.write(")") def visit_MarkSafeIfAutoescape(self, node, frame): - self.write('(context.eval_ctx.autoescape and Markup or identity)(') + self.write("(context.eval_ctx.autoescape and Markup or identity)(") self.visit(node.expr, frame) - self.write(')') + self.write(")") def visit_EnvironmentAttribute(self, node, frame): - self.write('environment.' + node.name) + self.write("environment." + node.name) def visit_ExtensionAttribute(self, node, frame): - self.write('environment.extensions[%r].%s' % (node.identifier, node.name)) + self.write("environment.extensions[%r].%s" % (node.identifier, node.name)) def visit_ImportedName(self, node, frame): self.write(self.import_aliases[node.importname]) @@ -1670,13 +1789,16 @@ def visit_InternalName(self, node, frame): self.write(node.name) def visit_ContextReference(self, node, frame): - self.write('context') + self.write("context") + + def visit_DerivedContextReference(self, node, frame): + self.write(self.derive_context(frame)) def visit_Continue(self, node, frame): - self.writeline('continue', node) + self.writeline("continue", node) def visit_Break(self, node, frame): - self.writeline('break', node) + self.writeline("break", node) def visit_Scope(self, node, frame): scope_frame = frame.inner() @@ -1687,8 +1809,8 @@ def visit_Scope(self, node, frame): def visit_OverlayScope(self, node, frame): ctx = self.temporary_identifier() - self.writeline('%s = %s' % (ctx, self.derive_context(frame))) - self.writeline('%s.vars = ' % ctx) + self.writeline("%s = %s" % (ctx, self.derive_context(frame))) + self.writeline("%s.vars = " % ctx) self.visit(node.context, frame) self.push_context_reference(ctx) @@ -1701,7 +1823,7 @@ def visit_OverlayScope(self, node, frame): def visit_EvalContextModifier(self, node, frame): for keyword in node.options: - self.writeline('context.eval_ctx.%s = ' % keyword.key) + self.writeline("context.eval_ctx.%s = " % keyword.key) self.visit(keyword.value, frame) try: val = keyword.value.as_const(frame.eval_ctx) @@ -1713,9 +1835,9 @@ def visit_EvalContextModifier(self, node, frame): def visit_ScopedEvalContextModifier(self, node, frame): old_ctx_name = self.temporary_identifier() saved_ctx = frame.eval_ctx.save() - self.writeline('%s = context.eval_ctx.save()' % old_ctx_name) + self.writeline("%s = context.eval_ctx.save()" % old_ctx_name) 
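The cond_expr_undefined fallback wired in by visit_CondExpr above is user-visible: an inline if-expression with no else evaluates to Undefined, which the default environment renders as an empty string. For example:

from jinja2 import Template

print(Template("{{ 'hi' if greeting }}").render())               # -> ""
print(Template("{{ 'hi' if greeting }}").render(greeting=True))  # -> "hi"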
self.visit_EvalContextModifier(node, frame) for child in node.body: self.visit(child, frame) frame.eval_ctx.revert(saved_ctx) - self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name) + self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name) diff --git a/pipenv/vendor/jinja2/constants.py b/pipenv/vendor/jinja2/constants.py index 11efd1ed15..bf7f2ca721 100644 --- a/pipenv/vendor/jinja2/constants.py +++ b/pipenv/vendor/jinja2/constants.py @@ -1,17 +1,6 @@ # -*- coding: utf-8 -*- -""" - jinja.constants - ~~~~~~~~~~~~~~~ - - Various constants. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" - - #: list of lorem ipsum words used by the lipsum() helper function -LOREM_IPSUM_WORDS = u'''\ +LOREM_IPSUM_WORDS = u"""\ a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at auctor augue bibendum blandit class commodo condimentum congue consectetuer consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus @@ -29,4 +18,4 @@ sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus -viverra volutpat vulputate''' +viverra volutpat vulputate""" diff --git a/pipenv/vendor/jinja2/debug.py b/pipenv/vendor/jinja2/debug.py index d3c1a3a875..d2c5a06bf6 100644 --- a/pipenv/vendor/jinja2/debug.py +++ b/pipenv/vendor/jinja2/debug.py @@ -1,378 +1,271 @@ -# -*- coding: utf-8 -*- -""" - jinja2.debug - ~~~~~~~~~~~~ - - Implements the debug interface for Jinja. This module does some pretty - ugly stuff with the Python traceback system in order to achieve tracebacks - with correct line numbers, locals and contents. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" import sys -import traceback -from types import TracebackType, CodeType -from jinja2.utils import missing, internal_code -from jinja2.exceptions import TemplateSyntaxError -from jinja2._compat import iteritems, reraise, PY2 +from types import CodeType -# on pypy we can take advantage of transparent proxies -try: - from __pypy__ import tproxy -except ImportError: - tproxy = None +from . import TemplateSyntaxError +from ._compat import PYPY +from .utils import internal_code +from .utils import missing -# how does the raise helper look like? -try: - exec("raise TypeError, 'foo'") -except SyntaxError: - raise_helper = 'raise __jinja_exception__[1]' -except TypeError: - raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]' +def rewrite_traceback_stack(source=None): + """Rewrite the current exception to replace any tracebacks from + within compiled template code with tracebacks that look like they + came from the template source. + This must be called within an ``except`` block. -class TracebackFrameProxy(object): - """Proxies a traceback frame.""" + :param exc_info: A :meth:`sys.exc_info` tuple. If not provided, + the current ``exc_info`` is used. + :param source: For ``TemplateSyntaxError``, the original source if + known. + :return: A :meth:`sys.exc_info` tuple that can be re-raised. 
+ """ + exc_type, exc_value, tb = sys.exc_info() - def __init__(self, tb): - self.tb = tb - self._tb_next = None + if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated: + exc_value.translated = True + exc_value.source = source - @property - def tb_next(self): - return self._tb_next + try: + # Remove the old traceback on Python 3, otherwise the frames + # from the compiler still show up. + exc_value.with_traceback(None) + except AttributeError: + pass - def set_next(self, next): - if tb_set_next is not None: - try: - tb_set_next(self.tb, next and next.tb or None) - except Exception: - # this function can fail due to all the hackery it does - # on various python implementations. We just catch errors - # down and ignore them if necessary. - pass - self._tb_next = next - - @property - def is_jinja_frame(self): - return '__jinja_template__' in self.tb.tb_frame.f_globals - - def __getattr__(self, name): - return getattr(self.tb, name) - - -def make_frame_proxy(frame): - proxy = TracebackFrameProxy(frame) - if tproxy is None: - return proxy - def operation_handler(operation, *args, **kwargs): - if operation in ('__getattribute__', '__getattr__'): - return getattr(proxy, args[0]) - elif operation == '__setattr__': - proxy.__setattr__(*args, **kwargs) - else: - return getattr(proxy, operation)(*args, **kwargs) - return tproxy(TracebackType, operation_handler) - - -class ProcessedTraceback(object): - """Holds a Jinja preprocessed traceback for printing or reraising.""" - - def __init__(self, exc_type, exc_value, frames): - assert frames, 'no frames for this traceback?' - self.exc_type = exc_type - self.exc_value = exc_value - self.frames = frames - - # newly concatenate the frames (which are proxies) - prev_tb = None - for tb in self.frames: - if prev_tb is not None: - prev_tb.set_next(tb) - prev_tb = tb - prev_tb.set_next(None) - - def render_as_text(self, limit=None): - """Return a string with the traceback.""" - lines = traceback.format_exception(self.exc_type, self.exc_value, - self.frames[0], limit=limit) - return ''.join(lines).rstrip() - - def render_as_html(self, full=False): - """Return a unicode string with the traceback as rendered HTML.""" - from jinja2.debugrenderer import render_traceback - return u'%s\n\n<!--\n%s\n-->' % ( - render_traceback(self, full=full), - self.render_as_text().decode('utf-8', 'replace') + # Outside of runtime, so the frame isn't executing template + # code, but it still needs to point at the template. 
+ tb = fake_traceback( + exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno ) - - @property - def is_template_syntax_error(self): - """`True` if this is a template syntax error.""" - return isinstance(self.exc_value, TemplateSyntaxError) - - @property - def exc_info(self): - """Exception info tuple with a proxy around the frame objects.""" - return self.exc_type, self.exc_value, self.frames[0] - - @property - def standard_exc_info(self): - """Standard python exc_info for re-raising""" - tb = self.frames[0] - # the frame will be an actual traceback (or transparent proxy) if - # we are on pypy or a python implementation with support for tproxy - if type(tb) is not TracebackType: - tb = tb.tb - return self.exc_type, self.exc_value, tb - - -def make_traceback(exc_info, source_hint=None): - """Creates a processed traceback object from the exc_info.""" - exc_type, exc_value, tb = exc_info - if isinstance(exc_value, TemplateSyntaxError): - exc_info = translate_syntax_error(exc_value, source_hint) - initial_skip = 0 else: - initial_skip = 1 - return translate_exception(exc_info, initial_skip) - - -def translate_syntax_error(error, source=None): - """Rewrites a syntax error to please traceback systems.""" - error.source = source - error.translated = True - exc_info = (error.__class__, error, None) - filename = error.filename - if filename is None: - filename = '<unknown>' - return fake_exc_info(exc_info, filename, error.lineno) + # Skip the frame for the render function. + tb = tb.tb_next + stack = [] -def translate_exception(exc_info, initial_skip=0): - """If passed an exc_info it will automatically rewrite the exceptions - all the way down to the correct line numbers and frames. - """ - tb = exc_info[2] - frames = [] - - # skip some internal frames if wanted - for x in range(initial_skip): - if tb is not None: - tb = tb.tb_next - initial_tb = tb - + # Build the stack of traceback object, replacing any in template + # code with the source file and line information. while tb is not None: - # skip frames decorated with @internalcode. These are internal - # calls we can't avoid and that are useless in template debugging - # output. + # Skip frames decorated with @internalcode. These are internal + # calls that aren't useful in template debugging output. if tb.tb_frame.f_code in internal_code: tb = tb.tb_next continue - # save a reference to the next frame if we override the current - # one with a faked one. - next = tb.tb_next + template = tb.tb_frame.f_globals.get("__jinja_template__") - # fake template exceptions - template = tb.tb_frame.f_globals.get('__jinja_template__') if template is not None: lineno = template.get_corresponding_lineno(tb.tb_lineno) - tb = fake_exc_info(exc_info[:2] + (tb,), template.filename, - lineno)[2] + fake_tb = fake_traceback(exc_value, tb, template.filename, lineno) + stack.append(fake_tb) + else: + stack.append(tb) - frames.append(make_frame_proxy(tb)) - tb = next + tb = tb.tb_next - # if we don't have any exceptions in the frames left, we have to - # reraise it unchanged. - # XXX: can we backup here? when could this happen? - if not frames: - reraise(exc_info[0], exc_info[1], exc_info[2]) + tb_next = None - return ProcessedTraceback(exc_info[0], exc_info[1], frames) + # Assign tb_next in reverse to avoid circular references. 
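Taken together, the stack rewriting above is what makes template errors report template coordinates. A quick illustrative check, assuming any template that raises at render time:

import traceback

from jinja2 import Environment

t = Environment().from_string("first line\n{{ 1 // 0 }}")
try:
    t.render()
except ZeroDivisionError:
    traceback.print_exc()
# The printed stack includes a fabricated frame such as
#   File "<template>", line 2, in top-level template code
# pointing at the template source rather than the compiled module.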
+ for tb in reversed(stack): + tb_next = tb_set_next(tb, tb_next) + return exc_type, exc_value, tb_next -def get_jinja_locals(real_locals): - ctx = real_locals.get('context') - if ctx: - locals = ctx.get_all().copy() + +def fake_traceback(exc_value, tb, filename, lineno): + """Produce a new traceback object that looks like it came from the + template source instead of the compiled code. The filename, line + number, and location name will point to the template, and the local + variables will be the current template context. + + :param exc_value: The original exception to be re-raised to create + the new traceback. + :param tb: The original traceback to get the local variables and + code info from. + :param filename: The template filename. + :param lineno: The line number in the template source. + """ + if tb is not None: + # Replace the real locals with the context that would be + # available at that point in the template. + locals = get_template_locals(tb.tb_frame.f_locals) + locals.pop("__jinja_exception__", None) else: locals = {} + globals = { + "__name__": filename, + "__file__": filename, + "__jinja_exception__": exc_value, + } + # Raise an exception at the correct line number. + code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec") + + # Build a new code object that points to the template file and + # replaces the location with a block name. + try: + location = "template" + + if tb is not None: + function = tb.tb_frame.f_code.co_name + + if function == "root": + location = "top-level template code" + elif function.startswith("block_"): + location = 'block "%s"' % function[6:] + + # Collect arguments for the new code object. CodeType only + # accepts positional arguments, and arguments were inserted in + # new Python versions. + code_args = [] + + for attr in ( + "argcount", + "posonlyargcount", # Python 3.8 + "kwonlyargcount", # Python 3 + "nlocals", + "stacksize", + "flags", + "code", # codestring + "consts", # constants + "names", + "varnames", + ("filename", filename), + ("name", location), + "firstlineno", + "lnotab", + "freevars", + "cellvars", + ): + if isinstance(attr, tuple): + # Replace with given value. + code_args.append(attr[1]) + continue + + try: + # Copy original value if it exists. + code_args.append(getattr(code, "co_" + attr)) + except AttributeError: + # Some arguments were added later. + continue + + code = CodeType(*code_args) + except Exception: + # Some environments such as Google App Engine don't support + # modifying code objects. + pass + + # Execute the new code, which is guaranteed to raise, and return + # the new traceback without this frame. + try: + exec(code, globals, locals) + except BaseException: + return sys.exc_info()[2].tb_next + + +def get_template_locals(real_locals): + """Based on the runtime locals, get the context that would be + available at that point in the template. + """ + # Start with the current template context. + ctx = real_locals.get("context") + + if ctx: + data = ctx.get_all().copy() + else: + data = {} + + # Might be in a derived context that only sets local variables + # rather than pushing a context. Local variables follow the scheme + # l_depth_name. Find the highest-depth local that has a value for + # each name. local_overrides = {} - for name, value in iteritems(real_locals): - if not name.startswith('l_') or value is missing: + for name, value in real_locals.items(): + if not name.startswith("l_") or value is missing: + # Not a template variable, or no longer relevant. 
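+            # ("context", "environment", and similar runtime names fall
+            # through here, as do locals the compiler reset to missing.)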
continue + try: - _, depth, name = name.split('_', 2) + _, depth, name = name.split("_", 2) depth = int(depth) except ValueError: continue + cur_depth = local_overrides.get(name, (-1,))[0] + if cur_depth < depth: local_overrides[name] = (depth, value) - for name, (_, value) in iteritems(local_overrides): + # Modify the context with any derived context. + for name, (_, value) in local_overrides.items(): if value is missing: - locals.pop(name, None) + data.pop(name, None) else: - locals[name] = value + data[name] = value - return locals + return data -def fake_exc_info(exc_info, filename, lineno): - """Helper for `translate_exception`.""" - exc_type, exc_value, tb = exc_info +if sys.version_info >= (3, 7): + # tb_next is directly assignable as of Python 3.7 + def tb_set_next(tb, tb_next): + tb.tb_next = tb_next + return tb - # figure the real context out - if tb is not None: - locals = get_jinja_locals(tb.tb_frame.f_locals) - # if there is a local called __jinja_exception__, we get - # rid of it to not break the debug functionality. - locals.pop('__jinja_exception__', None) - else: - locals = {} - - # assamble fake globals we need - globals = { - '__name__': filename, - '__file__': filename, - '__jinja_exception__': exc_info[:2], - - # we don't want to keep the reference to the template around - # to not cause circular dependencies, but we mark it as Jinja - # frame for the ProcessedTraceback - '__jinja_template__': None - } - - # and fake the exception - code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec') - - # if it's possible, change the name of the code. This won't work - # on some python environments such as google appengine +elif PYPY: + # PyPy might have special support, and won't work with ctypes. try: - if tb is None: - location = 'template' - else: - function = tb.tb_frame.f_code.co_name - if function == 'root': - location = 'top-level template code' - elif function.startswith('block_'): - location = 'block "%s"' % function[6:] - else: - location = 'template' - - if PY2: - code = CodeType(0, code.co_nlocals, code.co_stacksize, - code.co_flags, code.co_code, code.co_consts, - code.co_names, code.co_varnames, filename, - location, code.co_firstlineno, - code.co_lnotab, (), ()) - else: - code = CodeType(0, code.co_kwonlyargcount, - code.co_nlocals, code.co_stacksize, - code.co_flags, code.co_code, code.co_consts, - code.co_names, code.co_varnames, filename, - location, code.co_firstlineno, - code.co_lnotab, (), ()) - except Exception as e: - pass + import tputil + except ImportError: + # Without tproxy support, use the original traceback. + def tb_set_next(tb, tb_next): + return tb - # execute the code and catch the new traceback - try: - exec(code, globals, locals) - except: - exc_info = sys.exc_info() - new_tb = exc_info[2].tb_next + else: + # With tproxy support, create a proxy around the traceback that + # returns the new tb_next. + def tb_set_next(tb, tb_next): + def controller(op): + if op.opname == "__getattribute__" and op.args[0] == "tb_next": + return tb_next - # return without this frame - return exc_info[:2] + (new_tb,) + return op.delegate() + return tputil.make_proxy(controller, obj=tb) -def _init_ugly_crap(): - """This function implements a few ugly things so that we can patch the - traceback objects. The function returned allows resetting `tb_next` on - any python traceback object. 
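With one of the tb_set_next implementations in place, the net effect of rewrite_traceback_stack is that rendering errors report template line numbers rather than frames from the compiled module. A minimal sketch, assuming the vendored copy behaves like upstream Jinja 2.11:

import traceback
from jinja2 import Environment

env = Environment()
tmpl = env.from_string("ok\n{{ 1 / 0 }}")
try:
    tmpl.render()
except ZeroDivisionError:
    # The rewritten chain reports File "<template>", line 2, in
    # top-level template code, instead of the generated module.
    print(traceback.format_exc())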
Do not attempt to use this on non cpython - interpreters - """ - import ctypes - from types import TracebackType - if PY2: - # figure out size of _Py_ssize_t for Python 2: - if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'): - _Py_ssize_t = ctypes.c_int64 - else: - _Py_ssize_t = ctypes.c_int - else: - # platform ssize_t on Python 3 - _Py_ssize_t = ctypes.c_ssize_t +else: + # Use ctypes to assign tb_next at the C level since it's read-only + # from Python. + import ctypes - # regular python - class _PyObject(ctypes.Structure): - pass - _PyObject._fields_ = [ - ('ob_refcnt', _Py_ssize_t), - ('ob_type', ctypes.POINTER(_PyObject)) - ] - - # python with trace - if hasattr(sys, 'getobjects'): - class _PyObject(ctypes.Structure): - pass - _PyObject._fields_ = [ - ('_ob_next', ctypes.POINTER(_PyObject)), - ('_ob_prev', ctypes.POINTER(_PyObject)), - ('ob_refcnt', _Py_ssize_t), - ('ob_type', ctypes.POINTER(_PyObject)) + class _CTraceback(ctypes.Structure): + _fields_ = [ + # Extra PyObject slots when compiled with Py_TRACE_REFS. + ( + "PyObject_HEAD", + ctypes.c_byte * (32 if hasattr(sys, "getobjects") else 16), + ), + # Only care about tb_next as an object, not a traceback. + ("tb_next", ctypes.py_object), ] - class _Traceback(_PyObject): - pass - _Traceback._fields_ = [ - ('tb_next', ctypes.POINTER(_Traceback)), - ('tb_frame', ctypes.POINTER(_PyObject)), - ('tb_lasti', ctypes.c_int), - ('tb_lineno', ctypes.c_int) - ] - - def tb_set_next(tb, next): - """Set the tb_next attribute of a traceback object.""" - if not (isinstance(tb, TracebackType) and - (next is None or isinstance(next, TracebackType))): - raise TypeError('tb_set_next arguments must be traceback objects') - obj = _Traceback.from_address(id(tb)) - if tb.tb_next is not None: - old = _Traceback.from_address(id(tb.tb_next)) - old.ob_refcnt -= 1 - if next is None: - obj.tb_next = ctypes.POINTER(_Traceback)() - else: - next = _Traceback.from_address(id(next)) - next.ob_refcnt += 1 - obj.tb_next = ctypes.pointer(next) + def tb_set_next(tb, tb_next): + c_tb = _CTraceback.from_address(id(tb)) - return tb_set_next + # Clear out the old tb_next. + if tb.tb_next is not None: + c_tb_next = ctypes.py_object(tb.tb_next) + c_tb.tb_next = ctypes.py_object() + ctypes.pythonapi.Py_DecRef(c_tb_next) + # Assign the new tb_next. + if tb_next is not None: + c_tb_next = ctypes.py_object(tb_next) + ctypes.pythonapi.Py_IncRef(c_tb_next) + c_tb.tb_next = c_tb_next -# try to get a tb_set_next implementation if we don't have transparent -# proxies. -tb_set_next = None -if tproxy is None: - # traceback.tb_next can be modified since CPython 3.7 - if sys.version_info >= (3, 7): - def tb_set_next(tb, next): - tb.tb_next = next - else: - # On Python 3.6 and older, use ctypes - try: - tb_set_next = _init_ugly_crap() - except Exception: - pass -del _init_ugly_crap + return tb diff --git a/pipenv/vendor/jinja2/defaults.py b/pipenv/vendor/jinja2/defaults.py index 7c93dec0ae..8e0e7d7710 100644 --- a/pipenv/vendor/jinja2/defaults.py +++ b/pipenv/vendor/jinja2/defaults.py @@ -1,56 +1,44 @@ # -*- coding: utf-8 -*- -""" - jinja2.defaults - ~~~~~~~~~~~~~~~ - - Jinja default filters and tags. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. 
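The defaults defined below are plain constants that every Environment copies; any of them can be overridden per instance. For illustration, assuming the vendored copy matches upstream Jinja 2.11:

from jinja2 import Environment

env = Environment(
    block_start_string="<%",
    block_end_string="%>",
    variable_start_string="${",
    variable_end_string="}",
)
# Renders "hi" using the custom delimiters.
print(env.from_string("<% if x %>${ x }<% endif %>").render(x="hi"))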
-""" -from jinja2._compat import range_type -from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace - +from ._compat import range_type +from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401 +from .tests import TESTS as DEFAULT_TESTS # noqa: F401 +from .utils import Cycler +from .utils import generate_lorem_ipsum +from .utils import Joiner +from .utils import Namespace # defaults for the parser / lexer -BLOCK_START_STRING = '{%' -BLOCK_END_STRING = '%}' -VARIABLE_START_STRING = '{{' -VARIABLE_END_STRING = '}}' -COMMENT_START_STRING = '{#' -COMMENT_END_STRING = '#}' +BLOCK_START_STRING = "{%" +BLOCK_END_STRING = "%}" +VARIABLE_START_STRING = "{{" +VARIABLE_END_STRING = "}}" +COMMENT_START_STRING = "{#" +COMMENT_END_STRING = "#}" LINE_STATEMENT_PREFIX = None LINE_COMMENT_PREFIX = None TRIM_BLOCKS = False LSTRIP_BLOCKS = False -NEWLINE_SEQUENCE = '\n' +NEWLINE_SEQUENCE = "\n" KEEP_TRAILING_NEWLINE = False - # default filters, tests and namespace -from jinja2.filters import FILTERS as DEFAULT_FILTERS -from jinja2.tests import TESTS as DEFAULT_TESTS + DEFAULT_NAMESPACE = { - 'range': range_type, - 'dict': dict, - 'lipsum': generate_lorem_ipsum, - 'cycler': Cycler, - 'joiner': Joiner, - 'namespace': Namespace + "range": range_type, + "dict": dict, + "lipsum": generate_lorem_ipsum, + "cycler": Cycler, + "joiner": Joiner, + "namespace": Namespace, } - # default policies DEFAULT_POLICIES = { - 'compiler.ascii_str': True, - 'urlize.rel': 'noopener', - 'urlize.target': None, - 'truncate.leeway': 5, - 'json.dumps_function': None, - 'json.dumps_kwargs': {'sort_keys': True}, - 'ext.i18n.trimmed': False, + "compiler.ascii_str": True, + "urlize.rel": "noopener", + "urlize.target": None, + "truncate.leeway": 5, + "json.dumps_function": None, + "json.dumps_kwargs": {"sort_keys": True}, + "ext.i18n.trimmed": False, } - - -# export all constants -__all__ = tuple(x for x in locals().keys() if x.isupper()) diff --git a/pipenv/vendor/jinja2/environment.py b/pipenv/vendor/jinja2/environment.py index 549d9afab4..bf44b9deb4 100644 --- a/pipenv/vendor/jinja2/environment.py +++ b/pipenv/vendor/jinja2/environment.py @@ -1,60 +1,83 @@ # -*- coding: utf-8 -*- -""" - jinja2.environment - ~~~~~~~~~~~~~~~~~~ - - Provides a class that holds runtime and parsing time options. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. +"""Classes for managing templates and their runtime and compile time +options. 
""" import os import sys import weakref -from functools import reduce, partial -from jinja2 import nodes -from jinja2.defaults import BLOCK_START_STRING, \ - BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \ - COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \ - LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \ - DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \ - DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS -from jinja2.lexer import get_lexer, TokenStream -from jinja2.parser import Parser -from jinja2.nodes import EvalContext -from jinja2.compiler import generate, CodeGenerator -from jinja2.runtime import Undefined, new_context, Context -from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \ - TemplatesNotFound, TemplateRuntimeError -from jinja2.utils import import_string, LRUCache, Markup, missing, \ - concat, consume, internalcode, have_async_gen -from jinja2._compat import imap, ifilter, string_types, iteritems, \ - text_type, reraise, implements_iterator, implements_to_string, \ - encode_filename, PY2, PYPY - +from functools import partial +from functools import reduce + +from markupsafe import Markup + +from . import nodes +from ._compat import encode_filename +from ._compat import implements_iterator +from ._compat import implements_to_string +from ._compat import iteritems +from ._compat import PY2 +from ._compat import PYPY +from ._compat import reraise +from ._compat import string_types +from ._compat import text_type +from .compiler import CodeGenerator +from .compiler import generate +from .defaults import BLOCK_END_STRING +from .defaults import BLOCK_START_STRING +from .defaults import COMMENT_END_STRING +from .defaults import COMMENT_START_STRING +from .defaults import DEFAULT_FILTERS +from .defaults import DEFAULT_NAMESPACE +from .defaults import DEFAULT_POLICIES +from .defaults import DEFAULT_TESTS +from .defaults import KEEP_TRAILING_NEWLINE +from .defaults import LINE_COMMENT_PREFIX +from .defaults import LINE_STATEMENT_PREFIX +from .defaults import LSTRIP_BLOCKS +from .defaults import NEWLINE_SEQUENCE +from .defaults import TRIM_BLOCKS +from .defaults import VARIABLE_END_STRING +from .defaults import VARIABLE_START_STRING +from .exceptions import TemplateNotFound +from .exceptions import TemplateRuntimeError +from .exceptions import TemplatesNotFound +from .exceptions import TemplateSyntaxError +from .exceptions import UndefinedError +from .lexer import get_lexer +from .lexer import TokenStream +from .nodes import EvalContext +from .parser import Parser +from .runtime import Context +from .runtime import new_context +from .runtime import Undefined +from .utils import concat +from .utils import consume +from .utils import have_async_gen +from .utils import import_string +from .utils import internalcode +from .utils import LRUCache +from .utils import missing # for direct template usage we have up to ten living environments _spontaneous_environments = LRUCache(10) -# the function to create jinja traceback objects. This is dynamically -# imported on the first exception in the exception handler. -_make_traceback = None +def get_spontaneous_environment(cls, *args): + """Return a new spontaneous environment. A spontaneous environment + is used for templates created directly rather than through an + existing environment. -def get_spontaneous_environment(*args): - """Return a new spontaneous environment. 
A spontaneous environment is an - unnamed and unaccessible (in theory) environment that is used for - templates generated from a string and not from the file system. + :param cls: Environment class to create. + :param args: Positional arguments passed to environment. """ + key = (cls, args) + try: - env = _spontaneous_environments.get(args) - except TypeError: - return Environment(*args) - if env is not None: + return _spontaneous_environments[key] + except KeyError: + _spontaneous_environments[key] = env = cls(*args) + env.shared = True return env - _spontaneous_environments[args] = env = Environment(*args) - env.shared = True - return env def create_cache(size): @@ -93,20 +116,25 @@ def fail_for_missing_callable(string, name): try: name._fail_with_undefined_error() except Exception as e: - msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e) + msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e) raise TemplateRuntimeError(msg) def _environment_sanity_check(environment): """Perform a sanity check on the environment.""" - assert issubclass(environment.undefined, Undefined), 'undefined must ' \ - 'be a subclass of undefined because filters depend on it.' - assert environment.block_start_string != \ - environment.variable_start_string != \ - environment.comment_start_string, 'block, variable and comment ' \ - 'start strings must be different' - assert environment.newline_sequence in ('\r', '\r\n', '\n'), \ - 'newline_sequence set to unknown line ending string.' + assert issubclass( + environment.undefined, Undefined + ), "undefined must be a subclass of undefined because filters depend on it." + assert ( + environment.block_start_string + != environment.variable_start_string + != environment.comment_start_string + ), "block, variable and comment start strings must be different" + assert environment.newline_sequence in ( + "\r", + "\r\n", + "\n", + ), "newline_sequence set to unknown line ending string." return environment @@ -191,7 +219,7 @@ class Environment(object): `autoescape` If set to ``True`` the XML/HTML autoescaping feature is enabled by default. For more details about autoescaping see - :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also + :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also be a callable that is passed the template name and has to return ``True`` or ``False`` depending on autoescape should be enabled by default. @@ -249,10 +277,6 @@ class Environment(object): #: must not be modified shared = False - #: these are currently EXPERIMENTAL undocumented features. - exception_handler = None - exception_formatter = None - #: the class that is used for code generation. See #: :class:`~jinja2.compiler.CodeGenerator` for more information. code_generator_class = CodeGenerator @@ -261,29 +285,31 @@ class Environment(object): #: :class:`~jinja2.runtime.Context` for more information. 
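As the docstring above notes, autoescape may be a callable that receives the template name; jinja2.select_autoescape builds such a callable. A quick sketch:

from jinja2 import Environment, select_autoescape

env = Environment(autoescape=select_autoescape(("html", "xml")))
# Autoescaping is decided per template name:
print(env.autoescape("index.html"))  # True
print(env.autoescape("notes.txt"))   # False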
context_class = Context - def __init__(self, - block_start_string=BLOCK_START_STRING, - block_end_string=BLOCK_END_STRING, - variable_start_string=VARIABLE_START_STRING, - variable_end_string=VARIABLE_END_STRING, - comment_start_string=COMMENT_START_STRING, - comment_end_string=COMMENT_END_STRING, - line_statement_prefix=LINE_STATEMENT_PREFIX, - line_comment_prefix=LINE_COMMENT_PREFIX, - trim_blocks=TRIM_BLOCKS, - lstrip_blocks=LSTRIP_BLOCKS, - newline_sequence=NEWLINE_SEQUENCE, - keep_trailing_newline=KEEP_TRAILING_NEWLINE, - extensions=(), - optimized=True, - undefined=Undefined, - finalize=None, - autoescape=False, - loader=None, - cache_size=400, - auto_reload=True, - bytecode_cache=None, - enable_async=False): + def __init__( + self, + block_start_string=BLOCK_START_STRING, + block_end_string=BLOCK_END_STRING, + variable_start_string=VARIABLE_START_STRING, + variable_end_string=VARIABLE_END_STRING, + comment_start_string=COMMENT_START_STRING, + comment_end_string=COMMENT_END_STRING, + line_statement_prefix=LINE_STATEMENT_PREFIX, + line_comment_prefix=LINE_COMMENT_PREFIX, + trim_blocks=TRIM_BLOCKS, + lstrip_blocks=LSTRIP_BLOCKS, + newline_sequence=NEWLINE_SEQUENCE, + keep_trailing_newline=KEEP_TRAILING_NEWLINE, + extensions=(), + optimized=True, + undefined=Undefined, + finalize=None, + autoescape=False, + loader=None, + cache_size=400, + auto_reload=True, + bytecode_cache=None, + enable_async=False, + ): # !!Important notice!! # The constructor accepts quite a few arguments that should be # passed by keyword rather than position. However it's important to @@ -334,6 +360,9 @@ def __init__(self, self.enable_async = enable_async self.is_async = self.enable_async and have_async_gen + if self.is_async: + # runs patch_all() to enable async support + from . import asyncsupport # noqa: F401 _environment_sanity_check(self) @@ -353,15 +382,28 @@ def extend(self, **attributes): if not hasattr(self, key): setattr(self, key, value) - def overlay(self, block_start_string=missing, block_end_string=missing, - variable_start_string=missing, variable_end_string=missing, - comment_start_string=missing, comment_end_string=missing, - line_statement_prefix=missing, line_comment_prefix=missing, - trim_blocks=missing, lstrip_blocks=missing, - extensions=missing, optimized=missing, - undefined=missing, finalize=missing, autoescape=missing, - loader=missing, cache_size=missing, auto_reload=missing, - bytecode_cache=missing): + def overlay( + self, + block_start_string=missing, + block_end_string=missing, + variable_start_string=missing, + variable_end_string=missing, + comment_start_string=missing, + comment_end_string=missing, + line_statement_prefix=missing, + line_comment_prefix=missing, + trim_blocks=missing, + lstrip_blocks=missing, + extensions=missing, + optimized=missing, + undefined=missing, + finalize=missing, + autoescape=missing, + loader=missing, + cache_size=missing, + auto_reload=missing, + bytecode_cache=missing, + ): """Create a new overlay environment that shares all the data with the current environment except for cache and the overridden attributes. Extensions cannot be removed for an overlayed environment. An overlayed @@ -374,7 +416,7 @@ def overlay(self, block_start_string=missing, block_end_string=missing, through. 
""" args = dict(locals()) - del args['self'], args['cache_size'], args['extensions'] + del args["self"], args["cache_size"], args["extensions"] rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) @@ -402,8 +444,7 @@ def overlay(self, block_start_string=missing, block_end_string=missing, def iter_extensions(self): """Iterates over the extensions by priority.""" - return iter(sorted(self.extensions.values(), - key=lambda x: x.priority)) + return iter(sorted(self.extensions.values(), key=lambda x: x.priority)) def getitem(self, obj, argument): """Get an item or attribute of an object but prefer the item.""" @@ -435,8 +476,9 @@ def getattr(self, obj, attribute): except (TypeError, LookupError, AttributeError): return self.undefined(obj=obj, name=attribute) - def call_filter(self, name, value, args=None, kwargs=None, - context=None, eval_ctx=None): + def call_filter( + self, name, value, args=None, kwargs=None, context=None, eval_ctx=None + ): """Invokes a filter on a value the same way the compiler does it. Note that on Python 3 this might return a coroutine in case the @@ -448,21 +490,22 @@ def call_filter(self, name, value, args=None, kwargs=None, """ func = self.filters.get(name) if func is None: - fail_for_missing_callable('no filter named %r', name) + fail_for_missing_callable("no filter named %r", name) args = [value] + list(args or ()) - if getattr(func, 'contextfilter', False): + if getattr(func, "contextfilter", False): if context is None: - raise TemplateRuntimeError('Attempted to invoke context ' - 'filter without context') + raise TemplateRuntimeError( + "Attempted to invoke context filter without context" + ) args.insert(0, context) - elif getattr(func, 'evalcontextfilter', False): + elif getattr(func, "evalcontextfilter", False): if eval_ctx is None: if context is not None: eval_ctx = context.eval_ctx else: eval_ctx = EvalContext(self) args.insert(0, eval_ctx) - elif getattr(func, 'environmentfilter', False): + elif getattr(func, "environmentfilter", False): args.insert(0, self) return func(*args, **(kwargs or {})) @@ -473,7 +516,7 @@ def call_test(self, name, value, args=None, kwargs=None): """ func = self.tests.get(name) if func is None: - fail_for_missing_callable('no test named %r', name) + fail_for_missing_callable("no test named %r", name) return func(value, *(args or ()), **(kwargs or {})) @internalcode @@ -483,14 +526,13 @@ def parse(self, source, name=None, filename=None): executable source- or bytecode. This is useful for debugging or to extract information from templates. - If you are :ref:`developing Jinja2 extensions <writing-extensions>` + If you are :ref:`developing Jinja extensions <writing-extensions>` this gives you a good overview of the node tree generated. """ try: return self._parse(source, name, filename) except TemplateSyntaxError: - exc_info = sys.exc_info() - self.handle_exception(exc_info, source_hint=source) + self.handle_exception(source=source) def _parse(self, source, name, filename): """Internal parsing function used by `parse` and `compile`.""" @@ -510,16 +552,18 @@ def lex(self, source, name=None, filename=None): try: return self.lexer.tokeniter(source, name, filename) except TemplateSyntaxError: - exc_info = sys.exc_info() - self.handle_exception(exc_info, source_hint=source) + self.handle_exception(source=source) def preprocess(self, source, name=None, filename=None): """Preprocesses the source with all extensions. 
This is automatically called for all parsing and compiling methods but *not* for :meth:`lex` because there you usually only want the actual source tokenized. """ - return reduce(lambda s, e: e.preprocess(s, name, filename), - self.iter_extensions(), text_type(source)) + return reduce( + lambda s, e: e.preprocess(s, name, filename), + self.iter_extensions(), + text_type(source), + ) def _tokenize(self, source, name, filename=None, state=None): """Called by the parser to do the preprocessing and filtering @@ -539,8 +583,14 @@ def _generate(self, source, name, filename, defer_init=False): .. versionadded:: 2.5 """ - return generate(source, self, name, filename, defer_init=defer_init, - optimized=self.optimized) + return generate( + source, + self, + name, + filename, + defer_init=defer_init, + optimized=self.optimized, + ) def _compile(self, source, filename): """Internal hook that can be overridden to hook a different compile @@ -548,11 +598,10 @@ def _compile(self, source, filename): .. versionadded:: 2.5 """ - return compile(source, filename, 'exec') + return compile(source, filename, "exec") @internalcode - def compile(self, source, name=None, filename=None, raw=False, - defer_init=False): + def compile(self, source, name=None, filename=None, raw=False, defer_init=False): """Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. @@ -577,18 +626,16 @@ def compile(self, source, name=None, filename=None, raw=False, if isinstance(source, string_types): source_hint = source source = self._parse(source, name, filename) - source = self._generate(source, name, filename, - defer_init=defer_init) + source = self._generate(source, name, filename, defer_init=defer_init) if raw: return source if filename is None: - filename = '<template>' + filename = "<template>" else: filename = encode_filename(filename) return self._compile(source, filename) except TemplateSyntaxError: - exc_info = sys.exc_info() - self.handle_exception(exc_info, source_hint=source_hint) + self.handle_exception(source=source_hint) def compile_expression(self, source, undefined_to_none=True): """A handy helper method that returns a callable that accepts keyword @@ -618,26 +665,32 @@ def compile_expression(self, source, undefined_to_none=True): .. 
versionadded:: 2.1 """ - parser = Parser(self, source, state='variable') - exc_info = None + parser = Parser(self, source, state="variable") try: expr = parser.parse_expression() if not parser.stream.eos: - raise TemplateSyntaxError('chunk after expression', - parser.stream.current.lineno, - None, None) + raise TemplateSyntaxError( + "chunk after expression", parser.stream.current.lineno, None, None + ) expr.set_environment(self) except TemplateSyntaxError: - exc_info = sys.exc_info() - if exc_info is not None: - self.handle_exception(exc_info, source_hint=source) - body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)] + if sys.exc_info() is not None: + self.handle_exception(source=source) + + body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)] template = self.from_string(nodes.Template(body, lineno=1)) return TemplateExpression(template, undefined_to_none) - def compile_templates(self, target, extensions=None, filter_func=None, - zip='deflated', log_function=None, - ignore_errors=True, py_compile=False): + def compile_templates( + self, + target, + extensions=None, + filter_func=None, + zip="deflated", + log_function=None, + ignore_errors=True, + py_compile=False, + ): """Finds all the templates the loader can find, compiles them and stores them in `target`. If `zip` is `None`, instead of in a zipfile, the templates will be stored in a directory. @@ -660,42 +713,52 @@ def compile_templates(self, target, extensions=None, filter_func=None, .. versionadded:: 2.4 """ - from jinja2.loaders import ModuleLoader + from .loaders import ModuleLoader if log_function is None: - log_function = lambda x: None + + def log_function(x): + pass if py_compile: if not PY2 or PYPY: - from warnings import warn - warn(Warning('py_compile has no effect on pypy or Python 3')) + import warnings + + warnings.warn( + "'py_compile=True' has no effect on PyPy or Python" + " 3 and will be removed in version 3.0", + DeprecationWarning, + stacklevel=2, + ) py_compile = False else: import imp import marshal - py_header = imp.get_magic() + \ - u'\xff\xff\xff\xff'.encode('iso-8859-15') + + py_header = imp.get_magic() + u"\xff\xff\xff\xff".encode("iso-8859-15") # Python 3.3 added a source filesize to the header if sys.version_info >= (3, 3): - py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15') + py_header += u"\x00\x00\x00\x00".encode("iso-8859-15") - def write_file(filename, data, mode): + def write_file(filename, data): if zip: info = ZipInfo(filename) info.external_attr = 0o755 << 16 zip_file.writestr(info, data) else: - f = open(os.path.join(target, filename), mode) - try: + if isinstance(data, text_type): + data = data.encode("utf8") + + with open(os.path.join(target, filename), "wb") as f: f.write(data) - finally: - f.close() if zip is not None: from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED - zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED, - stored=ZIP_STORED)[zip]) + + zip_file = ZipFile( + target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip] + ) log_function('Compiling into Zip archive "%s"' % target) else: if not os.path.isdir(target): @@ -717,18 +780,16 @@ def write_file(filename, data, mode): if py_compile: c = self._compile(code, encode_filename(filename)) - write_file(filename + 'c', py_header + - marshal.dumps(c), 'wb') - log_function('Byte-compiled "%s" as %s' % - (name, filename + 'c')) + write_file(filename + "c", py_header + marshal.dumps(c)) + log_function('Byte-compiled "%s" as %s' % (name, filename + "c")) else: - 
write_file(filename, code, 'w') + write_file(filename, code) log_function('Compiled "%s" as %s' % (name, filename)) finally: if zip: zip_file.close() - log_function('Finished compiling templates') + log_function("Finished compiling templates") def list_templates(self, extensions=None, filter_func=None): """Returns a list of templates for this environment. This requires @@ -746,38 +807,29 @@ def list_templates(self, extensions=None, filter_func=None): .. versionadded:: 2.4 """ - x = self.loader.list_templates() + names = self.loader.list_templates() + if extensions is not None: if filter_func is not None: - raise TypeError('either extensions or filter_func ' - 'can be passed, but not both') - filter_func = lambda x: '.' in x and \ - x.rsplit('.', 1)[1] in extensions + raise TypeError( + "either extensions or filter_func can be passed, but not both" + ) + + def filter_func(x): + return "." in x and x.rsplit(".", 1)[1] in extensions + if filter_func is not None: - x = list(ifilter(filter_func, x)) - return x + names = [name for name in names if filter_func(name)] + + return names - def handle_exception(self, exc_info=None, rendered=False, source_hint=None): + def handle_exception(self, source=None): """Exception handling helper. This is used internally to either raise rewritten exceptions or return a rendered traceback for the template. """ - global _make_traceback - if exc_info is None: - exc_info = sys.exc_info() - - # the debugging module is imported when it's used for the first time. - # we're doing a lot of stuff there and for applications that do not - # get any exceptions in template rendering there is no need to load - # all of that. - if _make_traceback is None: - from jinja2.debug import make_traceback as _make_traceback - traceback = _make_traceback(exc_info, source_hint) - if rendered and self.exception_formatter is not None: - return self.exception_formatter(traceback) - if self.exception_handler is not None: - self.exception_handler(traceback) - exc_type, exc_value, tb = traceback.standard_exc_info - reraise(exc_type, exc_value, tb) + from .debug import rewrite_traceback_stack + + reraise(*rewrite_traceback_stack(source=source)) def join_path(self, template, parent): """Join a template with the parent. By default all the lookups are @@ -794,12 +846,13 @@ def join_path(self, template, parent): @internalcode def _load_template(self, name, globals): if self.loader is None: - raise TypeError('no loader for this environment specified') + raise TypeError("no loader for this environment specified") cache_key = (weakref.ref(self.loader), name) if self.cache is not None: template = self.cache.get(cache_key) - if template is not None and (not self.auto_reload or - template.is_up_to_date): + if template is not None and ( + not self.auto_reload or template.is_up_to_date + ): return template template = self.loader.load(self, name, globals) if self.cache is not None: @@ -835,15 +888,24 @@ def select_template(self, names, parent=None, globals=None): before it fails. If it cannot find any of the templates, it will raise a :exc:`TemplatesNotFound` exception. - .. versionadded:: 2.3 + .. versionchanged:: 2.11 + If names is :class:`Undefined`, an :exc:`UndefinedError` is + raised instead. If no templates were found and names + contains :class:`Undefined`, the message is more helpful. .. versionchanged:: 2.4 If `names` contains a :class:`Template` object it is returned from the function unchanged. + + .. 
versionadded:: 2.3 """ + if isinstance(names, Undefined): + names._fail_with_undefined_error() + if not names: - raise TemplatesNotFound(message=u'Tried to select from an empty list ' - u'of templates.') + raise TemplatesNotFound( + message=u"Tried to select from an empty list " u"of templates." + ) globals = self.make_globals(globals) for name in names: if isinstance(name, Template): @@ -852,20 +914,19 @@ def select_template(self, names, parent=None, globals=None): name = self.join_path(name, parent) try: return self._load_template(name, globals) - except TemplateNotFound: + except (TemplateNotFound, UndefinedError): pass raise TemplatesNotFound(names) @internalcode - def get_or_select_template(self, template_name_or_list, - parent=None, globals=None): + def get_or_select_template(self, template_name_or_list, parent=None, globals=None): """Does a typecheck and dispatches to :meth:`select_template` if an iterable of template names is given, otherwise to :meth:`get_template`. .. versionadded:: 2.3 """ - if isinstance(template_name_or_list, string_types): + if isinstance(template_name_or_list, (string_types, Undefined)): return self.get_template(template_name_or_list, parent, globals) elif isinstance(template_name_or_list, Template): return template_name_or_list @@ -916,32 +977,57 @@ class Template(object): StopIteration """ - def __new__(cls, source, - block_start_string=BLOCK_START_STRING, - block_end_string=BLOCK_END_STRING, - variable_start_string=VARIABLE_START_STRING, - variable_end_string=VARIABLE_END_STRING, - comment_start_string=COMMENT_START_STRING, - comment_end_string=COMMENT_END_STRING, - line_statement_prefix=LINE_STATEMENT_PREFIX, - line_comment_prefix=LINE_COMMENT_PREFIX, - trim_blocks=TRIM_BLOCKS, - lstrip_blocks=LSTRIP_BLOCKS, - newline_sequence=NEWLINE_SEQUENCE, - keep_trailing_newline=KEEP_TRAILING_NEWLINE, - extensions=(), - optimized=True, - undefined=Undefined, - finalize=None, - autoescape=False, - enable_async=False): + #: Type of environment to create when creating a template directly + #: rather than through an existing environment. 
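+    #: A subclass can swap this out; a sketch, using the sandboxed
+    #: environment shipped with Jinja::
+    #:
+    #:     class SandboxedTemplate(Template):
+    #:         environment_class = SandboxedEnvironment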
+ environment_class = Environment + + def __new__( + cls, + source, + block_start_string=BLOCK_START_STRING, + block_end_string=BLOCK_END_STRING, + variable_start_string=VARIABLE_START_STRING, + variable_end_string=VARIABLE_END_STRING, + comment_start_string=COMMENT_START_STRING, + comment_end_string=COMMENT_END_STRING, + line_statement_prefix=LINE_STATEMENT_PREFIX, + line_comment_prefix=LINE_COMMENT_PREFIX, + trim_blocks=TRIM_BLOCKS, + lstrip_blocks=LSTRIP_BLOCKS, + newline_sequence=NEWLINE_SEQUENCE, + keep_trailing_newline=KEEP_TRAILING_NEWLINE, + extensions=(), + optimized=True, + undefined=Undefined, + finalize=None, + autoescape=False, + enable_async=False, + ): env = get_spontaneous_environment( - block_start_string, block_end_string, variable_start_string, - variable_end_string, comment_start_string, comment_end_string, - line_statement_prefix, line_comment_prefix, trim_blocks, - lstrip_blocks, newline_sequence, keep_trailing_newline, - frozenset(extensions), optimized, undefined, finalize, autoescape, - None, 0, False, None, enable_async) + cls.environment_class, + block_start_string, + block_end_string, + variable_start_string, + variable_end_string, + comment_start_string, + comment_end_string, + line_statement_prefix, + line_comment_prefix, + trim_blocks, + lstrip_blocks, + newline_sequence, + keep_trailing_newline, + frozenset(extensions), + optimized, + undefined, + finalize, + autoescape, + None, + 0, + False, + None, + enable_async, + ) return env.from_string(source, template_class=cls) @classmethod @@ -949,10 +1035,7 @@ def from_code(cls, environment, code, globals, uptodate=None): """Creates a template object from compiled code and the globals. This is used by the loaders and environment to create a template object. """ - namespace = { - 'environment': environment, - '__file__': code.co_filename - } + namespace = {"environment": environment, "__file__": code.co_filename} exec(code, namespace) rv = cls._from_namespace(environment, namespace, globals) rv._uptodate = uptodate @@ -972,21 +1055,21 @@ def _from_namespace(cls, environment, namespace, globals): t = object.__new__(cls) t.environment = environment t.globals = globals - t.name = namespace['name'] - t.filename = namespace['__file__'] - t.blocks = namespace['blocks'] + t.name = namespace["name"] + t.filename = namespace["__file__"] + t.blocks = namespace["blocks"] # render function and module - t.root_render_func = namespace['root'] + t.root_render_func = namespace["root"] t._module = None # debug and loader helpers - t._debug_info = namespace['debug_info'] + t._debug_info = namespace["debug_info"] t._uptodate = None # store the reference - namespace['environment'] = environment - namespace['__jinja_template__'] = t + namespace["environment"] = environment + namespace["__jinja_template__"] = t return t @@ -1004,8 +1087,7 @@ def render(self, *args, **kwargs): try: return concat(self.root_render_func(self.new_context(vars))) except Exception: - exc_info = sys.exc_info() - return self.environment.handle_exception(exc_info, True) + self.environment.handle_exception() def render_async(self, *args, **kwargs): """This works similar to :meth:`render` but returns a coroutine @@ -1017,8 +1099,9 @@ def render_async(self, *args, **kwargs): await template.render_async(knights='that say nih; asynchronously') """ # see asyncsupport for the actual implementation - raise NotImplementedError('This feature is not available for this ' - 'version of Python') + raise NotImplementedError( + "This feature is not available for this version of 
Python" + ) def stream(self, *args, **kwargs): """Works exactly like :meth:`generate` but returns a @@ -1039,29 +1122,28 @@ def generate(self, *args, **kwargs): for event in self.root_render_func(self.new_context(vars)): yield event except Exception: - exc_info = sys.exc_info() - else: - return - yield self.environment.handle_exception(exc_info, True) + yield self.environment.handle_exception() def generate_async(self, *args, **kwargs): """An async version of :meth:`generate`. Works very similarly but returns an async iterator instead. """ # see asyncsupport for the actual implementation - raise NotImplementedError('This feature is not available for this ' - 'version of Python') + raise NotImplementedError( + "This feature is not available for this version of Python" + ) def new_context(self, vars=None, shared=False, locals=None): """Create a new :class:`Context` for this template. The vars provided will be passed to the template. Per default the globals are added to the context. If shared is set to `True` the data - is passed as it to the context without adding the globals. + is passed as is to the context without adding the globals. `locals` can be a dict of local variables for internal usage. """ - return new_context(self.environment, self.name, self.blocks, - vars, shared, self.globals, locals) + return new_context( + self.environment, self.name, self.blocks, vars, shared, self.globals, locals + ) def make_module(self, vars=None, shared=False, locals=None): """This method works like the :attr:`module` attribute when called @@ -1074,13 +1156,14 @@ def make_module(self, vars=None, shared=False, locals=None): def make_module_async(self, vars=None, shared=False, locals=None): """As template module creation can invoke template code for - asynchronous exections this method must be used instead of the + asynchronous executions this method must be used instead of the normal :meth:`make_module` one. Likewise the module attribute becomes unavailable in async mode. """ # see asyncsupport for the actual implementation - raise NotImplementedError('This feature is not available for this ' - 'version of Python') + raise NotImplementedError( + "This feature is not available for this version of Python" + ) @internalcode def _get_default_module(self): @@ -1124,15 +1207,16 @@ def is_up_to_date(self): @property def debug_info(self): """The debug info mapping.""" - return [tuple(imap(int, x.split('='))) for x in - self._debug_info.split('&')] + if self._debug_info: + return [tuple(map(int, x.split("="))) for x in self._debug_info.split("&")] + return [] def __repr__(self): if self.name is None: - name = 'memory:%x' % id(self) + name = "memory:%x" % id(self) else: name = repr(self.name) - return '<%s %s>' % (self.__class__.__name__, name) + return "<%s %s>" % (self.__class__.__name__, name) @implements_to_string @@ -1145,10 +1229,12 @@ class TemplateModule(object): def __init__(self, template, context, body_stream=None): if body_stream is None: if context.environment.is_async: - raise RuntimeError('Async mode requires a body stream ' - 'to be passed to a template module. Use ' - 'the async methods of the API you are ' - 'using.') + raise RuntimeError( + "Async mode requires a body stream " + "to be passed to a template module. Use " + "the async methods of the API you are " + "using." 
+ ) body_stream = list(template.root_render_func(context)) self._body_stream = body_stream self.__dict__.update(context.get_exported()) @@ -1162,10 +1248,10 @@ def __str__(self): def __repr__(self): if self.__name__ is None: - name = 'memory:%x' % id(self) + name = "memory:%x" % id(self) else: name = repr(self.__name__) - return '<%s %s>' % (self.__class__.__name__, name) + return "<%s %s>" % (self.__class__.__name__, name) class TemplateExpression(object): @@ -1181,7 +1267,7 @@ def __init__(self, template, undefined_to_none): def __call__(self, *args, **kwargs): context = self._template.new_context(dict(*args, **kwargs)) consume(self._template.root_render_func(context)) - rv = context.vars['result'] + rv = context.vars["result"] if self._undefined_to_none and isinstance(rv, Undefined): rv = None return rv @@ -1203,7 +1289,7 @@ def __init__(self, gen): self._gen = gen self.disable_buffering() - def dump(self, fp, encoding=None, errors='strict'): + def dump(self, fp, encoding=None, errors="strict"): """Dump the complete stream into a file or file-like object. Per default unicode strings are written, if you want to encode before writing specify an `encoding`. @@ -1215,15 +1301,15 @@ def dump(self, fp, encoding=None, errors='strict'): close = False if isinstance(fp, string_types): if encoding is None: - encoding = 'utf-8' - fp = open(fp, 'wb') + encoding = "utf-8" + fp = open(fp, "wb") close = True try: if encoding is not None: iterable = (x.encode(encoding, errors) for x in self) else: iterable = self - if hasattr(fp, 'writelines'): + if hasattr(fp, "writelines"): fp.writelines(iterable) else: for item in iterable: @@ -1259,7 +1345,7 @@ def _buffered_generator(self, size): def enable_buffering(self, size=5): """Enable buffering. Buffer `size` items before yielding them.""" if size <= 1: - raise ValueError('buffer size too small') + raise ValueError("buffer size too small") self.buffered = True self._next = partial(next, self._buffered_generator(size)) diff --git a/pipenv/vendor/jinja2/exceptions.py b/pipenv/vendor/jinja2/exceptions.py index c018a33e32..0bf2003e30 100644 --- a/pipenv/vendor/jinja2/exceptions.py +++ b/pipenv/vendor/jinja2/exceptions.py @@ -1,23 +1,18 @@ # -*- coding: utf-8 -*- -""" - jinja2.exceptions - ~~~~~~~~~~~~~~~~~ - - Jinja exceptions. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" -from jinja2._compat import imap, text_type, PY2, implements_to_string +from ._compat import imap +from ._compat import implements_to_string +from ._compat import PY2 +from ._compat import text_type class TemplateError(Exception): """Baseclass for all template errors.""" if PY2: + def __init__(self, message=None): if message is not None: - message = text_type(message).encode('utf-8') + message = text_type(message).encode("utf-8") Exception.__init__(self, message) @property @@ -25,11 +20,13 @@ def message(self): if self.args: message = self.args[0] if message is not None: - return message.decode('utf-8', 'replace') + return message.decode("utf-8", "replace") def __unicode__(self): - return self.message or u'' + return self.message or u"" + else: + def __init__(self, message=None): Exception.__init__(self, message) @@ -43,16 +40,28 @@ def message(self): @implements_to_string class TemplateNotFound(IOError, LookupError, TemplateError): - """Raised if a template does not exist.""" + """Raised if a template does not exist. + + .. 
versionchanged:: 2.11 + If the given name is :class:`Undefined` and no message was + provided, an :exc:`UndefinedError` is raised. + """ # looks weird, but removes the warning descriptor that just # bogusly warns us about message being deprecated message = None def __init__(self, name, message=None): - IOError.__init__(self) + IOError.__init__(self, name) + if message is None: + from .runtime import Undefined + + if isinstance(name, Undefined): + name._fail_with_undefined_error() + message = name + self.message = message self.name = name self.templates = [name] @@ -66,13 +75,28 @@ class TemplatesNotFound(TemplateNotFound): are selected. This is a subclass of :class:`TemplateNotFound` exception, so just catching the base exception will catch both. + .. versionchanged:: 2.11 + If a name in the list of names is :class:`Undefined`, a message + about it being undefined is shown rather than the empty string. + .. versionadded:: 2.2 """ def __init__(self, names=(), message=None): if message is None: - message = u'none of the templates given were found: ' + \ - u', '.join(imap(text_type, names)) + from .runtime import Undefined + + parts = [] + + for name in names: + if isinstance(name, Undefined): + parts.append(name._undefined_message) + else: + parts.append(name) + + message = u"none of the templates given were found: " + u", ".join( + imap(text_type, parts) + ) TemplateNotFound.__init__(self, names and names[-1] or None, message) self.templates = list(names) @@ -98,11 +122,11 @@ def __str__(self): return self.message # otherwise attach some stuff - location = 'line %d' % self.lineno + location = "line %d" % self.lineno name = self.filename or self.name if name: location = 'File "%s", %s' % (name, location) - lines = [self.message, ' ' + location] + lines = [self.message, " " + location] # if the source is set, add the line to the output if self.source is not None: @@ -111,9 +135,16 @@ def __str__(self): except IndexError: line = None if line: - lines.append(' ' + line.strip()) + lines.append(" " + line.strip()) + + return u"\n".join(lines) - return u'\n'.join(lines) + def __reduce__(self): + # https://bugs.python.org/issue1692335 Exceptions that take + # multiple required arguments have problems with pickling. + # Without this, raises TypeError: __init__() missing 1 required + # positional argument: 'lineno' + return self.__class__, (self.message, self.lineno, self.name, self.filename) class TemplateAssertionError(TemplateSyntaxError): diff --git a/pipenv/vendor/jinja2/ext.py b/pipenv/vendor/jinja2/ext.py index 0734a84f73..9141be4dac 100644 --- a/pipenv/vendor/jinja2/ext.py +++ b/pipenv/vendor/jinja2/ext.py @@ -1,42 +1,49 @@ # -*- coding: utf-8 -*- -""" - jinja2.ext - ~~~~~~~~~~ - - Jinja extensions allow to add custom tags similar to the way django custom - tags work. By default two example extensions exist: an i18n and a cache - extension. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD. 
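The __reduce__ hook added to TemplateSyntaxError above makes the exception picklable again despite its multi-argument constructor. A quick check, assuming upstream Jinja 2.11 behavior:

import pickle
from jinja2 import TemplateSyntaxError

err = TemplateSyntaxError("unexpected 'end of template'", 3, name="page.html")
copy = pickle.loads(pickle.dumps(err))
print(copy.lineno, copy.name)  # 3 page.html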
-""" +"""Extension API for adding custom tags and behavior.""" +import pprint import re - -from jinja2 import nodes -from jinja2.defaults import BLOCK_START_STRING, \ - BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \ - COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \ - LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \ - KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS -from jinja2.environment import Environment -from jinja2.runtime import concat -from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError -from jinja2.utils import contextfunction, import_string, Markup -from jinja2._compat import with_metaclass, string_types, iteritems - +from sys import version_info + +from markupsafe import Markup + +from . import nodes +from ._compat import iteritems +from ._compat import string_types +from ._compat import with_metaclass +from .defaults import BLOCK_END_STRING +from .defaults import BLOCK_START_STRING +from .defaults import COMMENT_END_STRING +from .defaults import COMMENT_START_STRING +from .defaults import KEEP_TRAILING_NEWLINE +from .defaults import LINE_COMMENT_PREFIX +from .defaults import LINE_STATEMENT_PREFIX +from .defaults import LSTRIP_BLOCKS +from .defaults import NEWLINE_SEQUENCE +from .defaults import TRIM_BLOCKS +from .defaults import VARIABLE_END_STRING +from .defaults import VARIABLE_START_STRING +from .environment import Environment +from .exceptions import TemplateAssertionError +from .exceptions import TemplateSyntaxError +from .nodes import ContextReference +from .runtime import concat +from .utils import contextfunction +from .utils import import_string # the only real useful gettext functions for a Jinja template. Note # that ugettext must be assigned to gettext as Jinja doesn't support # non unicode strings. -GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext') +GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext") + +_ws_re = re.compile(r"\s*\n\s*") class ExtensionRegistry(type): """Gives the extension an unique identifier.""" - def __new__(cls, name, bases, d): - rv = type.__new__(cls, name, bases, d) - rv.identifier = rv.__module__ + '.' + rv.__name__ + def __new__(mcs, name, bases, d): + rv = type.__new__(mcs, name, bases, d) + rv.identifier = rv.__module__ + "." + rv.__name__ return rv @@ -91,10 +98,6 @@ def filter_stream(self, stream): to filter tokens returned. This method has to return an iterable of :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a :class:`~jinja2.lexer.TokenStream`. - - In the `ext` folder of the Jinja2 source distribution there is a file - called `inlinegettext.py` which implements a filter that utilizes this - method. """ return stream @@ -116,8 +119,9 @@ def attr(self, name, lineno=None): """ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno) - def call_method(self, name, args=None, kwargs=None, dyn_args=None, - dyn_kwargs=None, lineno=None): + def call_method( + self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None + ): """Call a method of the extension. This is a shortcut for :meth:`attr` + :class:`jinja2.nodes.Call`. 
""" @@ -125,13 +129,19 @@ def call_method(self, name, args=None, kwargs=None, dyn_args=None, args = [] if kwargs is None: kwargs = [] - return nodes.Call(self.attr(name, lineno=lineno), args, kwargs, - dyn_args, dyn_kwargs, lineno=lineno) + return nodes.Call( + self.attr(name, lineno=lineno), + args, + kwargs, + dyn_args, + dyn_kwargs, + lineno=lineno, + ) @contextfunction def _gettext_alias(__context, *args, **kwargs): - return __context.call(__context.resolve('gettext'), *args, **kwargs) + return __context.call(__context.resolve("gettext"), *args, **kwargs) def _make_new_gettext(func): @@ -140,24 +150,31 @@ def gettext(__context, __string, **variables): rv = __context.call(func, __string) if __context.eval_ctx.autoescape: rv = Markup(rv) + # Always treat as a format string, even if there are no + # variables. This makes translation strings more consistent + # and predictable. This requires escaping return rv % variables + return gettext def _make_new_ngettext(func): @contextfunction def ngettext(__context, __singular, __plural, __num, **variables): - variables.setdefault('num', __num) + variables.setdefault("num", __num) rv = __context.call(func, __singular, __plural, __num) if __context.eval_ctx.autoescape: rv = Markup(rv) + # Always treat as a format string, see gettext comment above. return rv % variables + return ngettext class InternationalizationExtension(Extension): - """This extension adds gettext support to Jinja2.""" - tags = set(['trans']) + """This extension adds gettext support to Jinja.""" + + tags = {"trans"} # TODO: the i18n extension is currently reevaluating values in a few # situations. Take this example: @@ -168,30 +185,28 @@ class InternationalizationExtension(Extension): def __init__(self, environment): Extension.__init__(self, environment) - environment.globals['_'] = _gettext_alias + environment.globals["_"] = _gettext_alias environment.extend( install_gettext_translations=self._install, install_null_translations=self._install_null, install_gettext_callables=self._install_callables, uninstall_gettext_translations=self._uninstall, extract_translations=self._extract, - newstyle_gettext=False + newstyle_gettext=False, ) def _install(self, translations, newstyle=None): - gettext = getattr(translations, 'ugettext', None) + gettext = getattr(translations, "ugettext", None) if gettext is None: gettext = translations.gettext - ngettext = getattr(translations, 'ungettext', None) + ngettext = getattr(translations, "ungettext", None) if ngettext is None: ngettext = translations.ngettext self._install_callables(gettext, ngettext, newstyle) def _install_null(self, newstyle=None): self._install_callables( - lambda x: x, - lambda s, p, n: (n != 1 and (p,) or (s,))[0], - newstyle + lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle ) def _install_callables(self, gettext, ngettext, newstyle=None): @@ -200,13 +215,10 @@ def _install_callables(self, gettext, ngettext, newstyle=None): if self.environment.newstyle_gettext: gettext = _make_new_gettext(gettext) ngettext = _make_new_ngettext(ngettext) - self.environment.globals.update( - gettext=gettext, - ngettext=ngettext - ) + self.environment.globals.update(gettext=gettext, ngettext=ngettext) def _uninstall(self, translations): - for key in 'gettext', 'ngettext': + for key in "gettext", "ngettext": self.environment.globals.pop(key, None) def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS): @@ -226,41 +238,44 @@ def parse(self, parser): plural_expr_assignment = None variables = {} trimmed = None - while 
parser.stream.current.type != 'block_end': + while parser.stream.current.type != "block_end": if variables: - parser.stream.expect('comma') + parser.stream.expect("comma") # skip colon for python compatibility - if parser.stream.skip_if('colon'): + if parser.stream.skip_if("colon"): break - name = parser.stream.expect('name') + name = parser.stream.expect("name") if name.value in variables: - parser.fail('translatable variable %r defined twice.' % - name.value, name.lineno, - exc=TemplateAssertionError) + parser.fail( + "translatable variable %r defined twice." % name.value, + name.lineno, + exc=TemplateAssertionError, + ) # expressions - if parser.stream.current.type == 'assign': + if parser.stream.current.type == "assign": next(parser.stream) variables[name.value] = var = parser.parse_expression() - elif trimmed is None and name.value in ('trimmed', 'notrimmed'): - trimmed = name.value == 'trimmed' + elif trimmed is None and name.value in ("trimmed", "notrimmed"): + trimmed = name.value == "trimmed" continue else: - variables[name.value] = var = nodes.Name(name.value, 'load') + variables[name.value] = var = nodes.Name(name.value, "load") if plural_expr is None: if isinstance(var, nodes.Call): - plural_expr = nodes.Name('_trans', 'load') + plural_expr = nodes.Name("_trans", "load") variables[name.value] = plural_expr plural_expr_assignment = nodes.Assign( - nodes.Name('_trans', 'store'), var) + nodes.Name("_trans", "store"), var + ) else: plural_expr = var - num_called_num = name.value == 'num' + num_called_num = name.value == "num" - parser.stream.expect('block_end') + parser.stream.expect("block_end") plural = None have_plural = False @@ -271,22 +286,24 @@ def parse(self, parser): if singular_names: referenced.update(singular_names) if plural_expr is None: - plural_expr = nodes.Name(singular_names[0], 'load') - num_called_num = singular_names[0] == 'num' + plural_expr = nodes.Name(singular_names[0], "load") + num_called_num = singular_names[0] == "num" # if we have a pluralize block, we parse that too - if parser.stream.current.test('name:pluralize'): + if parser.stream.current.test("name:pluralize"): have_plural = True next(parser.stream) - if parser.stream.current.type != 'block_end': - name = parser.stream.expect('name') + if parser.stream.current.type != "block_end": + name = parser.stream.expect("name") if name.value not in variables: - parser.fail('unknown variable %r for pluralization' % - name.value, name.lineno, - exc=TemplateAssertionError) + parser.fail( + "unknown variable %r for pluralization" % name.value, + name.lineno, + exc=TemplateAssertionError, + ) plural_expr = variables[name.value] - num_called_num = name.value == 'num' - parser.stream.expect('block_end') + num_called_num = name.value == "num" + parser.stream.expect("block_end") plural_names, plural = self._parse_block(parser, False) next(parser.stream) referenced.update(plural_names) @@ -296,88 +313,97 @@ def parse(self, parser): # register free names as simple name expressions for var in referenced: if var not in variables: - variables[var] = nodes.Name(var, 'load') + variables[var] = nodes.Name(var, "load") if not have_plural: plural_expr = None elif plural_expr is None: - parser.fail('pluralize without variables', lineno) + parser.fail("pluralize without variables", lineno) if trimmed is None: - trimmed = self.environment.policies['ext.i18n.trimmed'] + trimmed = self.environment.policies["ext.i18n.trimmed"] if trimmed: singular = self._trim_whitespace(singular) if plural: plural = self._trim_whitespace(plural) 
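What the parsing loop above accepts, end to end; a hedged sketch using the stock `jinja2.ext.i18n` extension with null translations (identity `gettext`/`ngettext`, so no catalog is needed):

```python
from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.i18n"])
env.install_null_translations()  # identity gettext/ngettext

tmpl = env.from_string(
    "{% trans count=users|length %}"
    "One user.{% pluralize %}{{ count }} users."
    "{% endtrans %}"
)
print(tmpl.render(users=["alice", "bob"]))  # -> 2 users.
```

The `count=users|length` assignment becomes the plural expression, and `{{ count }}` inside the block is rewritten to a `%(count)s` placeholder before the string reaches `ngettext`.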
- node = self._make_node(singular, plural, variables, plural_expr, - bool(referenced), - num_called_num and have_plural) + node = self._make_node( + singular, + plural, + variables, + plural_expr, + bool(referenced), + num_called_num and have_plural, + ) node.set_lineno(lineno) if plural_expr_assignment is not None: return [plural_expr_assignment, node] else: return node - def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')): - return _ws_re.sub(' ', string.strip()) + def _trim_whitespace(self, string, _ws_re=_ws_re): + return _ws_re.sub(" ", string.strip()) def _parse_block(self, parser, allow_pluralize): """Parse until the next block tag with a given name.""" referenced = [] buf = [] while 1: - if parser.stream.current.type == 'data': - buf.append(parser.stream.current.value.replace('%', '%%')) + if parser.stream.current.type == "data": + buf.append(parser.stream.current.value.replace("%", "%%")) next(parser.stream) - elif parser.stream.current.type == 'variable_begin': + elif parser.stream.current.type == "variable_begin": next(parser.stream) - name = parser.stream.expect('name').value + name = parser.stream.expect("name").value referenced.append(name) - buf.append('%%(%s)s' % name) - parser.stream.expect('variable_end') - elif parser.stream.current.type == 'block_begin': + buf.append("%%(%s)s" % name) + parser.stream.expect("variable_end") + elif parser.stream.current.type == "block_begin": next(parser.stream) - if parser.stream.current.test('name:endtrans'): + if parser.stream.current.test("name:endtrans"): break - elif parser.stream.current.test('name:pluralize'): + elif parser.stream.current.test("name:pluralize"): if allow_pluralize: break - parser.fail('a translatable section can have only one ' - 'pluralize section') - parser.fail('control structures in translatable sections are ' - 'not allowed') + parser.fail( + "a translatable section can have only one pluralize section" + ) + parser.fail( + "control structures in translatable sections are not allowed" + ) elif parser.stream.eos: - parser.fail('unclosed translation block') + parser.fail("unclosed translation block") else: - assert False, 'internal parser error' + raise RuntimeError("internal parser error") return referenced, concat(buf) - def _make_node(self, singular, plural, variables, plural_expr, - vars_referenced, num_called_num): + def _make_node( + self, singular, plural, variables, plural_expr, vars_referenced, num_called_num + ): """Generates a useful node from the data provided.""" # no variables referenced? no need to escape for old style # gettext invocations only if there are vars. 
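The whitespace collapsing done by `_trim_whitespace` above can also be forced globally through the `ext.i18n.trimmed` policy instead of writing `trimmed` in every block; a small sketch:

```python
from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.i18n"])
env.policies["ext.i18n.trimmed"] = True  # as if every {% trans %} said "trimmed"
env.install_null_translations()

tmpl = env.from_string("{% trans %}\n  hello\n  world\n{% endtrans %}")
print(repr(tmpl.render()))  # -> 'hello world'
```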
if not vars_referenced and not self.environment.newstyle_gettext: - singular = singular.replace('%%', '%') + singular = singular.replace("%%", "%") if plural: - plural = plural.replace('%%', '%') + plural = plural.replace("%%", "%") # singular only: if plural_expr is None: - gettext = nodes.Name('gettext', 'load') - node = nodes.Call(gettext, [nodes.Const(singular)], - [], None, None) + gettext = nodes.Name("gettext", "load") + node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None) # singular and plural else: - ngettext = nodes.Name('ngettext', 'load') - node = nodes.Call(ngettext, [ - nodes.Const(singular), - nodes.Const(plural), - plural_expr - ], [], None, None) + ngettext = nodes.Name("ngettext", "load") + node = nodes.Call( + ngettext, + [nodes.Const(singular), nodes.Const(plural), plural_expr], + [], + None, + None, + ) # in case newstyle gettext is used, the method is powerful # enough to handle the variable expansion and autoescape @@ -386,7 +412,7 @@ def _make_node(self, singular, plural, variables, plural_expr, for key, value in iteritems(variables): # the function adds that later anyways in case num was # called num, so just skip it. - if num_called_num and key == 'num': + if num_called_num and key == "num": continue node.kwargs.append(nodes.Keyword(key, value)) @@ -396,18 +422,24 @@ def _make_node(self, singular, plural, variables, plural_expr, # environment with autoescaping turned on node = nodes.MarkSafeIfAutoescape(node) if variables: - node = nodes.Mod(node, nodes.Dict([ - nodes.Pair(nodes.Const(key), value) - for key, value in variables.items() - ])) + node = nodes.Mod( + node, + nodes.Dict( + [ + nodes.Pair(nodes.Const(key), value) + for key, value in variables.items() + ] + ), + ) return nodes.Output([node]) class ExprStmtExtension(Extension): - """Adds a `do` tag to Jinja2 that works like the print statement just + """Adds a `do` tag to Jinja that works like the print statement just that it doesn't print the return value. """ - tags = set(['do']) + + tags = set(["do"]) def parse(self, parser): node = nodes.ExprStmt(lineno=next(parser.stream).lineno) @@ -417,11 +449,12 @@ def parse(self, parser): class LoopControlExtension(Extension): """Adds break and continue to the template engine.""" - tags = set(['break', 'continue']) + + tags = set(["break", "continue"]) def parse(self, parser): token = next(parser.stream) - if token.value == 'break': + if token.value == "break": return nodes.Break(lineno=token.lineno) return nodes.Continue(lineno=token.lineno) @@ -434,8 +467,50 @@ class AutoEscapeExtension(Extension): pass -def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, - babel_style=True): +class DebugExtension(Extension): + """A ``{% debug %}`` tag that dumps the available variables, + filters, and tests. + + .. code-block:: html+jinja + + <pre>{% debug %}</pre> + + .. code-block:: text + + {'context': {'cycler': <class 'jinja2.utils.Cycler'>, + ..., + 'namespace': <class 'jinja2.utils.Namespace'>}, + 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd', + ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'], + 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined', + ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']} + + .. 
versionadded:: 2.11.0 + """ + + tags = {"debug"} + + def parse(self, parser): + lineno = parser.stream.expect("name:debug").lineno + context = ContextReference() + result = self.call_method("_render", [context], lineno=lineno) + return nodes.Output([result], lineno=lineno) + + def _render(self, context): + result = { + "context": context.get_all(), + "filters": sorted(self.environment.filters.keys()), + "tests": sorted(self.environment.tests.keys()), + } + + # Set the depth since the intent is to show the top few names. + if version_info[:2] >= (3, 4): + return pprint.pformat(result, depth=3, compact=True) + else: + return pprint.pformat(result, depth=3) + + +def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True): """Extract localizable strings from the given template node. Per default this function returns matches in babel style that means non string parameters as well as keyword arguments are returned as `None`. This @@ -471,19 +546,20 @@ def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, extraction interface or extract comments yourself. """ for node in node.find_all(nodes.Call): - if not isinstance(node.node, nodes.Name) or \ - node.node.name not in gettext_functions: + if ( + not isinstance(node.node, nodes.Name) + or node.node.name not in gettext_functions + ): continue strings = [] for arg in node.args: - if isinstance(arg, nodes.Const) and \ - isinstance(arg.value, string_types): + if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types): strings.append(arg.value) else: strings.append(None) - for arg in node.kwargs: + for _ in node.kwargs: strings.append(None) if node.dyn_args is not None: strings.append(None) @@ -517,9 +593,10 @@ def __init__(self, tokens, comment_tags): def find_backwards(self, offset): try: - for _, token_type, token_value in \ - reversed(self.tokens[self.offset:offset]): - if token_type in ('comment', 'linecomment'): + for _, token_type, token_value in reversed( + self.tokens[self.offset : offset] + ): + if token_type in ("comment", "linecomment"): try: prefix, comment = token_value.split(None, 1) except ValueError: @@ -533,7 +610,7 @@ def find_backwards(self, offset): def find_comments(self, lineno): if not self.comment_tags or self.last_lineno > lineno: return [] - for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]): + for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]): if token_lineno > lineno: return self.find_backwards(self.offset + idx) return self.find_backwards(len(self.tokens)) @@ -545,7 +622,7 @@ def babel_extract(fileobj, keywords, comment_tags, options): .. versionchanged:: 2.3 Basic support for translation comments was added. If `comment_tags` is now set to a list of keywords for extraction, the extractor will - try to find the best preceeding comment that begins with one of the + try to find the best preceding comment that begins with one of the keywords. For best results, make sure to not have more than one gettext call in one line of code and the matching comment in the same line or the line before. 
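A short sketch of the extraction API documented above, assuming the vendored jinja2 2.11 layout (`extract_from_ast` is importable from `jinja2.ext`):

```python
from jinja2 import Environment
from jinja2.ext import extract_from_ast

env = Environment(extensions=["jinja2.ext.i18n"])
source = '{{ gettext("Hello %(name)s!") }}{% trans %}Bye{% endtrans %}'
for lineno, funcname, message in extract_from_ast(env.parse(source)):
    print(lineno, funcname, message)
# 1 gettext Hello %(name)s!
# 1 gettext Bye
```

`babel_extract`, patched in the next hunk, wraps this same machinery behind Babel's extractor interface, building a one-off `Environment` from the string options it receives.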
@@ -568,7 +645,7 @@ def babel_extract(fileobj, keywords, comment_tags, options): (comments will be empty currently) """ extensions = set() - for extension in options.get('extensions', '').split(','): + for extension in options.get("extensions", "").split(","): extension = extension.strip() if not extension: continue @@ -577,38 +654,37 @@ def babel_extract(fileobj, keywords, comment_tags, options): extensions.add(InternationalizationExtension) def getbool(options, key, default=False): - return options.get(key, str(default)).lower() in \ - ('1', 'on', 'yes', 'true') + return options.get(key, str(default)).lower() in ("1", "on", "yes", "true") - silent = getbool(options, 'silent', True) + silent = getbool(options, "silent", True) environment = Environment( - options.get('block_start_string', BLOCK_START_STRING), - options.get('block_end_string', BLOCK_END_STRING), - options.get('variable_start_string', VARIABLE_START_STRING), - options.get('variable_end_string', VARIABLE_END_STRING), - options.get('comment_start_string', COMMENT_START_STRING), - options.get('comment_end_string', COMMENT_END_STRING), - options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX, - options.get('line_comment_prefix') or LINE_COMMENT_PREFIX, - getbool(options, 'trim_blocks', TRIM_BLOCKS), - getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS), + options.get("block_start_string", BLOCK_START_STRING), + options.get("block_end_string", BLOCK_END_STRING), + options.get("variable_start_string", VARIABLE_START_STRING), + options.get("variable_end_string", VARIABLE_END_STRING), + options.get("comment_start_string", COMMENT_START_STRING), + options.get("comment_end_string", COMMENT_END_STRING), + options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX, + options.get("line_comment_prefix") or LINE_COMMENT_PREFIX, + getbool(options, "trim_blocks", TRIM_BLOCKS), + getbool(options, "lstrip_blocks", LSTRIP_BLOCKS), NEWLINE_SEQUENCE, - getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE), + getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE), frozenset(extensions), cache_size=0, - auto_reload=False + auto_reload=False, ) - if getbool(options, 'trimmed'): - environment.policies['ext.i18n.trimmed'] = True - if getbool(options, 'newstyle_gettext'): + if getbool(options, "trimmed"): + environment.policies["ext.i18n.trimmed"] = True + if getbool(options, "newstyle_gettext"): environment.newstyle_gettext = True - source = fileobj.read().decode(options.get('encoding', 'utf-8')) + source = fileobj.read().decode(options.get("encoding", "utf-8")) try: node = environment.parse(source) tokens = list(environment.lex(environment.preprocess(source))) - except TemplateSyntaxError as e: + except TemplateSyntaxError: if not silent: raise # skip templates with syntax errors @@ -625,3 +701,4 @@ def getbool(options, key, default=False): loopcontrols = LoopControlExtension with_ = WithExtension autoescape = AutoEscapeExtension +debug = DebugExtension diff --git a/pipenv/vendor/jinja2/filters.py b/pipenv/vendor/jinja2/filters.py index 267ddddaa0..1af7ac88a7 100644 --- a/pipenv/vendor/jinja2/filters.py +++ b/pipenv/vendor/jinja2/filters.py @@ -1,29 +1,31 @@ # -*- coding: utf-8 -*- -""" - jinja2.filters - ~~~~~~~~~~~~~~ - - Bundled jinja filters. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. 
-""" -import re +"""Built-in template filters used with the ``|`` operator.""" import math import random +import re import warnings - -from itertools import groupby, chain from collections import namedtuple -from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \ - unicode_urlencode, htmlsafe_json_dumps -from jinja2.runtime import Undefined -from jinja2.exceptions import FilterArgumentError -from jinja2._compat import imap, string_types, text_type, iteritems, PY2 +from itertools import chain +from itertools import groupby + +from markupsafe import escape +from markupsafe import Markup +from markupsafe import soft_unicode +from ._compat import abc +from ._compat import imap +from ._compat import iteritems +from ._compat import string_types +from ._compat import text_type +from .exceptions import FilterArgumentError +from .runtime import Undefined +from .utils import htmlsafe_json_dumps +from .utils import pformat +from .utils import unicode_urlencode +from .utils import urlize -_word_re = re.compile(r'\w+', re.UNICODE) -_word_beginning_split_re = re.compile(r'([-\s\(\{\[\<]+)', re.UNICODE) +_word_re = re.compile(r"\w+", re.UNICODE) +_word_beginning_split_re = re.compile(r"([-\s\(\{\[\<]+)", re.UNICODE) def contextfilter(f): @@ -59,23 +61,21 @@ def ignore_case(value): return value.lower() if isinstance(value, string_types) else value -def make_attrgetter(environment, attribute, postprocess=None): +def make_attrgetter(environment, attribute, postprocess=None, default=None): """Returns a callable that looks up the given attribute from a passed object with the rules of the environment. Dots are allowed to access attributes of attributes. Integer parts in paths are looked up as integers. """ - if attribute is None: - attribute = [] - elif isinstance(attribute, string_types): - attribute = [int(x) if x.isdigit() else x for x in attribute.split('.')] - else: - attribute = [attribute] + attribute = _prepare_attribute_parts(attribute) def attrgetter(item): for part in attribute: item = environment.getitem(item, part) + if default and isinstance(item, Undefined): + item = default + if postprocess is not None: item = postprocess(item) @@ -84,32 +84,84 @@ def attrgetter(item): return attrgetter +def make_multi_attrgetter(environment, attribute, postprocess=None): + """Returns a callable that looks up the given comma separated + attributes from a passed object with the rules of the environment. + Dots are allowed to access attributes of each attribute. Integer + parts in paths are looked up as integers. + + The value returned by the returned callable is a list of extracted + attribute values. + + Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc. + """ + attribute_parts = ( + attribute.split(",") if isinstance(attribute, string_types) else [attribute] + ) + attribute = [ + _prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts + ] + + def attrgetter(item): + items = [None] * len(attribute) + for i, attribute_part in enumerate(attribute): + item_i = item + for part in attribute_part: + item_i = environment.getitem(item_i, part) + + if postprocess is not None: + item_i = postprocess(item_i) + + items[i] = item_i + return items + + return attrgetter + + +def _prepare_attribute_parts(attr): + if attr is None: + return [] + elif isinstance(attr, string_types): + return [int(x) if x.isdigit() else x for x in attr.split(".")] + else: + return [attr] + + def do_forceescape(value): """Enforce HTML escaping. 
This will probably double escape variables.""" - if hasattr(value, '__html__'): + if hasattr(value, "__html__"): value = value.__html__() return escape(text_type(value)) def do_urlencode(value): - """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both - dictionaries and regular strings as well as pairwise iterables. + """Quote data for use in a URL path or query using UTF-8. + + Basic wrapper around :func:`urllib.parse.quote` when given a + string, or :func:`urllib.parse.urlencode` for a dict or iterable. + + :param value: Data to quote. A string will be quoted directly. A + dict or iterable of ``(key, value)`` pairs will be joined as a + query string. + + When given a string, "/" is not quoted. HTTP servers treat "/" and + "%2F" equivalently in paths. If you need quoted slashes, use the + ``|replace("/", "%2F")`` filter. .. versionadded:: 2.7 """ - itemiter = None - if isinstance(value, dict): - itemiter = iteritems(value) - elif not isinstance(value, string_types): - try: - itemiter = iter(value) - except TypeError: - pass - if itemiter is None: + if isinstance(value, string_types) or not isinstance(value, abc.Iterable): return unicode_urlencode(value) - return u'&'.join(unicode_urlencode(k) + '=' + - unicode_urlencode(v, for_qs=True) - for k, v in itemiter) + + if isinstance(value, dict): + items = iteritems(value) + else: + items = iter(value) + + return u"&".join( + "%s=%s" % (unicode_urlencode(k, for_qs=True), unicode_urlencode(v, for_qs=True)) + for k, v in items + ) @evalcontextfilter @@ -132,8 +184,11 @@ def do_replace(eval_ctx, s, old, new, count=None): count = -1 if not eval_ctx.autoescape: return text_type(s).replace(text_type(old), text_type(new), count) - if hasattr(old, '__html__') or hasattr(new, '__html__') and \ - not hasattr(s, '__html__'): + if ( + hasattr(old, "__html__") + or hasattr(new, "__html__") + and not hasattr(s, "__html__") + ): s = escape(s) else: s = soft_unicode(s) @@ -174,13 +229,13 @@ def do_xmlattr(_eval_ctx, d, autospace=True): As you can see it automatically prepends a space in front of the item if the filter returned something unless the second parameter is false. """ - rv = u' '.join( + rv = u" ".join( u'%s="%s"' % (escape(key), escape(value)) for key, value in iteritems(d) if value is not None and not isinstance(value, Undefined) ) if autospace and rv: - rv = u' ' + rv + rv = u" " + rv if _eval_ctx.autoescape: rv = Markup(rv) return rv @@ -197,13 +252,16 @@ def do_title(s): """Return a titlecased version of the value. I.e. words will start with uppercase letters, all remaining characters are lowercase. """ - return ''.join( - [item[0].upper() + item[1:].lower() - for item in _word_beginning_split_re.split(soft_unicode(s)) - if item]) + return "".join( + [ + item[0].upper() + item[1:].lower() + for item in _word_beginning_split_re.split(soft_unicode(s)) + if item + ] + ) -def do_dictsort(value, case_sensitive=False, by='key', reverse=False): +def do_dictsort(value, case_sensitive=False, by="key", reverse=False): """Sort a dict and yield (key, value) pairs. 
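The reworked ``urlencode`` above distinguishes plain strings from dicts and pair iterables; a quick sketch of both paths:

```python
from jinja2 import Environment

env = Environment()
# dicts (and iterables of (key, value) pairs) become query strings,
# with both key and value quoted in query-string mode
print(env.from_string("{{ d|urlencode }}").render(d={"a b": "c/d"}))
# -> a+b=c%2Fd

# plain strings are path-quoted; "/" is deliberately left alone
print(env.from_string("{{ 'a path/with space'|urlencode }}").render())
# -> a%20path/with%20space
```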
Because python dicts are unsorted you may want to use this function to order them by either key or value: .. sourcecode:: jinja {% for item in mydict|dictsort %} sort the dict by key, case insensitive {% for item in mydict|dictsort(reverse=true) %} sort the dict by key, case insensitive, reverse order {% for item in mydict|dictsort(true) %} sort the dict by key, case sensitive {% for item in mydict|dictsort(false, 'value') %} sort the dict by value, case insensitive """ - if by == 'key': + if by == "key": pos = 0 - elif by == 'value': + elif by == "value": pos = 1 else: - raise FilterArgumentError( - 'You can only sort by either "key" or "value"' - ) + raise FilterArgumentError('You can only sort by either "key" or "value"') def sort_func(item): value = item[pos] @@ -243,48 +299,62 @@ def sort_func(item): @environmentfilter -def do_sort( - environment, value, reverse=False, case_sensitive=False, attribute=None -): - """Sort an iterable. Per default it sorts ascending, if you pass it - true as first argument it will reverse the sorting. +def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None): + """Sort an iterable using Python's :func:`sorted`. + + .. sourcecode:: jinja + + {% for city in cities|sort %} + ... + {% endfor %} - If the iterable is made of strings the third parameter can be used to - control the case sensitiveness of the comparison which is disabled by - default. + :param reverse: Sort descending instead of ascending. + :param case_sensitive: When sorting strings, sort upper and lower + case separately. + :param attribute: When sorting objects or dicts, an attribute or + key to sort by. Can use dot notation like ``"address.city"``. + Can be a list of attributes like ``"age,name"``. + + The sort is stable, it does not change the relative order of + elements that compare equal. This makes it possible to chain + sorts on different attributes and ordering. .. sourcecode:: jinja - {% for item in iterable|sort %} + {% for user in users|sort(attribute="name") + |sort(reverse=true, attribute="age") %} ... {% endfor %} - It is also possible to sort by an attribute (for example to sort - by the date of an object) by specifying the `attribute` parameter: + As a shortcut to chaining when the direction is the same for all + attributes, pass a comma separated list of attributes. .. sourcecode:: jinja - {% for item in iterable|sort(attribute='date') %} + {% for user in users|sort(attribute="age,name") %} ... {% endfor %} + .. versionchanged:: 2.11.0 + The ``attribute`` parameter can be a comma separated list of + attributes, e.g. ``"age,name"``. + .. versionchanged:: 2.6 - The `attribute` parameter was added. + The ``attribute`` parameter was added. """ - key_func = make_attrgetter( - environment, attribute, - postprocess=ignore_case if not case_sensitive else None + key_func = make_multi_attrgetter( - environment, attribute, postprocess=ignore_case if not case_sensitive else None + ) return sorted(value, key=key_func, reverse=reverse) @environmentfilter def do_unique(environment, value, case_sensitive=False, attribute=None): - """Returns a list of unique items from the the given iterable. + """Returns a list of unique items from the given iterable. .. sourcecode:: jinja - {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }} + {{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }} -> ['foo', 'bar', 'foobar'] The unique items are yielded in the same order as their first occurrence in @@ -294,8 +364,7 @@ def do_unique(environment, value, case_sensitive=False, attribute=None): :param attribute: Filter objects with unique values for this attribute.
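The new ``make_multi_attrgetter`` drives the comma-separated ``attribute`` form documented above; a runnable sketch with made-up data:

```python
from jinja2 import Environment

env = Environment()
users = [
    {"name": "Bob", "age": 30},
    {"name": "alice", "age": 30},
    {"name": "Eve", "age": 25},
]
out = env.from_string(
    "{% for u in users|sort(attribute='age,name') %}{{ u.name }} {% endfor %}"
).render(users=users)
print(out)  # -> Eve alice Bob  (age ascending, then name, case-insensitive)
```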
""" getter = make_attrgetter( - environment, attribute, - postprocess=ignore_case if not case_sensitive else None + environment, attribute, postprocess=ignore_case if not case_sensitive else None ) seen = set() @@ -313,11 +382,10 @@ def _min_or_max(environment, value, func, case_sensitive, attribute): try: first = next(it) except StopIteration: - return environment.undefined('No aggregated item, sequence was empty.') + return environment.undefined("No aggregated item, sequence was empty.") key_func = make_attrgetter( - environment, attribute, - ignore_case if not case_sensitive else None + environment, attribute, postprocess=ignore_case if not case_sensitive else None ) return func(chain([first], it), key=key_func) @@ -332,7 +400,7 @@ def do_min(environment, value, case_sensitive=False, attribute=None): -> 1 :param case_sensitive: Treat upper and lower case strings as distinct. - :param attribute: Get the object with the max value of this attribute. + :param attribute: Get the object with the min value of this attribute. """ return _min_or_max(environment, value, min, case_sensitive, attribute) @@ -352,7 +420,7 @@ def do_max(environment, value, case_sensitive=False, attribute=None): return _min_or_max(environment, value, max, case_sensitive, attribute) -def do_default(value, default_value=u'', boolean=False): +def do_default(value, default_value=u"", boolean=False): """If the value is undefined it will return the passed default value, otherwise the value of the variable: @@ -368,6 +436,12 @@ def do_default(value, default_value=u'', boolean=False): .. sourcecode:: jinja {{ ''|default('the string was empty', true) }} + + .. versionchanged:: 2.11 + It's now possible to configure the :class:`~jinja2.Environment` with + :class:`~jinja2.ChainableUndefined` to make the `default` filter work + on nested elements and attributes that may contain undefined values + in the chain without getting an :exc:`~jinja2.UndefinedError`. """ if isinstance(value, Undefined) or (boolean and not value): return default_value @@ -375,7 +449,7 @@ def do_default(value, default_value=u'', boolean=False): @evalcontextfilter -def do_join(eval_ctx, value, d=u'', attribute=None): +def do_join(eval_ctx, value, d=u"", attribute=None): """Return a string which is the concatenation of the strings in the sequence. The separator between elements is an empty string per default, you can define it with the optional parameter: @@ -400,17 +474,17 @@ def do_join(eval_ctx, value, d=u'', attribute=None): if attribute is not None: value = imap(make_attrgetter(eval_ctx.environment, attribute), value) - # no automatic escaping? joining is a lot eaiser then + # no automatic escaping? joining is a lot easier then if not eval_ctx.autoescape: return text_type(d).join(imap(text_type, value)) # if the delimiter doesn't have an html representation we check # if any of the items has. If yes we do a coercion to Markup - if not hasattr(d, '__html__'): + if not hasattr(d, "__html__"): value = list(value) do_escape = False for idx, item in enumerate(value): - if hasattr(item, '__html__'): + if hasattr(item, "__html__"): do_escape = True else: value[idx] = text_type(item) @@ -435,16 +509,25 @@ def do_first(environment, seq): try: return next(iter(seq)) except StopIteration: - return environment.undefined('No first item, sequence was empty.') + return environment.undefined("No first item, sequence was empty.") @environmentfilter def do_last(environment, seq): - """Return the last item of a sequence.""" + """ + Return the last item of a sequence. 
+ + Note: Does not work with generators. You may want to explicitly + convert it to a list: + + .. sourcecode:: jinja + + {{ data | selectattr('name', '==', 'Jinja') | list | last }} + """ try: return next(iter(reversed(seq))) except StopIteration: - return environment.undefined('No last item, sequence was empty.') + return environment.undefined("No last item, sequence was empty.") @contextfilter @@ -453,7 +536,7 @@ def do_random(context, seq): try: return random.choice(seq) except IndexError: - return context.environment.undefined('No random item, sequence was empty.') + return context.environment.undefined("No random item, sequence was empty.") def do_filesizeformat(value, binary=False): @@ -465,25 +548,25 @@ def do_filesizeformat(value, binary=False): bytes = float(value) base = binary and 1024 or 1000 prefixes = [ - (binary and 'KiB' or 'kB'), - (binary and 'MiB' or 'MB'), - (binary and 'GiB' or 'GB'), - (binary and 'TiB' or 'TB'), - (binary and 'PiB' or 'PB'), - (binary and 'EiB' or 'EB'), - (binary and 'ZiB' or 'ZB'), - (binary and 'YiB' or 'YB') + (binary and "KiB" or "kB"), + (binary and "MiB" or "MB"), + (binary and "GiB" or "GB"), + (binary and "TiB" or "TB"), + (binary and "PiB" or "PB"), + (binary and "EiB" or "EB"), + (binary and "ZiB" or "ZB"), + (binary and "YiB" or "YB"), ] if bytes == 1: - return '1 Byte' + return "1 Byte" elif bytes < base: - return '%d Bytes' % bytes + return "%d Bytes" % bytes else: for i, prefix in enumerate(prefixes): unit = base ** (i + 2) if bytes < unit: - return '%.1f %s' % ((base * bytes / unit), prefix) - return '%.1f %s' % ((base * bytes / unit), prefix) + return "%.1f %s" % ((base * bytes / unit), prefix) + return "%.1f %s" % ((base * bytes / unit), prefix) def do_pprint(value, verbose=False): @@ -496,8 +579,9 @@ def do_pprint(value, verbose=False): @evalcontextfilter -def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False, - target=None, rel=None): +def do_urlize( + eval_ctx, value, trim_url_limit=None, nofollow=False, target=None, rel=None +): """Converts URLs in plain text into clickable links. If you pass the filter an additional integer it will shorten the urls @@ -520,22 +604,20 @@ def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False, The *target* parameter was added. """ policies = eval_ctx.environment.policies - rel = set((rel or '').split() or []) + rel = set((rel or "").split() or []) if nofollow: - rel.add('nofollow') - rel.update((policies['urlize.rel'] or '').split()) + rel.add("nofollow") + rel.update((policies["urlize.rel"] or "").split()) if target is None: - target = policies['urlize.target'] - rel = ' '.join(sorted(rel)) or None + target = policies["urlize.target"] + rel = " ".join(sorted(rel)) or None rv = urlize(value, trim_url_limit, rel=rel, target=target) if eval_ctx.autoescape: rv = Markup(rv) return rv -def do_indent( - s, width=4, first=False, blank=False, indentfirst=None -): +def do_indent(s, width=4, first=False, blank=False, indentfirst=None): """Return a copy of the string with each line indented by 4 spaces. The first line and blank lines are not indented by default. @@ -549,22 +631,31 @@ def do_indent( Rename the ``indentfirst`` argument to ``first``. """ if indentfirst is not None: - warnings.warn(DeprecationWarning( - 'The "indentfirst" argument is renamed to "first".' 
- ), stacklevel=2) + warnings.warn( + "The 'indentfirst' argument is renamed to 'first' and will" + " be removed in version 3.0.", + DeprecationWarning, + stacklevel=2, + ) first = indentfirst - s += u'\n' # this quirk is necessary for splitlines method - indention = u' ' * width + indention = u" " * width + newline = u"\n" + + if isinstance(s, Markup): + indention = Markup(indention) + newline = Markup(newline) + + s += newline # this quirk is necessary for splitlines method if blank: - rv = (u'\n' + indention).join(s.splitlines()) + rv = (newline + indention).join(s.splitlines()) else: lines = s.splitlines() rv = lines.pop(0) if lines: - rv += u'\n' + u'\n'.join( + rv += newline + newline.join( indention + line if line else line for line in lines ) @@ -575,7 +666,7 @@ def do_indent( @environmentfilter -def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None): +def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None): """Return a truncated copy of the string. The length is specified with the first parameter which defaults to ``255``. If the second parameter is ``true`` the filter will cut the text at length. Otherwise @@ -596,41 +687,76 @@ def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None): {{ "foo bar baz qux"|truncate(11, False, '...', 0) }} -> "foo bar..." - The default leeway on newer Jinja2 versions is 5 and was 0 before but + The default leeway on newer Jinja versions is 5 and was 0 before but can be reconfigured globally. """ if leeway is None: - leeway = env.policies['truncate.leeway'] - assert length >= len(end), 'expected length >= %s, got %s' % (len(end), length) - assert leeway >= 0, 'expected leeway >= 0, got %s' % leeway + leeway = env.policies["truncate.leeway"] + assert length >= len(end), "expected length >= %s, got %s" % (len(end), length) + assert leeway >= 0, "expected leeway >= 0, got %s" % leeway if len(s) <= length + leeway: return s if killwords: - return s[:length - len(end)] + end - result = s[:length - len(end)].rsplit(' ', 1)[0] + return s[: length - len(end)] + end + result = s[: length - len(end)].rsplit(" ", 1)[0] return result + end @environmentfilter -def do_wordwrap(environment, s, width=79, break_long_words=True, - wrapstring=None): +def do_wordwrap( + environment, + s, + width=79, + break_long_words=True, + wrapstring=None, + break_on_hyphens=True, +): + """Wrap a string to the given width. Existing newlines are treated + as paragraphs to be wrapped separately. + + :param s: Original text to wrap. + :param width: Maximum length of wrapped lines. + :param break_long_words: If a word is longer than ``width``, break + it across lines. + :param break_on_hyphens: If a word contains hyphens, it may be split + across lines. + :param wrapstring: String to join each wrapped line. Defaults to + :attr:`Environment.newline_sequence`. + + .. versionchanged:: 2.11 + Existing newlines are treated as paragraphs wrapped separately. + + .. versionchanged:: 2.11 + Added the ``break_on_hyphens`` parameter. + + .. versionchanged:: 2.7 + Added the ``wrapstring`` parameter. """ - Return a copy of the string passed to the filter wrapped after - ``79`` characters. You can override this default using the first - parameter. If you set the second parameter to `false` Jinja will not - split words apart if they are longer than `width`. By default, the newlines - will be the default newlines for the environment, but this can be changed - using the wrapstring keyword argument. - .. 
versionadded:: 2.7 - Added support for the `wrapstring` parameter. - """ + import textwrap + if not wrapstring: wrapstring = environment.newline_sequence - import textwrap - return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False, - replace_whitespace=False, - break_long_words=break_long_words)) + + # textwrap.wrap doesn't consider existing newlines when wrapping. + # If the string has a newline before width, wrap will still insert + # a newline at width, resulting in a short line. Instead, split and + # wrap each paragraph individually. + return wrapstring.join( + [ + wrapstring.join( + textwrap.wrap( + line, + width=width, + expand_tabs=False, + replace_whitespace=False, + break_long_words=break_long_words, + break_on_hyphens=break_on_hyphens, + ) + ) + for line in s.splitlines() + ] + ) def do_wordcount(s): @@ -671,29 +797,40 @@ def do_float(value, default=0.0): def do_format(value, *args, **kwargs): - """ - Apply python string formatting on an object: + """Apply the given values to a `printf-style`_ format string, like + ``string % values``. .. sourcecode:: jinja - {{ "%s - %s"|format("Hello?", "Foo!") }} - -> Hello? - Foo! + {{ "%s, %s!"|format(greeting, name) }} + Hello, World! + + In most cases it should be more convenient and efficient to use the + ``%`` operator or :meth:`str.format`. + + .. code-block:: text + + {{ "%s, %s!" % (greeting, name) }} + {{ "{}, {}!".format(greeting, name) }} + + .. _printf-style: https://docs.python.org/library/stdtypes.html + #printf-style-string-formatting """ if args and kwargs: - raise FilterArgumentError('can\'t handle positional and keyword ' - 'arguments at the same time') + raise FilterArgumentError( + "can't handle positional and keyword arguments at the same time" + ) return soft_unicode(value) % (kwargs or args) -def do_trim(value): - """Strip leading and trailing whitespace.""" - return soft_unicode(value).strip() +def do_trim(value, chars=None): + """Strip leading and trailing characters, by default whitespace.""" + return soft_unicode(value).strip(chars) def do_striptags(value): - """Strip SGML/XML tags and replace adjacent whitespace by one space. - """ - if hasattr(value, '__html__'): + """Strip SGML/XML tags and replace adjacent whitespace by one space.""" + if hasattr(value, "__html__"): value = value.__html__() return Markup(text_type(value)).striptags() @@ -705,7 +842,7 @@ def do_slice(value, slices, fill_with=None): .. sourcecode:: html+jinja - <div class="columwrapper"> + <div class="columnwrapper"> {%- for column in items|slice(3) %} <ul class="column-{{ loop.index }}"> {%- for item in column %} @@ -765,7 +902,7 @@ def do_batch(value, linecount, fill_with=None): yield tmp -def do_round(value, precision=0, method='common'): +def do_round(value, precision=0, method="common"): """Round the number to a given precision. 
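The rewritten ``wordwrap`` body above wraps each existing line separately, which is what keeps short paragraphs from being merged into the following text; for example:

```python
from jinja2 import Environment

env = Environment()
text = "a short line\n" + "word " * 6
print(env.from_string("{{ text|wordwrap(20) }}").render(text=text))
# a short line
# word word word word
# word word
```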
The first parameter specifies the precision (default is ``0``), the second the rounding method: @@ -791,9 +928,9 @@ def do_round(value, precision=0, method='common'): {{ 42.55|round|int }} -> 43 """ - if not method in ('common', 'ceil', 'floor'): - raise FilterArgumentError('method must be common, ceil or floor') - if method == 'common': + if method not in {"common", "ceil", "floor"}: + raise FilterArgumentError("method must be common, ceil or floor") + if method == "common": return round(value, precision) func = getattr(math, method) return func(value * (10 ** precision)) / (10 ** precision) @@ -804,52 +941,51 @@ def do_round(value, precision=0, method='common'): # we do not want to accidentally expose an auto generated repr in case # people start to print this out in comments or something similar for # debugging. -_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list']) +_GroupTuple = namedtuple("_GroupTuple", ["grouper", "list"]) _GroupTuple.__repr__ = tuple.__repr__ _GroupTuple.__str__ = tuple.__str__ + @environmentfilter def do_groupby(environment, value, attribute): - """Group a sequence of objects by a common attribute. + """Group a sequence of objects by an attribute using Python's + :func:`itertools.groupby`. The attribute can use dot notation for + nested access, like ``"address.city"``. Unlike Python's ``groupby``, + the values are sorted first so only one group is returned for each + unique value. - If you for example have a list of dicts or objects that represent persons - with `gender`, `first_name` and `last_name` attributes and you want to - group all users by genders you can do something like the following - snippet: + For example, a list of ``User`` objects with a ``city`` attribute + can be rendered in groups. In this example, ``grouper`` refers to + the ``city`` value of the group. .. sourcecode:: html+jinja - <ul> - {% for group in persons|groupby('gender') %} - <li>{{ group.grouper }}<ul> - {% for person in group.list %} - <li>{{ person.first_name }} {{ person.last_name }}</li> - {% endfor %}</ul></li> - {% endfor %} - </ul> + <ul>{% for city, items in users|groupby("city") %} + <li>{{ city }} + <ul>{% for user in items %} + <li>{{ user.name }} + {% endfor %}</ul> + </li> + {% endfor %}</ul> - Additionally it's possible to use tuple unpacking for the grouper and - list: + ``groupby`` yields namedtuples of ``(grouper, list)``, which + can be used instead of the tuple unpacking above. ``grouper`` is the + value of the attribute, and ``list`` is the items with that value. .. sourcecode:: html+jinja - <ul> - {% for grouper, list in persons|groupby('gender') %} - ... - {% endfor %} - </ul> - - As you can see the item we're grouping by is stored in the `grouper` - attribute and the `list` contains all the objects that have this grouper - in common. + <ul>{% for group in users|groupby("city") %} + <li>{{ group.grouper }}: {{ group.list|join(", ") }} + {% endfor %}</ul> .. versionchanged:: 2.6 - It's now possible to use dotted notation to group by the child - attribute of another attribute. + The attribute supports dot notation for nested access. 
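Rendering the rewritten ``groupby`` docstring (next hunk) as a runnable sketch; the ``city``/``name`` data is made up for illustration:

```python
from jinja2 import Environment

env = Environment()
users = [
    {"name": "Ann", "city": "Oslo"},
    {"name": "Bo", "city": "Bergen"},
    {"name": "Cy", "city": "Oslo"},
]
out = env.from_string(
    '{% for city, items in users|groupby("city") %}'
    "{{ city }}: {{ items|map(attribute='name')|join(', ') }}\n"
    "{% endfor %}"
).render(users=users)
print(out)
# Bergen: Bo
# Oslo: Ann, Cy
```

The values are sorted by the grouping key first, so each unique value yields exactly one group.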
""" expr = make_attrgetter(environment, attribute) - return [_GroupTuple(key, list(values)) for key, values - in groupby(sorted(value, key=expr), expr)] + return [ + _GroupTuple(key, list(values)) + for key, values in groupby(sorted(value, key=expr), expr) + ] @environmentfilter @@ -906,7 +1042,7 @@ def do_reverse(value): rv.reverse() return rv except TypeError: - raise FilterArgumentError('argument must be iterable') + raise FilterArgumentError("argument must be iterable") @environmentfilter @@ -927,8 +1063,9 @@ def do_attr(environment, obj, name): except AttributeError: pass else: - if environment.sandboxed and not \ - environment.is_safe_attribute(obj, name, value): + if environment.sandboxed and not environment.is_safe_attribute( + obj, name, value + ): return environment.unsafe_undefined(obj, name) return value return environment.undefined(obj=obj, name=name) @@ -947,6 +1084,13 @@ def do_map(*args, **kwargs): Users on this page: {{ users|map(attribute='username')|join(', ') }} + You can specify a ``default`` value to use if an object in the list + does not have the given attribute. + + .. sourcecode:: jinja + + {{ users|map(attribute="username", default="Anonymous")|join(", ") }} + Alternatively you can let it invoke a filter by passing the name of the filter and the arguments afterwards. A good example would be applying a text conversion filter on a sequence: @@ -955,6 +1099,17 @@ def do_map(*args, **kwargs): Users on this page: {{ titles|map('lower')|join(', ') }} + Similar to a generator comprehension such as: + + .. code-block:: python + + (u.username for u in users) + (u.username or "Anonymous" for u in users) + (do_lower(x) for x in titles) + + .. versionchanged:: 2.11.0 + Added the ``default`` parameter. + .. versionadded:: 2.7 """ seq, func = prepare_map(args, kwargs) @@ -980,6 +1135,13 @@ def do_select(*args, **kwargs): {{ numbers|select("lessthan", 42) }} {{ strings|select("equalto", "mystring") }} + Similar to a generator comprehension such as: + + .. code-block:: python + + (n for n in numbers if test_odd(n)) + (n for n in numbers if test_divisibleby(n, 3)) + .. versionadded:: 2.7 """ return select_or_reject(args, kwargs, lambda x: x, False) @@ -998,6 +1160,12 @@ def do_reject(*args, **kwargs): {{ numbers|reject("odd") }} + Similar to a generator comprehension such as: + + .. code-block:: python + + (n for n in numbers if not test_odd(n)) + .. versionadded:: 2.7 """ return select_or_reject(args, kwargs, lambda x: not x, False) @@ -1019,6 +1187,13 @@ def do_selectattr(*args, **kwargs): {{ users|selectattr("is_active") }} {{ users|selectattr("email", "none") }} + Similar to a generator comprehension such as: + + .. code-block:: python + + (u for user in users if user.is_active) + (u for user in users if test_none(user.email)) + .. versionadded:: 2.7 """ return select_or_reject(args, kwargs, lambda x: x, True) @@ -1038,6 +1213,13 @@ def do_rejectattr(*args, **kwargs): {{ users|rejectattr("is_active") }} {{ users|rejectattr("email", "none") }} + Similar to a generator comprehension such as: + + .. code-block:: python + + (u for user in users if not user.is_active) + (u for user in users if not test_none(user.email)) + .. versionadded:: 2.7 """ return select_or_reject(args, kwargs, lambda x: not x, True) @@ -1070,32 +1252,38 @@ def do_tojson(eval_ctx, value, indent=None): .. 
versionadded:: 2.9 """ policies = eval_ctx.environment.policies - dumper = policies['json.dumps_function'] - options = policies['json.dumps_kwargs'] + dumper = policies["json.dumps_function"] + options = policies["json.dumps_kwargs"] if indent is not None: options = dict(options) - options['indent'] = indent + options["indent"] = indent return htmlsafe_json_dumps(value, dumper=dumper, **options) def prepare_map(args, kwargs): context = args[0] seq = args[1] + default = None - if len(args) == 2 and 'attribute' in kwargs: - attribute = kwargs.pop('attribute') + if len(args) == 2 and "attribute" in kwargs: + attribute = kwargs.pop("attribute") + default = kwargs.pop("default", None) if kwargs: - raise FilterArgumentError('Unexpected keyword argument %r' % - next(iter(kwargs))) - func = make_attrgetter(context.environment, attribute) + raise FilterArgumentError( + "Unexpected keyword argument %r" % next(iter(kwargs)) + ) + func = make_attrgetter(context.environment, attribute, default=default) else: try: name = args[2] args = args[3:] except LookupError: - raise FilterArgumentError('map requires a filter argument') - func = lambda item: context.environment.call_filter( - name, item, args, kwargs, context=context) + raise FilterArgumentError("map requires a filter argument") + + def func(item): + return context.environment.call_filter( + name, item, args, kwargs, context=context + ) return seq, func @@ -1107,18 +1295,22 @@ def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr): try: attr = args[2] except LookupError: - raise FilterArgumentError('Missing parameter for attribute name') + raise FilterArgumentError("Missing parameter for attribute name") transfunc = make_attrgetter(context.environment, attr) off = 1 else: off = 0 - transfunc = lambda x: x + + def transfunc(x): + return x try: name = args[2 + off] - args = args[3 + off:] - func = lambda item: context.environment.call_test( - name, item, args, kwargs) + args = args[3 + off :] + + def func(item): + return context.environment.call_test(name, item, args, kwargs) + except LookupError: func = bool @@ -1134,57 +1326,57 @@ def select_or_reject(args, kwargs, modfunc, lookup_attr): FILTERS = { - 'abs': abs, - 'attr': do_attr, - 'batch': do_batch, - 'capitalize': do_capitalize, - 'center': do_center, - 'count': len, - 'd': do_default, - 'default': do_default, - 'dictsort': do_dictsort, - 'e': escape, - 'escape': escape, - 'filesizeformat': do_filesizeformat, - 'first': do_first, - 'float': do_float, - 'forceescape': do_forceescape, - 'format': do_format, - 'groupby': do_groupby, - 'indent': do_indent, - 'int': do_int, - 'join': do_join, - 'last': do_last, - 'length': len, - 'list': do_list, - 'lower': do_lower, - 'map': do_map, - 'min': do_min, - 'max': do_max, - 'pprint': do_pprint, - 'random': do_random, - 'reject': do_reject, - 'rejectattr': do_rejectattr, - 'replace': do_replace, - 'reverse': do_reverse, - 'round': do_round, - 'safe': do_mark_safe, - 'select': do_select, - 'selectattr': do_selectattr, - 'slice': do_slice, - 'sort': do_sort, - 'string': soft_unicode, - 'striptags': do_striptags, - 'sum': do_sum, - 'title': do_title, - 'trim': do_trim, - 'truncate': do_truncate, - 'unique': do_unique, - 'upper': do_upper, - 'urlencode': do_urlencode, - 'urlize': do_urlize, - 'wordcount': do_wordcount, - 'wordwrap': do_wordwrap, - 'xmlattr': do_xmlattr, - 'tojson': do_tojson, + "abs": abs, + "attr": do_attr, + "batch": do_batch, + "capitalize": do_capitalize, + "center": do_center, + "count": len, + "d": do_default, + "default": 
do_default, + "dictsort": do_dictsort, + "e": escape, + "escape": escape, + "filesizeformat": do_filesizeformat, + "first": do_first, + "float": do_float, + "forceescape": do_forceescape, + "format": do_format, + "groupby": do_groupby, + "indent": do_indent, + "int": do_int, + "join": do_join, + "last": do_last, + "length": len, + "list": do_list, + "lower": do_lower, + "map": do_map, + "min": do_min, + "max": do_max, + "pprint": do_pprint, + "random": do_random, + "reject": do_reject, + "rejectattr": do_rejectattr, + "replace": do_replace, + "reverse": do_reverse, + "round": do_round, + "safe": do_mark_safe, + "select": do_select, + "selectattr": do_selectattr, + "slice": do_slice, + "sort": do_sort, + "string": soft_unicode, + "striptags": do_striptags, + "sum": do_sum, + "title": do_title, + "trim": do_trim, + "truncate": do_truncate, + "unique": do_unique, + "upper": do_upper, + "urlencode": do_urlencode, + "urlize": do_urlize, + "wordcount": do_wordcount, + "wordwrap": do_wordwrap, + "xmlattr": do_xmlattr, + "tojson": do_tojson, } diff --git a/pipenv/vendor/jinja2/idtracking.py b/pipenv/vendor/jinja2/idtracking.py index 491bfe0836..9a0d838017 100644 --- a/pipenv/vendor/jinja2/idtracking.py +++ b/pipenv/vendor/jinja2/idtracking.py @@ -1,11 +1,10 @@ -from jinja2.visitor import NodeVisitor -from jinja2._compat import iteritems +from ._compat import iteritems +from .visitor import NodeVisitor - -VAR_LOAD_PARAMETER = 'param' -VAR_LOAD_RESOLVE = 'resolve' -VAR_LOAD_ALIAS = 'alias' -VAR_LOAD_UNDEFINED = 'undefined' +VAR_LOAD_PARAMETER = "param" +VAR_LOAD_RESOLVE = "resolve" +VAR_LOAD_ALIAS = "alias" +VAR_LOAD_UNDEFINED = "undefined" def find_symbols(nodes, parent_symbols=None): @@ -23,7 +22,6 @@ def symbols_for_node(node, parent_symbols=None): class Symbols(object): - def __init__(self, parent=None, level=None): if level is None: if parent is None: @@ -41,7 +39,7 @@ def analyze_node(self, node, **kwargs): visitor.visit(node, **kwargs) def _define_ref(self, name, load=None): - ident = 'l_%d_%s' % (self.level, name) + ident = "l_%d_%s" % (self.level, name) self.refs[name] = ident if load is not None: self.loads[ident] = load @@ -62,8 +60,10 @@ def find_ref(self, name): def ref(self, name): rv = self.find_ref(name) if rv is None: - raise AssertionError('Tried to resolve a name to a reference that ' - 'was unknown to the frame (%r)' % name) + raise AssertionError( + "Tried to resolve a name to a reference that " + "was unknown to the frame (%r)" % name + ) return rv def copy(self): @@ -118,7 +118,7 @@ def branch_update(self, branch_symbols): if branch_count == len(branch_symbols): continue target = self.find_ref(name) - assert target is not None, 'should not happen' + assert target is not None, "should not happen" if self.parent is not None: outer_target = self.parent.find_ref(name) @@ -149,7 +149,6 @@ def dump_param_targets(self): class RootVisitor(NodeVisitor): - def __init__(self, symbols): self.sym_visitor = FrameSymbolVisitor(symbols) @@ -157,35 +156,39 @@ def _simple_visit(self, node, **kwargs): for child in node.iter_child_nodes(): self.sym_visitor.visit(child) - visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \ - visit_Scope = visit_If = visit_ScopedEvalContextModifier = \ - _simple_visit + visit_Template = ( + visit_Block + ) = ( + visit_Macro + ) = ( + visit_FilterBlock + ) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit def visit_AssignBlock(self, node, **kwargs): for child in node.body: self.sym_visitor.visit(child) def 
visit_CallBlock(self, node, **kwargs): - for child in node.iter_child_nodes(exclude=('call',)): + for child in node.iter_child_nodes(exclude=("call",)): self.sym_visitor.visit(child) def visit_OverlayScope(self, node, **kwargs): for child in node.body: self.sym_visitor.visit(child) - def visit_For(self, node, for_branch='body', **kwargs): - if for_branch == 'body': + def visit_For(self, node, for_branch="body", **kwargs): + if for_branch == "body": self.sym_visitor.visit(node.target, store_as_param=True) branch = node.body - elif for_branch == 'else': + elif for_branch == "else": branch = node.else_ - elif for_branch == 'test': + elif for_branch == "test": self.sym_visitor.visit(node.target, store_as_param=True) if node.test is not None: self.sym_visitor.visit(node.test) return else: - raise RuntimeError('Unknown for branch') + raise RuntimeError("Unknown for branch") for item in branch or (): self.sym_visitor.visit(item) @@ -196,8 +199,9 @@ def visit_With(self, node, **kwargs): self.sym_visitor.visit(child) def generic_visit(self, node, *args, **kwargs): - raise NotImplementedError('Cannot find symbols for %r' % - node.__class__.__name__) + raise NotImplementedError( + "Cannot find symbols for %r" % node.__class__.__name__ + ) class FrameSymbolVisitor(NodeVisitor): @@ -208,11 +212,11 @@ def __init__(self, symbols): def visit_Name(self, node, store_as_param=False, **kwargs): """All assignments to names go through this function.""" - if store_as_param or node.ctx == 'param': + if store_as_param or node.ctx == "param": self.symbols.declare_parameter(node.name) - elif node.ctx == 'store': + elif node.ctx == "store": self.symbols.store(node.name) - elif node.ctx == 'load': + elif node.ctx == "load": self.symbols.load(node.name) def visit_NSRef(self, node, **kwargs): diff --git a/pipenv/vendor/jinja2/lexer.py b/pipenv/vendor/jinja2/lexer.py index 6fd135dd5b..a2b44e926b 100644 --- a/pipenv/vendor/jinja2/lexer.py +++ b/pipenv/vendor/jinja2/lexer.py @@ -1,185 +1,194 @@ # -*- coding: utf-8 -*- -""" - jinja2.lexer - ~~~~~~~~~~~~ - - This module implements a Jinja / Python combination lexer. The - `Lexer` class provided by this module is used to do some preprocessing - for Jinja. - - On the one hand it filters out invalid operators like the bitshift - operators we don't allow in templates. On the other hand it separates - template code and python code in expressions. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. +"""Implements a Jinja / Python combination lexer. The ``Lexer`` class +is used to do some preprocessing. It filters out invalid operators like +the bitshift operators we don't allow in templates. It separates +template code and python code in expressions. """ import re +from ast import literal_eval from collections import deque from operator import itemgetter -from jinja2._compat import implements_iterator, intern, iteritems, text_type -from jinja2.exceptions import TemplateSyntaxError -from jinja2.utils import LRUCache +from ._compat import implements_iterator +from ._compat import intern +from ._compat import iteritems +from ._compat import text_type +from .exceptions import TemplateSyntaxError +from .utils import LRUCache # cache for the lexers. 
Exists in order to be able to have multiple # environments with the same lexer _lexer_cache = LRUCache(50) # static regular expressions -whitespace_re = re.compile(r'\s+', re.U) -string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'" - r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S) -integer_re = re.compile(r'\d+') +whitespace_re = re.compile(r"\s+", re.U) +newline_re = re.compile(r"(\r\n|\r|\n)") +string_re = re.compile( + r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S +) +integer_re = re.compile(r"(\d+_)*\d+") +float_re = re.compile( + r""" + (?<!\.) # doesn't start with a . + (\d+_)*\d+ # digits, possibly _ separated + ( + (\.(\d+_)*\d+)? # optional fractional part + e[+\-]?(\d+_)*\d+ # exponent part + | + \.(\d+_)*\d+ # required fractional part + ) + """, + re.IGNORECASE | re.VERBOSE, +) try: # check if this Python supports Unicode identifiers - compile('föö', '<unknown>', 'eval') + compile("föö", "<unknown>", "eval") except SyntaxError: - # no Unicode support, use ASCII identifiers - name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*') + # Python 2, no Unicode support, use ASCII identifiers + name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*") check_ident = False else: - # Unicode support, build a pattern to match valid characters, and set flag - # to use str.isidentifier to validate during lexing - from jinja2 import _identifier - name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern)) - check_ident = True - # remove the pattern from memory after building the regex - import sys - del sys.modules['jinja2._identifier'] - import jinja2 - del jinja2._identifier - del _identifier + # Unicode support, import generated re pattern and set flag to use + # str.isidentifier to validate during lexing. + from ._identifier import pattern as name_re -float_re = re.compile(r'(?<!\.)\d+\.\d+') -newline_re = re.compile(r'(\r\n|\r|\n)') + check_ident = True # internal the tokens and keep references to them -TOKEN_ADD = intern('add') -TOKEN_ASSIGN = intern('assign') -TOKEN_COLON = intern('colon') -TOKEN_COMMA = intern('comma') -TOKEN_DIV = intern('div') -TOKEN_DOT = intern('dot') -TOKEN_EQ = intern('eq') -TOKEN_FLOORDIV = intern('floordiv') -TOKEN_GT = intern('gt') -TOKEN_GTEQ = intern('gteq') -TOKEN_LBRACE = intern('lbrace') -TOKEN_LBRACKET = intern('lbracket') -TOKEN_LPAREN = intern('lparen') -TOKEN_LT = intern('lt') -TOKEN_LTEQ = intern('lteq') -TOKEN_MOD = intern('mod') -TOKEN_MUL = intern('mul') -TOKEN_NE = intern('ne') -TOKEN_PIPE = intern('pipe') -TOKEN_POW = intern('pow') -TOKEN_RBRACE = intern('rbrace') -TOKEN_RBRACKET = intern('rbracket') -TOKEN_RPAREN = intern('rparen') -TOKEN_SEMICOLON = intern('semicolon') -TOKEN_SUB = intern('sub') -TOKEN_TILDE = intern('tilde') -TOKEN_WHITESPACE = intern('whitespace') -TOKEN_FLOAT = intern('float') -TOKEN_INTEGER = intern('integer') -TOKEN_NAME = intern('name') -TOKEN_STRING = intern('string') -TOKEN_OPERATOR = intern('operator') -TOKEN_BLOCK_BEGIN = intern('block_begin') -TOKEN_BLOCK_END = intern('block_end') -TOKEN_VARIABLE_BEGIN = intern('variable_begin') -TOKEN_VARIABLE_END = intern('variable_end') -TOKEN_RAW_BEGIN = intern('raw_begin') -TOKEN_RAW_END = intern('raw_end') -TOKEN_COMMENT_BEGIN = intern('comment_begin') -TOKEN_COMMENT_END = intern('comment_end') -TOKEN_COMMENT = intern('comment') -TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin') -TOKEN_LINESTATEMENT_END = intern('linestatement_end') -TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin') -TOKEN_LINECOMMENT_END = intern('linecomment_end') -TOKEN_LINECOMMENT = 
intern('linecomment') -TOKEN_DATA = intern('data') -TOKEN_INITIAL = intern('initial') -TOKEN_EOF = intern('eof') +TOKEN_ADD = intern("add") +TOKEN_ASSIGN = intern("assign") +TOKEN_COLON = intern("colon") +TOKEN_COMMA = intern("comma") +TOKEN_DIV = intern("div") +TOKEN_DOT = intern("dot") +TOKEN_EQ = intern("eq") +TOKEN_FLOORDIV = intern("floordiv") +TOKEN_GT = intern("gt") +TOKEN_GTEQ = intern("gteq") +TOKEN_LBRACE = intern("lbrace") +TOKEN_LBRACKET = intern("lbracket") +TOKEN_LPAREN = intern("lparen") +TOKEN_LT = intern("lt") +TOKEN_LTEQ = intern("lteq") +TOKEN_MOD = intern("mod") +TOKEN_MUL = intern("mul") +TOKEN_NE = intern("ne") +TOKEN_PIPE = intern("pipe") +TOKEN_POW = intern("pow") +TOKEN_RBRACE = intern("rbrace") +TOKEN_RBRACKET = intern("rbracket") +TOKEN_RPAREN = intern("rparen") +TOKEN_SEMICOLON = intern("semicolon") +TOKEN_SUB = intern("sub") +TOKEN_TILDE = intern("tilde") +TOKEN_WHITESPACE = intern("whitespace") +TOKEN_FLOAT = intern("float") +TOKEN_INTEGER = intern("integer") +TOKEN_NAME = intern("name") +TOKEN_STRING = intern("string") +TOKEN_OPERATOR = intern("operator") +TOKEN_BLOCK_BEGIN = intern("block_begin") +TOKEN_BLOCK_END = intern("block_end") +TOKEN_VARIABLE_BEGIN = intern("variable_begin") +TOKEN_VARIABLE_END = intern("variable_end") +TOKEN_RAW_BEGIN = intern("raw_begin") +TOKEN_RAW_END = intern("raw_end") +TOKEN_COMMENT_BEGIN = intern("comment_begin") +TOKEN_COMMENT_END = intern("comment_end") +TOKEN_COMMENT = intern("comment") +TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin") +TOKEN_LINESTATEMENT_END = intern("linestatement_end") +TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin") +TOKEN_LINECOMMENT_END = intern("linecomment_end") +TOKEN_LINECOMMENT = intern("linecomment") +TOKEN_DATA = intern("data") +TOKEN_INITIAL = intern("initial") +TOKEN_EOF = intern("eof") # bind operators to token types operators = { - '+': TOKEN_ADD, - '-': TOKEN_SUB, - '/': TOKEN_DIV, - '//': TOKEN_FLOORDIV, - '*': TOKEN_MUL, - '%': TOKEN_MOD, - '**': TOKEN_POW, - '~': TOKEN_TILDE, - '[': TOKEN_LBRACKET, - ']': TOKEN_RBRACKET, - '(': TOKEN_LPAREN, - ')': TOKEN_RPAREN, - '{': TOKEN_LBRACE, - '}': TOKEN_RBRACE, - '==': TOKEN_EQ, - '!=': TOKEN_NE, - '>': TOKEN_GT, - '>=': TOKEN_GTEQ, - '<': TOKEN_LT, - '<=': TOKEN_LTEQ, - '=': TOKEN_ASSIGN, - '.': TOKEN_DOT, - ':': TOKEN_COLON, - '|': TOKEN_PIPE, - ',': TOKEN_COMMA, - ';': TOKEN_SEMICOLON + "+": TOKEN_ADD, + "-": TOKEN_SUB, + "/": TOKEN_DIV, + "//": TOKEN_FLOORDIV, + "*": TOKEN_MUL, + "%": TOKEN_MOD, + "**": TOKEN_POW, + "~": TOKEN_TILDE, + "[": TOKEN_LBRACKET, + "]": TOKEN_RBRACKET, + "(": TOKEN_LPAREN, + ")": TOKEN_RPAREN, + "{": TOKEN_LBRACE, + "}": TOKEN_RBRACE, + "==": TOKEN_EQ, + "!=": TOKEN_NE, + ">": TOKEN_GT, + ">=": TOKEN_GTEQ, + "<": TOKEN_LT, + "<=": TOKEN_LTEQ, + "=": TOKEN_ASSIGN, + ".": TOKEN_DOT, + ":": TOKEN_COLON, + "|": TOKEN_PIPE, + ",": TOKEN_COMMA, + ";": TOKEN_SEMICOLON, } reverse_operators = dict([(v, k) for k, v in iteritems(operators)]) -assert len(operators) == len(reverse_operators), 'operators dropped' -operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in - sorted(operators, key=lambda x: -len(x)))) - -ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT, - TOKEN_COMMENT_END, TOKEN_WHITESPACE, - TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END, - TOKEN_LINECOMMENT]) -ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA, - TOKEN_COMMENT, TOKEN_LINECOMMENT]) +assert len(operators) == len(reverse_operators), "operators dropped" +operator_re = re.compile( + "(%s)" % 
"|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x))) +) + +ignored_tokens = frozenset( + [ + TOKEN_COMMENT_BEGIN, + TOKEN_COMMENT, + TOKEN_COMMENT_END, + TOKEN_WHITESPACE, + TOKEN_LINECOMMENT_BEGIN, + TOKEN_LINECOMMENT_END, + TOKEN_LINECOMMENT, + ] +) +ignore_if_empty = frozenset( + [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT] +) def _describe_token_type(token_type): if token_type in reverse_operators: return reverse_operators[token_type] return { - TOKEN_COMMENT_BEGIN: 'begin of comment', - TOKEN_COMMENT_END: 'end of comment', - TOKEN_COMMENT: 'comment', - TOKEN_LINECOMMENT: 'comment', - TOKEN_BLOCK_BEGIN: 'begin of statement block', - TOKEN_BLOCK_END: 'end of statement block', - TOKEN_VARIABLE_BEGIN: 'begin of print statement', - TOKEN_VARIABLE_END: 'end of print statement', - TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement', - TOKEN_LINESTATEMENT_END: 'end of line statement', - TOKEN_DATA: 'template data / text', - TOKEN_EOF: 'end of template' + TOKEN_COMMENT_BEGIN: "begin of comment", + TOKEN_COMMENT_END: "end of comment", + TOKEN_COMMENT: "comment", + TOKEN_LINECOMMENT: "comment", + TOKEN_BLOCK_BEGIN: "begin of statement block", + TOKEN_BLOCK_END: "end of statement block", + TOKEN_VARIABLE_BEGIN: "begin of print statement", + TOKEN_VARIABLE_END: "end of print statement", + TOKEN_LINESTATEMENT_BEGIN: "begin of line statement", + TOKEN_LINESTATEMENT_END: "end of line statement", + TOKEN_DATA: "template data / text", + TOKEN_EOF: "end of template", }.get(token_type, token_type) def describe_token(token): """Returns a description of the token.""" - if token.type == 'name': + if token.type == TOKEN_NAME: return token.value return _describe_token_type(token.type) def describe_token_expr(expr): """Like `describe_token` but for token expressions.""" - if ':' in expr: - type, value = expr.split(':', 1) - if type == 'name': + if ":" in expr: + type, value = expr.split(":", 1) + if type == TOKEN_NAME: return value else: type = expr @@ -197,21 +206,39 @@ def compile_rules(environment): """Compiles all the rules from the environment into a list of rules.""" e = re.escape rules = [ - (len(environment.comment_start_string), 'comment', - e(environment.comment_start_string)), - (len(environment.block_start_string), 'block', - e(environment.block_start_string)), - (len(environment.variable_start_string), 'variable', - e(environment.variable_start_string)) + ( + len(environment.comment_start_string), + TOKEN_COMMENT_BEGIN, + e(environment.comment_start_string), + ), + ( + len(environment.block_start_string), + TOKEN_BLOCK_BEGIN, + e(environment.block_start_string), + ), + ( + len(environment.variable_start_string), + TOKEN_VARIABLE_BEGIN, + e(environment.variable_start_string), + ), ] if environment.line_statement_prefix is not None: - rules.append((len(environment.line_statement_prefix), 'linestatement', - r'^[ \t\v]*' + e(environment.line_statement_prefix))) + rules.append( + ( + len(environment.line_statement_prefix), + TOKEN_LINESTATEMENT_BEGIN, + r"^[ \t\v]*" + e(environment.line_statement_prefix), + ) + ) if environment.line_comment_prefix is not None: - rules.append((len(environment.line_comment_prefix), 'linecomment', - r'(?:^|(?<=\S))[^\S\r\n]*' + - e(environment.line_comment_prefix))) + rules.append( + ( + len(environment.line_comment_prefix), + TOKEN_LINECOMMENT_BEGIN, + r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix), + ) + ) return [x[1:] for x in sorted(rules, reverse=True)] @@ -231,6 +258,7 @@ def __call__(self, lineno, 
filename): class Token(tuple): """Token class.""" + __slots__ = () lineno, type, value = (property(itemgetter(x)) for x in range(3)) @@ -240,7 +268,7 @@ def __new__(cls, lineno, type, value): def __str__(self): if self.type in reverse_operators: return reverse_operators[self.type] - elif self.type == 'name': + elif self.type == "name": return self.value return self.type @@ -253,8 +281,8 @@ def test(self, expr): # passed an iterable of not interned strings. if self.type == expr: return True - elif ':' in expr: - return expr.split(':', 1) == [self.type, self.value] + elif ":" in expr: + return expr.split(":", 1) == [self.type, self.value] return False def test_any(self, *iterable): @@ -265,11 +293,7 @@ def test_any(self, *iterable): return False def __repr__(self): - return 'Token(%r, %r, %r)' % ( - self.lineno, - self.type, - self.value - ) + return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value) @implements_iterator @@ -306,7 +330,7 @@ def __init__(self, generator, name, filename): self.name = name self.filename = filename self.closed = False - self.current = Token(1, TOKEN_INITIAL, '') + self.current = Token(1, TOKEN_INITIAL, "") next(self) def __iter__(self): @@ -314,9 +338,13 @@ def __iter__(self): def __bool__(self): return bool(self._pushed) or self.current.type is not TOKEN_EOF + __nonzero__ = __bool__ # py2 - eos = property(lambda x: not x, doc="Are we at the end of the stream?") + @property + def eos(self): + """Are we at the end of the stream?""" + return not self def push(self, token): """Push a token back to the stream.""" @@ -332,7 +360,7 @@ def look(self): def skip(self, n=1): """Got n tokens ahead.""" - for x in range(n): + for _ in range(n): next(self) def next_if(self, expr): @@ -363,7 +391,7 @@ def __next__(self): def close(self): """Close the stream.""" - self.current = Token(self.current.lineno, TOKEN_EOF, '') + self.current = Token(self.current.lineno, TOKEN_EOF, "") self._iter = None self.closed = True @@ -374,14 +402,18 @@ def expect(self, expr): if not self.current.test(expr): expr = describe_token_expr(expr) if self.current.type is TOKEN_EOF: - raise TemplateSyntaxError('unexpected end of template, ' - 'expected %r.' % expr, - self.current.lineno, - self.name, self.filename) - raise TemplateSyntaxError("expected token %r, got %r" % - (expr, describe_token(self.current)), - self.current.lineno, - self.name, self.filename) + raise TemplateSyntaxError( + "unexpected end of template, expected %r." 
% expr, + self.current.lineno, + self.name, + self.filename, + ) + raise TemplateSyntaxError( + "expected token %r, got %r" % (expr, describe_token(self.current)), + self.current.lineno, + self.name, + self.filename, + ) try: return self.current finally: @@ -390,18 +422,20 @@ def expect(self, expr): def get_lexer(environment): """Return a lexer which is probably cached.""" - key = (environment.block_start_string, - environment.block_end_string, - environment.variable_start_string, - environment.variable_end_string, - environment.comment_start_string, - environment.comment_end_string, - environment.line_statement_prefix, - environment.line_comment_prefix, - environment.trim_blocks, - environment.lstrip_blocks, - environment.newline_sequence, - environment.keep_trailing_newline) + key = ( + environment.block_start_string, + environment.block_end_string, + environment.variable_start_string, + environment.variable_end_string, + environment.comment_start_string, + environment.comment_end_string, + environment.line_statement_prefix, + environment.line_comment_prefix, + environment.trim_blocks, + environment.lstrip_blocks, + environment.newline_sequence, + environment.keep_trailing_newline, + ) lexer = _lexer_cache.get(key) if lexer is None: lexer = Lexer(environment) @@ -409,6 +443,19 @@ def get_lexer(environment): return lexer +class OptionalLStrip(tuple): + """A special tuple for marking a point in the state that can have + lstrip applied. + """ + + __slots__ = () + + # Even though it looks like a no-op, creating instances fails + # without this. + def __new__(cls, *members, **kwargs): + return super(OptionalLStrip, cls).__new__(cls, members) + + class Lexer(object): """Class that implements a lexer for a given environment. Automatically created by the environment class, usually you don't have to do that. @@ -419,9 +466,11 @@ class Lexer(object): def __init__(self, environment): # shortcuts - c = lambda x: re.compile(x, re.M | re.S) e = re.escape + def c(x): + return re.compile(x, re.M | re.S) + # lexing rules for tags tag_rules = [ (whitespace_re, TOKEN_WHITESPACE, None), @@ -429,7 +478,7 @@ def __init__(self, environment): (integer_re, TOKEN_INTEGER, None), (name_re, TOKEN_NAME, None), (string_re, TOKEN_STRING, None), - (operator_re, TOKEN_OPERATOR, None) + (operator_re, TOKEN_OPERATOR, None), ] # assemble the root lexing rule. because "|" is ungreedy @@ -441,108 +490,120 @@ def __init__(self, environment): root_tag_rules = compile_rules(environment) # block suffix if trimming is enabled - block_suffix_re = environment.trim_blocks and '\\n?' or '' - - # strip leading spaces if lstrip_blocks is enabled - prefix_re = {} - if environment.lstrip_blocks: - # use '{%+' to manually disable lstrip_blocks behavior - no_lstrip_re = e('+') - # detect overlap between block and variable or comment strings - block_diff = c(r'^%s(.*)' % e(environment.block_start_string)) - # make sure we don't mistake a block for a variable or a comment - m = block_diff.match(environment.comment_start_string) - no_lstrip_re += m and r'|%s' % e(m.group(1)) or '' - m = block_diff.match(environment.variable_start_string) - no_lstrip_re += m and r'|%s' % e(m.group(1)) or '' - - # detect overlap between comment and variable strings - comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string)) - m = comment_diff.match(environment.variable_start_string) - no_variable_re = m and r'(?!%s)' % e(m.group(1)) or '' - - lstrip_re = r'^[ \t]*' - block_prefix_re = r'%s%s(?!%s)|%s\+?' 
% ( - lstrip_re, - e(environment.block_start_string), - no_lstrip_re, - e(environment.block_start_string), - ) - comment_prefix_re = r'%s%s%s|%s\+?' % ( - lstrip_re, - e(environment.comment_start_string), - no_variable_re, - e(environment.comment_start_string), - ) - prefix_re['block'] = block_prefix_re - prefix_re['comment'] = comment_prefix_re - else: - block_prefix_re = '%s' % e(environment.block_start_string) + block_suffix_re = environment.trim_blocks and "\\n?" or "" + + # If lstrip is enabled, it should not be applied if there is any + # non-whitespace between the newline and block. + self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None self.newline_sequence = environment.newline_sequence self.keep_trailing_newline = environment.keep_trailing_newline # global lexing rules self.rules = { - 'root': [ + "root": [ # directives - (c('(.*?)(?:%s)' % '|'.join( - [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % ( - e(environment.block_start_string), - block_prefix_re, - e(environment.block_end_string), - e(environment.block_end_string) - )] + [ - r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r)) - for n, r in root_tag_rules - ])), (TOKEN_DATA, '#bygroup'), '#bygroup'), + ( + c( + "(.*?)(?:%s)" + % "|".join( + [ + r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))" + % ( + e(environment.block_start_string), + e(environment.block_end_string), + e(environment.block_end_string), + ) + ] + + [ + r"(?P<%s>%s(\-|\+|))" % (n, r) + for n, r in root_tag_rules + ] + ) + ), + OptionalLStrip(TOKEN_DATA, "#bygroup"), + "#bygroup", + ), # data - (c('.+'), TOKEN_DATA, None) + (c(".+"), TOKEN_DATA, None), ], # comments TOKEN_COMMENT_BEGIN: [ - (c(r'(.*?)((?:\-%s\s*|%s)%s)' % ( - e(environment.comment_end_string), - e(environment.comment_end_string), - block_suffix_re - )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'), - (c('(.)'), (Failure('Missing end of comment tag'),), None) + ( + c( + r"(.*?)((?:\-%s\s*|%s)%s)" + % ( + e(environment.comment_end_string), + e(environment.comment_end_string), + block_suffix_re, + ) + ), + (TOKEN_COMMENT, TOKEN_COMMENT_END), + "#pop", + ), + (c("(.)"), (Failure("Missing end of comment tag"),), None), ], # blocks TOKEN_BLOCK_BEGIN: [ - (c(r'(?:\-%s\s*|%s)%s' % ( - e(environment.block_end_string), - e(environment.block_end_string), - block_suffix_re - )), TOKEN_BLOCK_END, '#pop'), - ] + tag_rules, + ( + c( + r"(?:\-%s\s*|%s)%s" + % ( + e(environment.block_end_string), + e(environment.block_end_string), + block_suffix_re, + ) + ), + TOKEN_BLOCK_END, + "#pop", + ), + ] + + tag_rules, # variables TOKEN_VARIABLE_BEGIN: [ - (c(r'\-%s\s*|%s' % ( - e(environment.variable_end_string), - e(environment.variable_end_string) - )), TOKEN_VARIABLE_END, '#pop') - ] + tag_rules, + ( + c( + r"\-%s\s*|%s" + % ( + e(environment.variable_end_string), + e(environment.variable_end_string), + ) + ), + TOKEN_VARIABLE_END, + "#pop", + ) + ] + + tag_rules, # raw block TOKEN_RAW_BEGIN: [ - (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % ( - e(environment.block_start_string), - block_prefix_re, - e(environment.block_end_string), - e(environment.block_end_string), - block_suffix_re - )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'), - (c('(.)'), (Failure('Missing end of raw directive'),), None) + ( + c( + r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))" + % ( + e(environment.block_start_string), + e(environment.block_end_string), + e(environment.block_end_string), + block_suffix_re, + ) + ), + OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END), + "#pop", + ), + 
(c("(.)"), (Failure("Missing end of raw directive"),), None), ], # line statements TOKEN_LINESTATEMENT_BEGIN: [ - (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop') - ] + tag_rules, + (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop") + ] + + tag_rules, # line comments TOKEN_LINECOMMENT_BEGIN: [ - (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT, - TOKEN_LINECOMMENT_END), '#pop') - ] + ( + c(r"(.*?)()(?=\n|$)"), + (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END), + "#pop", + ) + ], } def _normalize_newlines(self, value): @@ -550,8 +611,7 @@ def _normalize_newlines(self, value): return newline_re.sub(self.newline_sequence, value) def tokenize(self, source, name=None, filename=None, state=None): - """Calls tokeniter + tokenize and wraps it in a token stream. - """ + """Calls tokeniter + tokenize and wraps it in a token stream.""" stream = self.tokeniter(source, name, filename, state) return TokenStream(self.wrap(stream, name, filename), name, filename) @@ -562,37 +622,40 @@ def wrap(self, stream, name=None, filename=None): for lineno, token, value in stream: if token in ignored_tokens: continue - elif token == 'linestatement_begin': - token = 'block_begin' - elif token == 'linestatement_end': - token = 'block_end' + elif token == TOKEN_LINESTATEMENT_BEGIN: + token = TOKEN_BLOCK_BEGIN + elif token == TOKEN_LINESTATEMENT_END: + token = TOKEN_BLOCK_END # we are not interested in those tokens in the parser - elif token in ('raw_begin', 'raw_end'): + elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END): continue - elif token == 'data': + elif token == TOKEN_DATA: value = self._normalize_newlines(value) - elif token == 'keyword': + elif token == "keyword": token = value - elif token == 'name': + elif token == TOKEN_NAME: value = str(value) if check_ident and not value.isidentifier(): raise TemplateSyntaxError( - 'Invalid character in identifier', - lineno, name, filename) - elif token == 'string': + "Invalid character in identifier", lineno, name, filename + ) + elif token == TOKEN_STRING: # try to unescape string try: - value = self._normalize_newlines(value[1:-1]) \ - .encode('ascii', 'backslashreplace') \ - .decode('unicode-escape') + value = ( + self._normalize_newlines(value[1:-1]) + .encode("ascii", "backslashreplace") + .decode("unicode-escape") + ) except Exception as e: - msg = str(e).split(':')[-1].strip() + msg = str(e).split(":")[-1].strip() raise TemplateSyntaxError(msg, lineno, name, filename) - elif token == 'integer': - value = int(value) - elif token == 'float': - value = float(value) - elif token == 'operator': + elif token == TOKEN_INTEGER: + value = int(value.replace("_", "")) + elif token == TOKEN_FLOAT: + # remove all "_" first to support more Python versions + value = literal_eval(value.replace("_", "")) + elif token == TOKEN_OPERATOR: token = operators[value] yield Token(lineno, token, value) @@ -603,23 +666,21 @@ def tokeniter(self, source, name, filename=None, state=None): source = text_type(source) lines = source.splitlines() if self.keep_trailing_newline and source: - for newline in ('\r\n', '\r', '\n'): + for newline in ("\r\n", "\r", "\n"): if source.endswith(newline): - lines.append('') + lines.append("") break - source = '\n'.join(lines) + source = "\n".join(lines) pos = 0 lineno = 1 - stack = ['root'] - if state is not None and state != 'root': - assert state in ('variable', 'block'), 'invalid state' - stack.append(state + '_begin') - else: - state = 'root' + stack = ["root"] + if state is not None and state != "root": + assert state in ("variable", "block"), "invalid state" + 
stack.append(state + "_begin") statetokens = self.rules[stack[-1]] source_length = len(source) - balancing_stack = [] + lstrip_unless_re = self.lstrip_unless_re while 1: # tokenizer loop @@ -633,13 +694,46 @@ def tokeniter(self, source, name, filename=None, state=None): # are balanced. continue parsing with the lower rule which # is the operator rule. do this only if the end tags look # like operators - if balancing_stack and \ - tokens in ('variable_end', 'block_end', - 'linestatement_end'): + if balancing_stack and tokens in ( + TOKEN_VARIABLE_END, + TOKEN_BLOCK_END, + TOKEN_LINESTATEMENT_END, + ): continue # tuples support more options if isinstance(tokens, tuple): + groups = m.groups() + + if isinstance(tokens, OptionalLStrip): + # Rule supports lstrip. Match will look like + # text, block type, whitespace control, type, control, ... + text = groups[0] + + # Skipping the text and first type, every other group is the + # whitespace control for each type. One of the groups will be + # -, +, or empty string instead of None. + strip_sign = next(g for g in groups[2::2] if g is not None) + + if strip_sign == "-": + # Strip all whitespace between the text and the tag. + groups = (text.rstrip(),) + groups[1:] + elif ( + # Not marked for preserving whitespace. + strip_sign != "+" + # lstrip is enabled. + and lstrip_unless_re is not None + # Not a variable expression. + and not m.groupdict().get(TOKEN_VARIABLE_BEGIN) + ): + # The start of text between the last newline and the tag. + l_pos = text.rfind("\n") + 1 + + # If there's only whitespace between the newline and the + # tag, strip it. + if not lstrip_unless_re.search(text, l_pos): + groups = (text[:l_pos],) + groups[1:] + for idx, token in enumerate(tokens): # failure group if token.__class__ is Failure: @@ -647,51 +741,54 @@ def tokeniter(self, source, name, filename=None, state=None): # bygroup is a bit more complex, in that case we # yield for the current token the first named # group that matched - elif token == '#bygroup': + elif token == "#bygroup": for key, value in iteritems(m.groupdict()): if value is not None: yield lineno, key, value - lineno += value.count('\n') + lineno += value.count("\n") break else: - raise RuntimeError('%r wanted to resolve ' - 'the token dynamically' - ' but no group matched' - % regex) + raise RuntimeError( + "%r wanted to resolve " + "the token dynamically" + " but no group matched" % regex + ) # normal group else: - data = m.group(idx + 1) + data = groups[idx] if data or token not in ignore_if_empty: yield lineno, token, data - lineno += data.count('\n') + lineno += data.count("\n") # strings as token just are yielded as it. 
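The hunk above is the core of the reworked whitespace control: ``tokeniter`` now checks for an ``OptionalLStrip`` marker, reads the captured ``-``/``+``/empty sign for each tag, strips the preceding text outright for ``-``, preserves it for ``+``, and otherwise applies ``lstrip_blocks`` only when nothing but whitespace sits between the last newline and the tag. A minimal sketch of the resulting template-level behavior, with invented template strings::

    from jinja2 import Environment

    env = Environment(lstrip_blocks=True, trim_blocks=True)

    # lstrip_blocks strips indentation before a block tag ...
    print(repr(env.from_string("  {% if True %}x{% endif %}").render()))   # 'x'
    # ... unless the tag opts out with "+"
    print(repr(env.from_string("  {%+ if True %}x{% endif %}").render()))  # '  x'
    # "-" strips all adjacent whitespace, newlines included
    print(repr(env.from_string("a\n  {%- if True %}x{% endif %}").render()))  # 'ax'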
else: data = m.group() # update brace/parentheses balance - if tokens == 'operator': - if data == '{': - balancing_stack.append('}') - elif data == '(': - balancing_stack.append(')') - elif data == '[': - balancing_stack.append(']') - elif data in ('}', ')', ']'): + if tokens == TOKEN_OPERATOR: + if data == "{": + balancing_stack.append("}") + elif data == "(": + balancing_stack.append(")") + elif data == "[": + balancing_stack.append("]") + elif data in ("}", ")", "]"): if not balancing_stack: - raise TemplateSyntaxError('unexpected \'%s\'' % - data, lineno, name, - filename) + raise TemplateSyntaxError( + "unexpected '%s'" % data, lineno, name, filename + ) expected_op = balancing_stack.pop() if expected_op != data: - raise TemplateSyntaxError('unexpected \'%s\', ' - 'expected \'%s\'' % - (data, expected_op), - lineno, name, - filename) + raise TemplateSyntaxError( + "unexpected '%s', " + "expected '%s'" % (data, expected_op), + lineno, + name, + filename, + ) # yield items if data or tokens not in ignore_if_empty: yield lineno, tokens, data - lineno += data.count('\n') + lineno += data.count("\n") # fetch new position into new variable so that we can check # if there is a internal parsing error which would result @@ -701,19 +798,20 @@ def tokeniter(self, source, name, filename=None, state=None): # handle state changes if new_state is not None: # remove the uppermost state - if new_state == '#pop': + if new_state == "#pop": stack.pop() # resolve the new state by group checking - elif new_state == '#bygroup': + elif new_state == "#bygroup": for key, value in iteritems(m.groupdict()): if value is not None: stack.append(key) break else: - raise RuntimeError('%r wanted to resolve the ' - 'new state dynamically but' - ' no group matched' % - regex) + raise RuntimeError( + "%r wanted to resolve the " + "new state dynamically but" + " no group matched" % regex + ) # direct state name given else: stack.append(new_state) @@ -722,8 +820,9 @@ def tokeniter(self, source, name, filename=None, state=None): # this means a loop without break condition, avoid that and # raise error elif pos2 == pos: - raise RuntimeError('%r yielded empty string without ' - 'stack change' % regex) + raise RuntimeError( + "%r yielded empty string without stack change" % regex + ) # publish new function and start again pos = pos2 break @@ -734,6 +833,9 @@ def tokeniter(self, source, name, filename=None, state=None): if pos >= source_length: return # something went wrong - raise TemplateSyntaxError('unexpected char %r at %d' % - (source[pos], pos), lineno, - name, filename) + raise TemplateSyntaxError( + "unexpected char %r at %d" % (source[pos], pos), + lineno, + name, + filename, + ) diff --git a/pipenv/vendor/jinja2/loaders.py b/pipenv/vendor/jinja2/loaders.py index 4c79793760..ce5537a03c 100644 --- a/pipenv/vendor/jinja2/loaders.py +++ b/pipenv/vendor/jinja2/loaders.py @@ -1,22 +1,23 @@ # -*- coding: utf-8 -*- -""" - jinja2.loaders - ~~~~~~~~~~~~~~ - - Jinja loader classes. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. +"""API and implementations for loading templates from different data +sources. 
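For orientation on the API this module implements: a loader only needs a ``get_source`` method returning a ``(source, filename, uptodate)`` triple, raising ``TemplateNotFound`` for unknown names. A minimal in-memory loader sketched against that contract (the class name is invented; jinja2's own ``DictLoader`` covers this case for real)::

    from jinja2 import BaseLoader, Environment, TemplateNotFound

    class MappingLoader(BaseLoader):
        """Serve template sources from a plain dict (illustrative only)."""

        def __init__(self, mapping):
            self.mapping = mapping

        def get_source(self, environment, template):
            try:
                source = self.mapping[template]
            except KeyError:
                raise TemplateNotFound(template)
            # No backing file: filename is None and the source is
            # always considered up to date.
            return source, None, lambda: True

    env = Environment(loader=MappingLoader({"hello.txt": "Hello {{ name }}!"}))
    print(env.get_template("hello.txt").render(name="world"))  # Hello world!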
""" import os +import pkgutil import sys import weakref -from types import ModuleType -from os import path from hashlib import sha1 -from jinja2.exceptions import TemplateNotFound -from jinja2.utils import open_if_exists, internalcode -from jinja2._compat import string_types, iteritems +from importlib import import_module +from os import path +from types import ModuleType + +from ._compat import abc +from ._compat import fspath +from ._compat import iteritems +from ._compat import string_types +from .exceptions import TemplateNotFound +from .utils import internalcode +from .utils import open_if_exists def split_template_path(template): @@ -24,12 +25,14 @@ def split_template_path(template): '..' in the path it will raise a `TemplateNotFound` error. """ pieces = [] - for piece in template.split('/'): - if path.sep in piece \ - or (path.altsep and path.altsep in piece) or \ - piece == path.pardir: + for piece in template.split("/"): + if ( + path.sep in piece + or (path.altsep and path.altsep in piece) + or piece == path.pardir + ): raise TemplateNotFound(template) - elif piece and piece != '.': + elif piece and piece != ".": pieces.append(piece) return pieces @@ -86,15 +89,16 @@ def get_source(self, environment, template): the template will be reloaded. """ if not self.has_source_access: - raise RuntimeError('%s cannot provide access to the source' % - self.__class__.__name__) + raise RuntimeError( + "%s cannot provide access to the source" % self.__class__.__name__ + ) raise TemplateNotFound(template) def list_templates(self): """Iterates over all templates. If the loader does not support that it should raise a :exc:`TypeError` which is the default behavior. """ - raise TypeError('this loader cannot iterate over all templates') + raise TypeError("this loader cannot iterate over all templates") @internalcode def load(self, environment, name, globals=None): @@ -131,8 +135,9 @@ def load(self, environment, name, globals=None): bucket.code = code bcc.set_bucket(bucket) - return environment.template_class.from_code(environment, code, - globals, uptodate) + return environment.template_class.from_code( + environment, code, globals, uptodate + ) class FileSystemLoader(BaseLoader): @@ -153,14 +158,20 @@ class FileSystemLoader(BaseLoader): >>> loader = FileSystemLoader('/path/to/templates', followlinks=True) - .. versionchanged:: 2.8+ - The *followlinks* parameter was added. + .. versionchanged:: 2.8 + The ``followlinks`` parameter was added. """ - def __init__(self, searchpath, encoding='utf-8', followlinks=False): - if isinstance(searchpath, string_types): + def __init__(self, searchpath, encoding="utf-8", followlinks=False): + if not isinstance(searchpath, abc.Iterable) or isinstance( + searchpath, string_types + ): searchpath = [searchpath] - self.searchpath = list(searchpath) + + # In Python 3.5, os.path.join doesn't support Path. This can be + # simplified to list(searchpath) when Python 3.5 is dropped. 
+ self.searchpath = [fspath(p) for p in searchpath] + self.encoding = encoding self.followlinks = followlinks @@ -183,6 +194,7 @@ def uptodate(): return path.getmtime(filename) == mtime except OSError: return False + return contents, filename, uptodate raise TemplateNotFound(template) @@ -190,12 +202,14 @@ def list_templates(self): found = set() for searchpath in self.searchpath: walk_dir = os.walk(searchpath, followlinks=self.followlinks) - for dirpath, dirnames, filenames in walk_dir: + for dirpath, _, filenames in walk_dir: for filename in filenames: - template = os.path.join(dirpath, filename) \ - [len(searchpath):].strip(os.path.sep) \ - .replace(os.path.sep, '/') - if template[:2] == './': + template = ( + os.path.join(dirpath, filename)[len(searchpath) :] + .strip(os.path.sep) + .replace(os.path.sep, "/") + ) + if template[:2] == "./": template = template[2:] if template not in found: found.add(template) @@ -203,66 +217,141 @@ def list_templates(self): class PackageLoader(BaseLoader): - """Load templates from python eggs or packages. It is constructed with - the name of the python package and the path to the templates in that - package:: + """Load templates from a directory in a Python package. - loader = PackageLoader('mypackage', 'views') + :param package_name: Import name of the package that contains the + template directory. + :param package_path: Directory within the imported package that + contains the templates. + :param encoding: Encoding of template files. - If the package path is not given, ``'templates'`` is assumed. + The following example looks up templates in the ``pages`` directory + within the ``project.ui`` package. - Per default the template encoding is ``'utf-8'`` which can be changed - by setting the `encoding` parameter to something else. Due to the nature - of eggs it's only possible to reload templates if the package was loaded - from the file system and not a zip file. + .. code-block:: python + + loader = PackageLoader("project.ui", "pages") + + Only packages installed as directories (standard pip behavior) or + zip/egg files (less common) are supported. The Python API for + introspecting data in packages is too limited to support other + installation methods the way this loader requires. + + There is limited support for :pep:`420` namespace packages. The + template directory is assumed to only be in one namespace + contributor. Zip files contributing to a namespace are not + supported. + + .. versionchanged:: 2.11.0 + No longer uses ``setuptools`` as a dependency. + + .. versionchanged:: 2.11.0 + Limited PEP 420 namespace package support. """ - def __init__(self, package_name, package_path='templates', - encoding='utf-8'): - from pkg_resources import DefaultProvider, ResourceManager, \ - get_provider - provider = get_provider(package_name) - self.encoding = encoding - self.manager = ResourceManager() - self.filesystem_bound = isinstance(provider, DefaultProvider) - self.provider = provider + def __init__(self, package_name, package_path="templates", encoding="utf-8"): + if package_path == os.path.curdir: + package_path = "" + elif package_path[:2] == os.path.curdir + os.path.sep: + package_path = package_path[2:] + + package_path = os.path.normpath(package_path).rstrip(os.path.sep) self.package_path = package_path + self.package_name = package_name + self.encoding = encoding + + # Make sure the package exists. This also makes namespace + # packages work, otherwise get_loader returns None. 
+ import_module(package_name) + self._loader = loader = pkgutil.get_loader(package_name) + + # Zip loader's archive attribute points at the zip. + self._archive = getattr(loader, "archive", None) + self._template_root = None + + if hasattr(loader, "get_filename"): + # A standard directory package, or a zip package. + self._template_root = os.path.join( + os.path.dirname(loader.get_filename(package_name)), package_path + ) + elif hasattr(loader, "_path"): + # A namespace package, limited support. Find the first + # contributor with the template directory. + for root in loader._path: + root = os.path.join(root, package_path) + + if os.path.isdir(root): + self._template_root = root + break + + if self._template_root is None: + raise ValueError( + "The %r package was not installed in a way that" + " PackageLoader understands." % package_name + ) def get_source(self, environment, template): - pieces = split_template_path(template) - p = '/'.join((self.package_path,) + tuple(pieces)) - if not self.provider.has_resource(p): - raise TemplateNotFound(template) + p = os.path.join(self._template_root, *split_template_path(template)) - filename = uptodate = None - if self.filesystem_bound: - filename = self.provider.get_resource_filename(self.manager, p) - mtime = path.getmtime(filename) - def uptodate(): - try: - return path.getmtime(filename) == mtime - except OSError: - return False + if self._archive is None: + # Package is a directory. + if not os.path.isfile(p): + raise TemplateNotFound(template) + + with open(p, "rb") as f: + source = f.read() + + mtime = os.path.getmtime(p) + + def up_to_date(): + return os.path.isfile(p) and os.path.getmtime(p) == mtime - source = self.provider.get_resource_string(self.manager, p) - return source.decode(self.encoding), filename, uptodate + else: + # Package is a zip file. + try: + source = self._loader.get_data(p) + except OSError: + raise TemplateNotFound(template) + + # Could use the zip's mtime for all template mtimes, but + # would need to safely reload the module if it's out of + # date, so just report it as always current. + up_to_date = None + + return source.decode(self.encoding), p, up_to_date def list_templates(self): - path = self.package_path - if path[:2] == './': - path = path[2:] - elif path == '.': - path = '' - offset = len(path) results = [] - def _walk(path): - for filename in self.provider.resource_listdir(path): - fullname = path + '/' + filename - if self.provider.resource_isdir(fullname): - _walk(fullname) - else: - results.append(fullname[offset:].lstrip('/')) - _walk(path) + + if self._archive is None: + # Package is a directory. + offset = len(self._template_root) + + for dirpath, _, filenames in os.walk(self._template_root): + dirpath = dirpath[offset:].lstrip(os.path.sep) + results.extend( + os.path.join(dirpath, name).replace(os.path.sep, "/") + for name in filenames + ) + else: + if not hasattr(self._loader, "_files"): + raise TypeError( + "This zip import does not have the required" + " metadata to list templates." + ) + + # Package is a zip file. + prefix = ( + self._template_root[len(self._archive) :].lstrip(os.path.sep) + + os.path.sep + ) + offset = len(prefix) + + for name in self._loader._files.keys(): + # Find names under the templates directory that aren't directories. 
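The rewritten ``PackageLoader`` above locates the template root through ``import_module`` and ``pkgutil.get_loader`` rather than ``pkg_resources``, covering directory installs, zip/egg archives, and (partially) PEP 420 namespace packages. Typical usage is unchanged; the package, directory, and template names below are placeholders::

    from jinja2 import Environment, PackageLoader

    # Templates live in the "pages" directory of the installed
    # "project.ui" package (hypothetical names from the docstring).
    env = Environment(loader=PackageLoader("project.ui", "pages"))
    template = env.get_template("index.html")  # hypothetical template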
+ if name.startswith(prefix) and name[-1] != os.path.sep: + results.append(name[offset:].replace(os.path.sep, "/")) + results.sort() return results @@ -334,7 +423,7 @@ class PrefixLoader(BaseLoader): by loading ``'app2/index.html'`` the file from the second. """ - def __init__(self, mapping, delimiter='/'): + def __init__(self, mapping, delimiter="/"): self.mapping = mapping self.delimiter = delimiter @@ -434,19 +523,20 @@ class ModuleLoader(BaseLoader): has_source_access = False def __init__(self, path): - package_name = '_jinja2_module_templates_%x' % id(self) + package_name = "_jinja2_module_templates_%x" % id(self) # create a fake module that looks for the templates in the # path given. mod = _TemplateModule(package_name) - if isinstance(path, string_types): + + if not isinstance(path, abc.Iterable) or isinstance(path, string_types): path = [path] - else: - path = list(path) - mod.__path__ = path - sys.modules[package_name] = weakref.proxy(mod, - lambda x: sys.modules.pop(package_name, None)) + mod.__path__ = [fspath(p) for p in path] + + sys.modules[package_name] = weakref.proxy( + mod, lambda x: sys.modules.pop(package_name, None) + ) # the only strong reference, the sys.modules entry is weak # so that the garbage collector can remove it once the @@ -456,20 +546,20 @@ def __init__(self, path): @staticmethod def get_template_key(name): - return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest() + return "tmpl_" + sha1(name.encode("utf-8")).hexdigest() @staticmethod def get_module_filename(name): - return ModuleLoader.get_template_key(name) + '.py' + return ModuleLoader.get_template_key(name) + ".py" @internalcode def load(self, environment, name, globals=None): key = self.get_template_key(name) - module = '%s.%s' % (self.package_name, key) + module = "%s.%s" % (self.package_name, key) mod = getattr(self.module, module, None) if mod is None: try: - mod = __import__(module, None, None, ['root']) + mod = __import__(module, None, None, ["root"]) except ImportError: raise TemplateNotFound(name) @@ -478,4 +568,5 @@ def load(self, environment, name, globals=None): sys.modules.pop(module, None) return environment.template_class.from_module_dict( - environment, mod.__dict__, globals) + environment, mod.__dict__, globals + ) diff --git a/pipenv/vendor/jinja2/meta.py b/pipenv/vendor/jinja2/meta.py index 7421914f77..3795aace59 100644 --- a/pipenv/vendor/jinja2/meta.py +++ b/pipenv/vendor/jinja2/meta.py @@ -1,25 +1,18 @@ # -*- coding: utf-8 -*- +"""Functions that expose information about templates that might be +interesting for introspection. """ - jinja2.meta - ~~~~~~~~~~~ - - This module implements various functions that exposes information about - templates that might be interesting for various kinds of applications. - - :copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -from jinja2 import nodes -from jinja2.compiler import CodeGenerator -from jinja2._compat import string_types, iteritems +from . 
import nodes +from ._compat import iteritems +from ._compat import string_types +from .compiler import CodeGenerator class TrackingCodeGenerator(CodeGenerator): """We abuse the code generator for introspection.""" def __init__(self, environment): - CodeGenerator.__init__(self, environment, '<introspection>', - '<introspection>') + CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>") self.undeclared_identifiers = set() def write(self, x): @@ -29,7 +22,7 @@ def enter_frame(self, frame): """Remember all undeclared identifiers.""" CodeGenerator.enter_frame(self, frame) for _, (action, param) in iteritems(frame.symbols.loads): - if action == 'resolve': + if action == "resolve" and param not in self.environment.globals: self.undeclared_identifiers.add(param) @@ -72,8 +65,9 @@ def find_referenced_templates(ast): This function is useful for dependency tracking. For example if you want to rebuild parts of the website after a layout template has changed. """ - for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import, - nodes.Include)): + for node in ast.find_all( + (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include) + ): if not isinstance(node.template, nodes.Const): # a tuple with some non consts in there if isinstance(node.template, (nodes.Tuple, nodes.List)): @@ -96,8 +90,9 @@ def find_referenced_templates(ast): # a tuple or list (latter *should* not happen) made of consts, # yield the consts that are strings. We could warn here for # non string values - elif isinstance(node, nodes.Include) and \ - isinstance(node.template.value, (tuple, list)): + elif isinstance(node, nodes.Include) and isinstance( + node.template.value, (tuple, list) + ): for template_name in node.template.value: if isinstance(template_name, string_types): yield template_name diff --git a/pipenv/vendor/jinja2/nativetypes.py b/pipenv/vendor/jinja2/nativetypes.py index fe17e4138d..9866c962dc 100644 --- a/pipenv/vendor/jinja2/nativetypes.py +++ b/pipenv/vendor/jinja2/nativetypes.py @@ -1,19 +1,27 @@ -import sys +import types from ast import literal_eval -from itertools import islice, chain -from jinja2 import nodes -from jinja2._compat import text_type -from jinja2.compiler import CodeGenerator, has_safe_repr -from jinja2.environment import Environment, Template -from jinja2.utils import concat, escape - - -def native_concat(nodes): - """Return a native Python type from the list of compiled nodes. If the - result is a single node, its value is returned. Otherwise, the nodes are - concatenated as strings. If the result can be parsed with - :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the - string is returned. +from itertools import chain +from itertools import islice + +from . import nodes +from ._compat import text_type +from .compiler import CodeGenerator +from .compiler import has_safe_repr +from .environment import Environment +from .environment import Template + + +def native_concat(nodes, preserve_quotes=True): + """Return a native Python type from the list of compiled nodes. If + the result is a single node, its value is returned. Otherwise, the + nodes are concatenated as strings. If the result can be parsed with + :func:`ast.literal_eval`, the parsed value is returned. Otherwise, + the string is returned. + + :param nodes: Iterable of nodes to concatenate. + :param preserve_quotes: Whether to re-wrap literal strings with + quotes, to preserve quotes around expressions for later parsing. + Should be ``False`` in :meth:`NativeEnvironment.render`. 
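The ``meta.py`` hunk above tightens ``find_undeclared_variables``: names already satisfied by ``environment.globals`` are no longer reported. A quick sketch of the two public helpers touched in that file::

    from jinja2 import Environment, meta

    env = Environment()
    env.globals["site_name"] = "example"

    ast = env.parse("{{ site_name }} {{ greeting }}")
    # "site_name" now resolves from globals, so only "greeting" is left.
    print(meta.find_undeclared_variables(ast))  # {'greeting'}

    ast = env.parse("{% extends 'base.html' %}")
    print(list(meta.find_referenced_templates(ast)))  # ['base.html']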
""" head = list(islice(nodes, 2)) @@ -21,200 +29,83 @@ def native_concat(nodes): return None if len(head) == 1: - out = head[0] + raw = head[0] else: - out = u''.join([text_type(v) for v in chain(head, nodes)]) + if isinstance(nodes, types.GeneratorType): + nodes = chain(head, nodes) + raw = u"".join([text_type(v) for v in nodes]) try: - return literal_eval(out) + literal = literal_eval(raw) except (ValueError, SyntaxError, MemoryError): - return out + return raw + + # If literal_eval returned a string, re-wrap with the original + # quote character to avoid dropping quotes between expression nodes. + # Without this, "'{{ a }}', '{{ b }}'" results in "a, b", but should + # be ('a', 'b'). + if preserve_quotes and isinstance(literal, str): + return "{quote}{}{quote}".format(literal, quote=raw[0]) + + return literal class NativeCodeGenerator(CodeGenerator): - """A code generator which avoids injecting ``to_string()`` calls around the - internal code Jinja uses to render templates. + """A code generator which renders Python types by not adding + ``to_string()`` around output nodes, and using :func:`native_concat` + to convert complex strings back to Python types if possible. """ - def visit_Output(self, node, frame): - """Same as :meth:`CodeGenerator.visit_Output`, but do not call - ``to_string`` on output nodes in generated code. - """ - if self.has_known_extends and frame.require_output_check: - return - - finalize = self.environment.finalize - finalize_context = getattr(finalize, 'contextfunction', False) - finalize_eval = getattr(finalize, 'evalcontextfunction', False) - finalize_env = getattr(finalize, 'environmentfunction', False) - - if finalize is not None: - if finalize_context or finalize_eval: - const_finalize = None - elif finalize_env: - def const_finalize(x): - return finalize(self.environment, x) - else: - const_finalize = finalize - else: - def const_finalize(x): - return x - - # If we are inside a frame that requires output checking, we do so. - outdent_later = False - - if frame.require_output_check: - self.writeline('if parent_template is None:') - self.indent() - outdent_later = True - - # Try to evaluate as many chunks as possible into a static string at - # compile time. 
- body = [] - - for child in node.nodes: - try: - if const_finalize is None: - raise nodes.Impossible() - - const = child.as_const(frame.eval_ctx) - if not has_safe_repr(const): - raise nodes.Impossible() - except nodes.Impossible: - body.append(child) - continue - - # the frame can't be volatile here, because otherwise the as_const - # function would raise an Impossible exception at that point - try: - if frame.eval_ctx.autoescape: - if hasattr(const, '__html__'): - const = const.__html__() - else: - const = escape(const) - - const = const_finalize(const) - except Exception: - # if something goes wrong here we evaluate the node at runtime - # for easier debugging - body.append(child) - continue - - if body and isinstance(body[-1], list): - body[-1].append(const) - else: - body.append([const]) - - # if we have less than 3 nodes or a buffer we yield or extend/append - if len(body) < 3 or frame.buffer is not None: - if frame.buffer is not None: - # for one item we append, for more we extend - if len(body) == 1: - self.writeline('%s.append(' % frame.buffer) - else: - self.writeline('%s.extend((' % frame.buffer) - - self.indent() - - for item in body: - if isinstance(item, list): - val = repr(native_concat(item)) - - if frame.buffer is None: - self.writeline('yield ' + val) - else: - self.writeline(val + ',') - else: - if frame.buffer is None: - self.writeline('yield ', item) - else: - self.newline(item) - - close = 0 - - if finalize is not None: - self.write('environment.finalize(') - - if finalize_context: - self.write('context, ') - - close += 1 - - self.visit(item, frame) - - if close > 0: - self.write(')' * close) - - if frame.buffer is not None: - self.write(',') - - if frame.buffer is not None: - # close the open parentheses - self.outdent() - self.writeline(len(body) == 1 and ')' or '))') - - # otherwise we create a format string as this is faster in that case - else: - format = [] - arguments = [] - - for item in body: - if isinstance(item, list): - format.append(native_concat(item).replace('%', '%%')) - else: - format.append('%s') - arguments.append(item) - - self.writeline('yield ') - self.write(repr(concat(format)) + ' % (') - self.indent() - - for argument in arguments: - self.newline(argument) - close = 0 - - if finalize is not None: - self.write('environment.finalize(') - - if finalize_context: - self.write('context, ') - elif finalize_eval: - self.write('context.eval_ctx, ') - elif finalize_env: - self.write('environment, ') - - close += 1 - - self.visit(argument, frame) - self.write(')' * close + ', ') - - self.outdent() - self.writeline(')') + @staticmethod + def _default_finalize(value): + return value - if outdent_later: - self.outdent() + def _output_const_repr(self, group): + return repr(native_concat(group)) + def _output_child_to_const(self, node, frame, finalize): + const = node.as_const(frame.eval_ctx) -class NativeTemplate(Template): - def render(self, *args, **kwargs): - """Render the template to produce a native Python type. If the result - is a single node, its value is returned. Otherwise, the nodes are - concatenated as strings. If the result can be parsed with - :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the - string is returned. 
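The render contract spelled out in the docstring above carries over to the rewritten ``NativeTemplate.render`` in the next hunk: a lone expression comes back as a native Python value rather than a string. Minimal usage sketch::

    from jinja2.nativetypes import NativeEnvironment

    env = NativeEnvironment()
    result = env.from_string("{{ x + y }}").render(x=4, y=2)
    # A single expression round-trips through ast.literal_eval,
    # so the result is the int 6, not the string "6".
    assert result == 6 and isinstance(result, int)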
- """ - vars = dict(*args, **kwargs) + if not has_safe_repr(const): + raise nodes.Impossible() - try: - return native_concat(self.root_render_func(self.new_context(vars))) - except Exception: - exc_info = sys.exc_info() + if isinstance(node, nodes.TemplateData): + return const + + return finalize.const(const) + + def _output_child_pre(self, node, frame, finalize): + if finalize.src is not None: + self.write(finalize.src) - return self.environment.handle_exception(exc_info, True) + def _output_child_post(self, node, frame, finalize): + if finalize.src is not None: + self.write(")") class NativeEnvironment(Environment): """An environment that renders templates to native Python types.""" code_generator_class = NativeCodeGenerator - template_class = NativeTemplate + + +class NativeTemplate(Template): + environment_class = NativeEnvironment + + def render(self, *args, **kwargs): + """Render the template to produce a native Python type. If the + result is a single node, its value is returned. Otherwise, the + nodes are concatenated as strings. If the result can be parsed + with :func:`ast.literal_eval`, the parsed value is returned. + Otherwise, the string is returned. + """ + vars = dict(*args, **kwargs) + try: + return native_concat( + self.root_render_func(self.new_context(vars)), preserve_quotes=False + ) + except Exception: + return self.environment.handle_exception() + + +NativeEnvironment.template_class = NativeTemplate diff --git a/pipenv/vendor/jinja2/nodes.py b/pipenv/vendor/jinja2/nodes.py index 4d9a01ad8b..9f3edc05f9 100644 --- a/pipenv/vendor/jinja2/nodes.py +++ b/pipenv/vendor/jinja2/nodes.py @@ -1,54 +1,39 @@ # -*- coding: utf-8 -*- +"""AST nodes generated by the parser for the compiler. Also provides +some node tree helper functions used by the parser and compiler in order +to normalize nodes. """ - jinja2.nodes - ~~~~~~~~~~~~ - - This module implements additional nodes derived from the ast base node. - - It also provides some node tree helper functions like `in_lineno` and - `get_nodes` used by the parser and translator in order to normalize - python and jinja nodes. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. 
-""" -import types import operator - from collections import deque -from jinja2.utils import Markup -from jinja2._compat import izip, with_metaclass, text_type, PY2 - -#: the types we support for context functions -_context_function_types = (types.FunctionType, types.MethodType) +from markupsafe import Markup +from ._compat import izip +from ._compat import PY2 +from ._compat import text_type +from ._compat import with_metaclass _binop_to_func = { - '*': operator.mul, - '/': operator.truediv, - '//': operator.floordiv, - '**': operator.pow, - '%': operator.mod, - '+': operator.add, - '-': operator.sub + "*": operator.mul, + "/": operator.truediv, + "//": operator.floordiv, + "**": operator.pow, + "%": operator.mod, + "+": operator.add, + "-": operator.sub, } -_uaop_to_func = { - 'not': operator.not_, - '+': operator.pos, - '-': operator.neg -} +_uaop_to_func = {"not": operator.not_, "+": operator.pos, "-": operator.neg} _cmpop_to_func = { - 'eq': operator.eq, - 'ne': operator.ne, - 'gt': operator.gt, - 'gteq': operator.ge, - 'lt': operator.lt, - 'lteq': operator.le, - 'in': lambda a, b: a in b, - 'notin': lambda a, b: a not in b + "eq": operator.eq, + "ne": operator.ne, + "gt": operator.gt, + "gteq": operator.ge, + "lt": operator.lt, + "lteq": operator.le, + "in": lambda a, b: a in b, + "notin": lambda a, b: a not in b, } @@ -61,16 +46,16 @@ class NodeType(type): inheritance. fields and attributes from the parent class are automatically forwarded to the child.""" - def __new__(cls, name, bases, d): - for attr in 'fields', 'attributes': + def __new__(mcs, name, bases, d): + for attr in "fields", "attributes": storage = [] storage.extend(getattr(bases[0], attr, ())) storage.extend(d.get(attr, ())) - assert len(bases) == 1, 'multiple inheritance not allowed' - assert len(storage) == len(set(storage)), 'layout conflict' + assert len(bases) == 1, "multiple inheritance not allowed" + assert len(storage) == len(set(storage)), "layout conflict" d[attr] = tuple(storage) - d.setdefault('abstract', False) - return type.__new__(cls, name, bases, d) + d.setdefault("abstract", False) + return type.__new__(mcs, name, bases, d) class EvalContext(object): @@ -97,15 +82,17 @@ def revert(self, old): def get_eval_context(node, ctx): if ctx is None: if node.environment is None: - raise RuntimeError('if no eval context is passed, the ' - 'node must have an attached ' - 'environment.') + raise RuntimeError( + "if no eval context is passed, the " + "node must have an attached " + "environment." + ) return EvalContext(node.environment) return ctx class Node(with_metaclass(NodeType, object)): - """Baseclass for all Jinja2 nodes. There are a number of nodes available + """Baseclass for all Jinja nodes. There are a number of nodes available of different types. There are four major types: - :class:`Stmt`: statements @@ -120,30 +107,32 @@ class Node(with_metaclass(NodeType, object)): The `environment` attribute is set at the end of the parsing process for all nodes automatically. 
""" + fields = () - attributes = ('lineno', 'environment') + attributes = ("lineno", "environment") abstract = True def __init__(self, *fields, **attributes): if self.abstract: - raise TypeError('abstract nodes are not instanciable') + raise TypeError("abstract nodes are not instantiable") if fields: if len(fields) != len(self.fields): if not self.fields: - raise TypeError('%r takes 0 arguments' % - self.__class__.__name__) - raise TypeError('%r takes 0 or %d argument%s' % ( - self.__class__.__name__, - len(self.fields), - len(self.fields) != 1 and 's' or '' - )) + raise TypeError("%r takes 0 arguments" % self.__class__.__name__) + raise TypeError( + "%r takes 0 or %d argument%s" + % ( + self.__class__.__name__, + len(self.fields), + len(self.fields) != 1 and "s" or "", + ) + ) for name, arg in izip(self.fields, fields): setattr(self, name, arg) for attr in self.attributes: setattr(self, attr, attributes.pop(attr, None)) if attributes: - raise TypeError('unknown attribute %r' % - next(iter(attributes))) + raise TypeError("unknown attribute %r" % next(iter(attributes))) def iter_fields(self, exclude=None, only=None): """This method iterates over all fields that are defined and yields @@ -153,9 +142,11 @@ def iter_fields(self, exclude=None, only=None): should be sets or tuples of field names. """ for name in self.fields: - if (exclude is only is None) or \ - (exclude is not None and name not in exclude) or \ - (only is not None and name in only): + if ( + (exclude is only is None) + or (exclude is not None and name not in exclude) + or (only is not None and name in only) + ): try: yield name, getattr(self, name) except AttributeError: @@ -166,7 +157,7 @@ def iter_child_nodes(self, exclude=None, only=None): over all fields and yields the values of they are nodes. If the value of a field is a list all the nodes in that list are returned. 
""" - for field, item in self.iter_fields(exclude, only): + for _, item in self.iter_fields(exclude, only): if isinstance(item, list): for n in item: if isinstance(n, Node): @@ -200,7 +191,7 @@ def set_ctx(self, ctx): todo = deque([self]) while todo: node = todo.popleft() - if 'ctx' in node.fields: + if "ctx" in node.fields: node.ctx = ctx todo.extend(node.iter_child_nodes()) return self @@ -210,7 +201,7 @@ def set_lineno(self, lineno, override=False): todo = deque([self]) while todo: node = todo.popleft() - if 'lineno' in node.attributes: + if "lineno" in node.attributes: if node.lineno is None or override: node.lineno = lineno todo.extend(node.iter_child_nodes()) @@ -226,8 +217,9 @@ def set_environment(self, environment): return self def __eq__(self, other): - return type(self) is type(other) and \ - tuple(self.iter_fields()) == tuple(other.iter_fields()) + return type(self) is type(other) and tuple(self.iter_fields()) == tuple( + other.iter_fields() + ) def __ne__(self, other): return not self.__eq__(other) @@ -236,10 +228,9 @@ def __ne__(self, other): __hash__ = object.__hash__ def __repr__(self): - return '%s(%s)' % ( + return "%s(%s)" % ( self.__class__.__name__, - ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for - arg in self.fields) + ", ".join("%s=%r" % (arg, getattr(self, arg, None)) for arg in self.fields), ) def dump(self): @@ -248,37 +239,39 @@ def _dump(node): buf.append(repr(node)) return - buf.append('nodes.%s(' % node.__class__.__name__) + buf.append("nodes.%s(" % node.__class__.__name__) if not node.fields: - buf.append(')') + buf.append(")") return for idx, field in enumerate(node.fields): if idx: - buf.append(', ') + buf.append(", ") value = getattr(node, field) if isinstance(value, list): - buf.append('[') + buf.append("[") for idx, item in enumerate(value): if idx: - buf.append(', ') + buf.append(", ") _dump(item) - buf.append(']') + buf.append("]") else: _dump(value) - buf.append(')') + buf.append(")") + buf = [] _dump(self) - return ''.join(buf) - + return "".join(buf) class Stmt(Node): """Base node for all statements.""" + abstract = True class Helper(Node): """Nodes that exist in a specific context only.""" + abstract = True @@ -286,19 +279,22 @@ class Template(Node): """Node that represents a template. This must be the outermost node that is passed to the compiler. """ - fields = ('body',) + + fields = ("body",) class Output(Stmt): """A node that holds multiple expressions which are then printed out. This is used both for the `print` statement and the regular template data. """ - fields = ('nodes',) + + fields = ("nodes",) class Extends(Stmt): """Represents an extends statement.""" - fields = ('template',) + + fields = ("template",) class For(Stmt): @@ -309,12 +305,14 @@ class For(Stmt): For filtered nodes an expression can be stored as `test`, otherwise `None`. """ - fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive') + + fields = ("target", "iter", "body", "else_", "test", "recursive") class If(Stmt): """If `test` is true, `body` is rendered, else `else_`.""" - fields = ('test', 'body', 'elif_', 'else_') + + fields = ("test", "body", "elif_", "else_") class Macro(Stmt): @@ -322,19 +320,22 @@ class Macro(Stmt): arguments and `defaults` a list of defaults if there are any. `body` is a list of nodes for the macro body. """ - fields = ('name', 'args', 'defaults', 'body') + + fields = ("name", "args", "defaults", "body") class CallBlock(Stmt): """Like a macro without a name but a call instead. 
`call` is called with the unnamed macro as `caller` argument this node holds. """ - fields = ('call', 'args', 'defaults', 'body') + + fields = ("call", "args", "defaults", "body") class FilterBlock(Stmt): """Node for filter sections.""" - fields = ('body', 'filter') + + fields = ("body", "filter") class With(Stmt): @@ -343,22 +344,26 @@ class With(Stmt): .. versionadded:: 2.9.3 """ - fields = ('targets', 'values', 'body') + + fields = ("targets", "values", "body") class Block(Stmt): """A node that represents a block.""" - fields = ('name', 'body', 'scoped') + + fields = ("name", "body", "scoped") class Include(Stmt): """A node that represents the include tag.""" - fields = ('template', 'with_context', 'ignore_missing') + + fields = ("template", "with_context", "ignore_missing") class Import(Stmt): """A node that represents the import tag.""" - fields = ('template', 'target', 'with_context') + + fields = ("template", "target", "with_context") class FromImport(Stmt): @@ -372,26 +377,31 @@ class FromImport(Stmt): The list of names may contain tuples if aliases are wanted. """ - fields = ('template', 'names', 'with_context') + + fields = ("template", "names", "with_context") class ExprStmt(Stmt): """A statement that evaluates an expression and discards the result.""" - fields = ('node',) + + fields = ("node",) class Assign(Stmt): """Assigns an expression to a target.""" - fields = ('target', 'node') + + fields = ("target", "node") class AssignBlock(Stmt): """Assigns a block to a target.""" - fields = ('target', 'filter', 'body') + + fields = ("target", "filter", "body") class Expr(Node): """Baseclass for all expressions.""" + abstract = True def as_const(self, eval_ctx=None): @@ -414,15 +424,18 @@ def can_assign(self): class BinExpr(Expr): """Baseclass for all binary expressions.""" - fields = ('left', 'right') + + fields = ("left", "right") operator = None abstract = True def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) # intercepted operators cannot be folded at compile time - if self.environment.sandboxed and \ - self.operator in self.environment.intercepted_binops: + if ( + self.environment.sandboxed + and self.operator in self.environment.intercepted_binops + ): raise Impossible() f = _binop_to_func[self.operator] try: @@ -433,15 +446,18 @@ def as_const(self, eval_ctx=None): class UnaryExpr(Expr): """Baseclass for all unary expressions.""" - fields = ('node',) + + fields = ("node",) operator = None abstract = True def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) # intercepted operators cannot be folded at compile time - if self.environment.sandboxed and \ - self.operator in self.environment.intercepted_unops: + if ( + self.environment.sandboxed + and self.operator in self.environment.intercepted_unops + ): raise Impossible() f = _uaop_to_func[self.operator] try: @@ -458,16 +474,17 @@ class Name(Expr): - `load`: load that name - `param`: like `store` but if the name was defined as function parameter. 
""" - fields = ('name', 'ctx') + + fields = ("name", "ctx") def can_assign(self): - return self.name not in ('true', 'false', 'none', - 'True', 'False', 'None') + return self.name not in ("true", "false", "none", "True", "False", "None") class NSRef(Expr): """Reference to a namespace value assignment""" - fields = ('name', 'attr') + + fields = ("name", "attr") def can_assign(self): # We don't need any special checks here; NSRef assignments have a @@ -479,6 +496,7 @@ def can_assign(self): class Literal(Expr): """Baseclass for literals.""" + abstract = True @@ -488,14 +506,18 @@ class Const(Literal): complex values such as lists too. Only constants with a safe representation (objects where ``eval(repr(x)) == x`` is true). """ - fields = ('value',) + + fields = ("value",) def as_const(self, eval_ctx=None): rv = self.value - if PY2 and type(rv) is text_type and \ - self.environment.policies['compiler.ascii_str']: + if ( + PY2 + and type(rv) is text_type + and self.environment.policies["compiler.ascii_str"] + ): try: - rv = rv.encode('ascii') + rv = rv.encode("ascii") except UnicodeError: pass return rv @@ -507,6 +529,7 @@ def from_untrusted(cls, value, lineno=None, environment=None): an `Impossible` exception. """ from .compiler import has_safe_repr + if not has_safe_repr(value): raise Impossible() return cls(value, lineno=lineno, environment=environment) @@ -514,7 +537,8 @@ def from_untrusted(cls, value, lineno=None, environment=None): class TemplateData(Literal): """A constant template string.""" - fields = ('data',) + + fields = ("data",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -530,7 +554,8 @@ class Tuple(Literal): for subscripts. Like for :class:`Name` `ctx` specifies if the tuple is used for loading the names or storing. """ - fields = ('items', 'ctx') + + fields = ("items", "ctx") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -545,7 +570,8 @@ def can_assign(self): class List(Literal): """Any list literal such as ``[1, 2, 3]``""" - fields = ('items',) + + fields = ("items",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -556,7 +582,8 @@ class Dict(Literal): """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of :class:`Pair` nodes. """ - fields = ('items',) + + fields = ("items",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -565,7 +592,8 @@ def as_const(self, eval_ctx=None): class Pair(Helper): """A key, value pair for dicts.""" - fields = ('key', 'value') + + fields = ("key", "value") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -574,7 +602,8 @@ def as_const(self, eval_ctx=None): class Keyword(Helper): """A key, value pair for keyword arguments where key is a string.""" - fields = ('key', 'value') + + fields = ("key", "value") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -585,7 +614,8 @@ class CondExpr(Expr): """A conditional expression (inline if expression). (``{{ foo if bar else baz }}``) """ - fields = ('test', 'expr1', 'expr2') + + fields = ("test", "expr1", "expr2") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -626,7 +656,7 @@ class Filter(Expr): filtered. Buffers are created by macros and filter blocks. 
""" - fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs') + fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -636,28 +666,27 @@ def as_const(self, eval_ctx=None): # we have to be careful here because we call filter_ below. # if this variable would be called filter, 2to3 would wrap the - # call in a list beause it is assuming we are talking about the + # call in a list because it is assuming we are talking about the # builtin filter function here which no longer returns a list in # python 3. because of that, do not rename filter_ to filter! filter_ = self.environment.filters.get(self.name) - if filter_ is None or getattr(filter_, 'contextfilter', False): + if filter_ is None or getattr(filter_, "contextfilter", False): raise Impossible() # We cannot constant handle async filters, so we need to make sure # to not go down this path. - if ( - eval_ctx.environment.is_async - and getattr(filter_, 'asyncfiltervariant', False) + if eval_ctx.environment.is_async and getattr( + filter_, "asyncfiltervariant", False ): raise Impossible() args, kwargs = args_as_const(self, eval_ctx) args.insert(0, self.node.as_const(eval_ctx)) - if getattr(filter_, 'evalcontextfilter', False): + if getattr(filter_, "evalcontextfilter", False): args.insert(0, eval_ctx) - elif getattr(filter_, 'environmentfilter', False): + elif getattr(filter_, "environmentfilter", False): args.insert(0, self.environment) try: @@ -671,7 +700,7 @@ class Test(Expr): rest of the fields are the same as for :class:`Call`. """ - fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs') + fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs") def as_const(self, eval_ctx=None): test = self.environment.tests.get(self.name) @@ -696,20 +725,23 @@ class Call(Expr): node for dynamic positional (``*args``) or keyword (``**kwargs``) arguments. """ - fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs') + + fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs") class Getitem(Expr): """Get an attribute or item from an expression and prefer the item.""" - fields = ('node', 'arg', 'ctx') + + fields = ("node", "arg", "ctx") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) - if self.ctx != 'load': + if self.ctx != "load": raise Impossible() try: - return self.environment.getitem(self.node.as_const(eval_ctx), - self.arg.as_const(eval_ctx)) + return self.environment.getitem( + self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx) + ) except Exception: raise Impossible() @@ -721,15 +753,15 @@ class Getattr(Expr): """Get an attribute or item from an expression that is a ascii-only bytestring and prefer the attribute. """ - fields = ('node', 'attr', 'ctx') + + fields = ("node", "attr", "ctx") def as_const(self, eval_ctx=None): - if self.ctx != 'load': + if self.ctx != "load": raise Impossible() try: eval_ctx = get_eval_context(self, eval_ctx) - return self.environment.getattr(self.node.as_const(eval_ctx), - self.attr) + return self.environment.getattr(self.node.as_const(eval_ctx), self.attr) except Exception: raise Impossible() @@ -741,14 +773,17 @@ class Slice(Expr): """Represents a slice object. This must only be used as argument for :class:`Subscript`. 
""" - fields = ('start', 'stop', 'step') + + fields = ("start", "stop", "step") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) + def const(obj): if obj is None: return None return obj.as_const(eval_ctx) + return slice(const(self.start), const(self.stop), const(self.step)) @@ -756,82 +791,103 @@ class Concat(Expr): """Concatenates the list of expressions provided after converting them to unicode. """ - fields = ('nodes',) + + fields = ("nodes",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) - return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes) + return "".join(text_type(x.as_const(eval_ctx)) for x in self.nodes) class Compare(Expr): """Compares an expression with some other expressions. `ops` must be a list of :class:`Operand`\\s. """ - fields = ('expr', 'ops') + + fields = ("expr", "ops") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) result = value = self.expr.as_const(eval_ctx) + try: for op in self.ops: new_value = op.expr.as_const(eval_ctx) result = _cmpop_to_func[op.op](value, new_value) + + if not result: + return False + value = new_value except Exception: raise Impossible() + return result class Operand(Helper): """Holds an operator and an expression.""" - fields = ('op', 'expr') + + fields = ("op", "expr") + if __debug__: - Operand.__doc__ += '\nThe following operators are available: ' + \ - ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) | - set(_uaop_to_func) | set(_cmpop_to_func))) + Operand.__doc__ += "\nThe following operators are available: " + ", ".join( + sorted( + "``%s``" % x + for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func) + ) + ) class Mul(BinExpr): """Multiplies the left with the right node.""" - operator = '*' + + operator = "*" class Div(BinExpr): """Divides the left by the right node.""" - operator = '/' + + operator = "/" class FloorDiv(BinExpr): """Divides the left by the right node and truncates conver the result into an integer by truncating. """ - operator = '//' + + operator = "//" class Add(BinExpr): """Add the left to the right node.""" - operator = '+' + + operator = "+" class Sub(BinExpr): """Subtract the right from the left node.""" - operator = '-' + + operator = "-" class Mod(BinExpr): """Left modulo right.""" - operator = '%' + + operator = "%" class Pow(BinExpr): """Left to the power of right.""" - operator = '**' + + operator = "**" class And(BinExpr): """Short circuited AND.""" - operator = 'and' + + operator = "and" def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -840,7 +896,8 @@ def as_const(self, eval_ctx=None): class Or(BinExpr): """Short circuited OR.""" - operator = 'or' + + operator = "or" def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -849,17 +906,20 @@ def as_const(self, eval_ctx=None): class Not(UnaryExpr): """Negate the expression.""" - operator = 'not' + + operator = "not" class Neg(UnaryExpr): """Make the expression negative.""" - operator = '-' + + operator = "-" class Pos(UnaryExpr): """Make the expression positive (noop for most expressions)""" - operator = '+' + + operator = "+" # Helpers for extensions @@ -869,7 +929,8 @@ class EnvironmentAttribute(Expr): """Loads an attribute from the environment object. This is useful for extensions that want to call a callback stored on the environment. 
""" - fields = ('name',) + + fields = ("name",) class ExtensionAttribute(Expr): @@ -879,7 +940,8 @@ class ExtensionAttribute(Expr): This node is usually constructed by calling the :meth:`~jinja2.ext.Extension.attr` method on an extension. """ - fields = ('identifier', 'name') + + fields = ("identifier", "name") class ImportedName(Expr): @@ -888,7 +950,8 @@ class ImportedName(Expr): function from the cgi module on evaluation. Imports are optimized by the compiler so there is no need to assign them to local variables. """ - fields = ('importname',) + + fields = ("importname",) class InternalName(Expr): @@ -898,16 +961,20 @@ class InternalName(Expr): a new identifier for you. This identifier is not available from the template and is not threated specially by the compiler. """ - fields = ('name',) + + fields = ("name",) def __init__(self): - raise TypeError('Can\'t create internal names. Use the ' - '`free_identifier` method on a parser.') + raise TypeError( + "Can't create internal names. Use the " + "`free_identifier` method on a parser." + ) class MarkSafe(Expr): """Mark the wrapped expression as safe (wrap it as `Markup`).""" - fields = ('expr',) + + fields = ("expr",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -920,7 +987,8 @@ class MarkSafeIfAutoescape(Expr): .. versionadded:: 2.5 """ - fields = ('expr',) + + fields = ("expr",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) @@ -942,6 +1010,20 @@ class ContextReference(Expr): Assign(Name('foo', ctx='store'), Getattr(ContextReference(), 'name')) + + This is basically equivalent to using the + :func:`~jinja2.contextfunction` decorator when using the + high-level API, which causes a reference to the context to be passed + as the first argument to a function. + """ + + +class DerivedContextReference(Expr): + """Return the current template context including locals. Behaves + exactly like :class:`ContextReference`, but includes local + variables, such as from a ``for`` loop. + + .. versionadded:: 2.11 """ @@ -955,7 +1037,8 @@ class Break(Stmt): class Scope(Stmt): """An artificial scope.""" - fields = ('body',) + + fields = ("body",) class OverlayScope(Stmt): @@ -971,7 +1054,8 @@ class OverlayScope(Stmt): .. versionadded:: 2.10 """ - fields = ('context', 'body') + + fields = ("context", "body") class EvalContextModifier(Stmt): @@ -982,7 +1066,8 @@ class EvalContextModifier(Stmt): EvalContextModifier(options=[Keyword('autoescape', Const(True))]) """ - fields = ('options',) + + fields = ("options",) class ScopedEvalContextModifier(EvalContextModifier): @@ -990,10 +1075,14 @@ class ScopedEvalContextModifier(EvalContextModifier): :class:`EvalContextModifier` but will only modify the :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`. """ - fields = ('body',) + + fields = ("body",) # make sure nobody creates custom nodes def _failing_new(*args, **kwargs): - raise TypeError('can\'t create custom node types') -NodeType.__new__ = staticmethod(_failing_new); del _failing_new + raise TypeError("can't create custom node types") + + +NodeType.__new__ = staticmethod(_failing_new) +del _failing_new diff --git a/pipenv/vendor/jinja2/optimizer.py b/pipenv/vendor/jinja2/optimizer.py index 65ab3ceb71..7bc78c4524 100644 --- a/pipenv/vendor/jinja2/optimizer.py +++ b/pipenv/vendor/jinja2/optimizer.py @@ -1,23 +1,15 @@ # -*- coding: utf-8 -*- +"""The optimizer tries to constant fold expressions and modify the AST +in place so that it should be faster to evaluate. 
+ +Because the AST does not contain all the scoping information and the +compiler has to find that out, we cannot do all the optimizations we +want. For example, loop unrolling doesn't work because unrolled loops +would have a different scope. The solution would be a second syntax tree +that stored the scoping rules. """ - jinja2.optimizer - ~~~~~~~~~~~~~~~~ - - The jinja optimizer is currently trying to constant fold a few expressions - and modify the AST in place so that it should be easier to evaluate it. - - Because the AST does not contain all the scoping information and the - compiler has to find that out, we cannot do all the optimizations we - want. For example loop unrolling doesn't work because unrolled loops would - have a different scoping. - - The solution would be a second syntax tree that has the scoping rules stored. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD. -""" -from jinja2 import nodes -from jinja2.visitor import NodeTransformer +from . import nodes +from .visitor import NodeTransformer def optimize(node, environment): @@ -28,22 +20,22 @@ def optimize(node, environment): class Optimizer(NodeTransformer): - def __init__(self, environment): self.environment = environment - def fold(self, node, eval_ctx=None): - """Do constant folding.""" - node = self.generic_visit(node) - try: - return nodes.Const.from_untrusted(node.as_const(eval_ctx), - lineno=node.lineno, - environment=self.environment) - except nodes.Impossible: - return node - - visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \ - visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \ - visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \ - visit_Filter = visit_Test = visit_CondExpr = fold - del fold + def generic_visit(self, node, *args, **kwargs): + node = super(Optimizer, self).generic_visit(node, *args, **kwargs) + + # Do constant folding. Some other nodes besides Expr have + # as_const, but folding them causes errors later on. + if isinstance(node, nodes.Expr): + try: + return nodes.Const.from_untrusted( + node.as_const(args[0] if args else None), + lineno=node.lineno, + environment=self.environment, + ) + except nodes.Impossible: + pass + + return node diff --git a/pipenv/vendor/jinja2/parser.py b/pipenv/vendor/jinja2/parser.py index ed00d9708e..d5881066f7 100644 --- a/pipenv/vendor/jinja2/parser.py +++ b/pipenv/vendor/jinja2/parser.py @@ -1,41 +1,46 @@ # -*- coding: utf-8 -*- -""" - jinja2.parser - ~~~~~~~~~~~~~ - - Implements the template parser. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" -from jinja2 import nodes -from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError -from jinja2.lexer import describe_token, describe_token_expr -from jinja2._compat import imap - - -_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print', - 'macro', 'include', 'from', 'import', - 'set', 'with', 'autoescape']) -_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq']) +"""Parse tokens from the lexer into nodes for the compiler.""" +from . 
import nodes +from ._compat import imap +from .exceptions import TemplateAssertionError +from .exceptions import TemplateSyntaxError +from .lexer import describe_token +from .lexer import describe_token_expr + +_statement_keywords = frozenset( + [ + "for", + "if", + "block", + "extends", + "print", + "macro", + "include", + "from", + "import", + "set", + "with", + "autoescape", + ] +) +_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"]) _math_nodes = { - 'add': nodes.Add, - 'sub': nodes.Sub, - 'mul': nodes.Mul, - 'div': nodes.Div, - 'floordiv': nodes.FloorDiv, - 'mod': nodes.Mod, + "add": nodes.Add, + "sub": nodes.Sub, + "mul": nodes.Mul, + "div": nodes.Div, + "floordiv": nodes.FloorDiv, + "mod": nodes.Mod, } class Parser(object): - """This is the central parsing class Jinja2 uses. It's passed to + """This is the central parsing class Jinja uses. It's passed to extensions and can be used to parse expressions or statements. """ - def __init__(self, environment, source, name=None, filename=None, - state=None): + def __init__(self, environment, source, name=None, filename=None, state=None): self.environment = environment self.stream = environment._tokenize(source, name, filename, state) self.name = name @@ -63,31 +68,37 @@ def _fail_ut_eof(self, name, end_token_stack, lineno): for exprs in end_token_stack: expected.extend(imap(describe_token_expr, exprs)) if end_token_stack: - currently_looking = ' or '.join( - "'%s'" % describe_token_expr(expr) - for expr in end_token_stack[-1]) + currently_looking = " or ".join( + "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1] + ) else: currently_looking = None if name is None: - message = ['Unexpected end of template.'] + message = ["Unexpected end of template."] else: - message = ['Encountered unknown tag \'%s\'.' % name] + message = ["Encountered unknown tag '%s'." % name] if currently_looking: if name is not None and name in expected: - message.append('You probably made a nesting mistake. Jinja ' - 'is expecting this tag, but currently looking ' - 'for %s.' % currently_looking) + message.append( + "You probably made a nesting mistake. Jinja " + "is expecting this tag, but currently looking " + "for %s." % currently_looking + ) else: - message.append('Jinja was looking for the following tags: ' - '%s.' % currently_looking) + message.append( + "Jinja was looking for the following tags: " + "%s." % currently_looking + ) if self._tag_stack: - message.append('The innermost block that needs to be ' - 'closed is \'%s\'.' % self._tag_stack[-1]) + message.append( + "The innermost block that needs to be " + "closed is '%s'." % self._tag_stack[-1] + ) - self.fail(' '.join(message), lineno) + self.fail(" ".join(message), lineno) def fail_unknown_tag(self, name, lineno=None): """Called if the parser encounters an unknown tag. 
Tries to fail @@ -105,7 +116,7 @@ def fail_eof(self, end_tokens=None, lineno=None): def is_tuple_end(self, extra_end_rules=None): """Are we at the end of a tuple?""" - if self.stream.current.type in ('variable_end', 'block_end', 'rparen'): + if self.stream.current.type in ("variable_end", "block_end", "rparen"): return True elif extra_end_rules is not None: return self.stream.current.test_any(extra_end_rules) @@ -115,22 +126,22 @@ def free_identifier(self, lineno=None): """Return a new free identifier as :class:`~jinja2.nodes.InternalName`.""" self._last_identifier += 1 rv = object.__new__(nodes.InternalName) - nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno) + nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno) return rv def parse_statement(self): """Parse a single statement.""" token = self.stream.current - if token.type != 'name': - self.fail('tag name expected', token.lineno) + if token.type != "name": + self.fail("tag name expected", token.lineno) self._tag_stack.append(token.value) pop_tag = True try: if token.value in _statement_keywords: - return getattr(self, 'parse_' + self.stream.current.value)() - if token.value == 'call': + return getattr(self, "parse_" + self.stream.current.value)() + if token.value == "call": return self.parse_call_block() - if token.value == 'filter': + if token.value == "filter": return self.parse_filter_block() ext = self.extensions.get(token.value) if ext is not None: @@ -157,16 +168,16 @@ def parse_statements(self, end_tokens, drop_needle=False): can be set to `True` and the end token is removed. """ # the first token may be a colon for python compatibility - self.stream.skip_if('colon') + self.stream.skip_if("colon") # in the future it would be possible to add whole code sections # by adding some sort of end of statement token and parsing those here. 
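
The optimizer rewrite a few hunks above replaces the long chain of ``visit_*`` aliases with a single ``generic_visit`` override that folds any ``Expr`` node whose ``as_const()`` succeeds into a ``Const``. A minimal sketch of the observable effect, assuming the vendored copy is importable as plain ``jinja2`` (``optimize()`` and ``Environment.parse()`` are the existing entry points)::

    from jinja2 import Environment
    from jinja2.optimizer import optimize

    env = Environment()
    ast = env.parse("{{ 1 + 2 * 3 }}")

    # parse() does not fold constants; the Output node still holds the
    # full Add(left=Const(1), right=Mul(Const(2), Const(3))) subtree.
    print(ast.body[0].nodes[0])

    # optimize() walks the tree and collapses every foldable Expr into
    # a single node via Const.from_untrusted().
    print(optimize(ast, env).body[0].nodes[0])  # Const(value=7)

Folding only ``Expr`` nodes (rather than everything with ``as_const``) is deliberate, per the comment in ``generic_visit`` above.
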
- self.stream.expect('block_end') + self.stream.expect("block_end") result = self.subparse(end_tokens) # we reached the end of the template too early, the subparser # does not check for this, so we do that now - if self.stream.current.type == 'eof': + if self.stream.current.type == "eof": self.fail_eof(end_tokens) if drop_needle: @@ -177,50 +188,47 @@ def parse_set(self): """Parse an assign statement.""" lineno = next(self.stream).lineno target = self.parse_assign_target(with_namespace=True) - if self.stream.skip_if('assign'): + if self.stream.skip_if("assign"): expr = self.parse_tuple() return nodes.Assign(target, expr, lineno=lineno) filter_node = self.parse_filter(None) - body = self.parse_statements(('name:endset',), - drop_needle=True) + body = self.parse_statements(("name:endset",), drop_needle=True) return nodes.AssignBlock(target, filter_node, body, lineno=lineno) def parse_for(self): """Parse a for loop.""" - lineno = self.stream.expect('name:for').lineno - target = self.parse_assign_target(extra_end_rules=('name:in',)) - self.stream.expect('name:in') - iter = self.parse_tuple(with_condexpr=False, - extra_end_rules=('name:recursive',)) + lineno = self.stream.expect("name:for").lineno + target = self.parse_assign_target(extra_end_rules=("name:in",)) + self.stream.expect("name:in") + iter = self.parse_tuple( + with_condexpr=False, extra_end_rules=("name:recursive",) + ) test = None - if self.stream.skip_if('name:if'): + if self.stream.skip_if("name:if"): test = self.parse_expression() - recursive = self.stream.skip_if('name:recursive') - body = self.parse_statements(('name:endfor', 'name:else')) - if next(self.stream).value == 'endfor': + recursive = self.stream.skip_if("name:recursive") + body = self.parse_statements(("name:endfor", "name:else")) + if next(self.stream).value == "endfor": else_ = [] else: - else_ = self.parse_statements(('name:endfor',), drop_needle=True) - return nodes.For(target, iter, body, else_, test, - recursive, lineno=lineno) + else_ = self.parse_statements(("name:endfor",), drop_needle=True) + return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno) def parse_if(self): """Parse an if construct.""" - node = result = nodes.If(lineno=self.stream.expect('name:if').lineno) + node = result = nodes.If(lineno=self.stream.expect("name:if").lineno) while 1: node.test = self.parse_tuple(with_condexpr=False) - node.body = self.parse_statements(('name:elif', 'name:else', - 'name:endif')) + node.body = self.parse_statements(("name:elif", "name:else", "name:endif")) node.elif_ = [] node.else_ = [] token = next(self.stream) - if token.test('name:elif'): + if token.test("name:elif"): node = nodes.If(lineno=self.stream.current.lineno) result.elif_.append(node) continue - elif token.test('name:else'): - result.else_ = self.parse_statements(('name:endif',), - drop_needle=True) + elif token.test("name:else"): + result.else_ = self.parse_statements(("name:endif",), drop_needle=True) break return result @@ -228,45 +236,42 @@ def parse_with(self): node = nodes.With(lineno=next(self.stream).lineno) targets = [] values = [] - while self.stream.current.type != 'block_end': - lineno = self.stream.current.lineno + while self.stream.current.type != "block_end": if targets: - self.stream.expect('comma') + self.stream.expect("comma") target = self.parse_assign_target() - target.set_ctx('param') + target.set_ctx("param") targets.append(target) - self.stream.expect('assign') + self.stream.expect("assign") values.append(self.parse_expression()) node.targets = targets 
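
Each of these ``parse_*`` methods returns one of the node classes reworked in ``nodes.py`` earlier in this patch; ``parse_for()``, for example, fills all six fields of the ``For`` statement. A short sketch of inspecting that output, again assuming the vendored copy imports as ``jinja2`` (``find_all()`` and ``iter_fields()`` are defined on ``Node`` above)::

    from jinja2 import Environment, nodes

    env = Environment()
    # Environment.parse() drives Parser.subparse() and returns the
    # root nodes.Template node.
    ast = env.parse("{% for item in seq %}{{ item }}{% endfor %}")

    # parse_for() built a For statement somewhere in the body.
    for_node = next(ast.find_all(nodes.For))
    print([name for name, _ in for_node.iter_fields()])
    # ['target', 'iter', 'body', 'else_', 'test', 'recursive']
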
node.values = values - node.body = self.parse_statements(('name:endwith',), - drop_needle=True) + node.body = self.parse_statements(("name:endwith",), drop_needle=True) return node def parse_autoescape(self): node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno) - node.options = [ - nodes.Keyword('autoescape', self.parse_expression()) - ] - node.body = self.parse_statements(('name:endautoescape',), - drop_needle=True) + node.options = [nodes.Keyword("autoescape", self.parse_expression())] + node.body = self.parse_statements(("name:endautoescape",), drop_needle=True) return nodes.Scope([node]) def parse_block(self): node = nodes.Block(lineno=next(self.stream).lineno) - node.name = self.stream.expect('name').value - node.scoped = self.stream.skip_if('name:scoped') + node.name = self.stream.expect("name").value + node.scoped = self.stream.skip_if("name:scoped") # common problem people encounter when switching from django # to jinja. we do not support hyphens in block names, so let's # raise a nicer error message in that case. - if self.stream.current.type == 'sub': - self.fail('Block names in Jinja have to be valid Python ' - 'identifiers and may not contain hyphens, use an ' - 'underscore instead.') - - node.body = self.parse_statements(('name:endblock',), drop_needle=True) - self.stream.skip_if('name:' + node.name) + if self.stream.current.type == "sub": + self.fail( + "Block names in Jinja have to be valid Python " + "identifiers and may not contain hyphens, use an " + "underscore instead." + ) + + node.body = self.parse_statements(("name:endblock",), drop_needle=True) + self.stream.skip_if("name:" + node.name) return node def parse_extends(self): @@ -275,9 +280,10 @@ def parse_extends(self): return node def parse_import_context(self, node, default): - if self.stream.current.test_any('name:with', 'name:without') and \ - self.stream.look().test('name:context'): - node.with_context = next(self.stream).value == 'with' + if self.stream.current.test_any( + "name:with", "name:without" + ) and self.stream.look().test("name:context"): + node.with_context = next(self.stream).value == "with" self.stream.skip() else: node.with_context = default @@ -286,8 +292,9 @@ def parse_import_context(self, node, default): def parse_include(self): node = nodes.Include(lineno=next(self.stream).lineno) node.template = self.parse_expression() - if self.stream.current.test('name:ignore') and \ - self.stream.look().test('name:missing'): + if self.stream.current.test("name:ignore") and self.stream.look().test( + "name:missing" + ): node.ignore_missing = True self.stream.skip(2) else: @@ -297,67 +304,71 @@ def parse_include(self): def parse_import(self): node = nodes.Import(lineno=next(self.stream).lineno) node.template = self.parse_expression() - self.stream.expect('name:as') + self.stream.expect("name:as") node.target = self.parse_assign_target(name_only=True).name return self.parse_import_context(node, False) def parse_from(self): node = nodes.FromImport(lineno=next(self.stream).lineno) node.template = self.parse_expression() - self.stream.expect('name:import') + self.stream.expect("name:import") node.names = [] def parse_context(): - if self.stream.current.value in ('with', 'without') and \ - self.stream.look().test('name:context'): - node.with_context = next(self.stream).value == 'with' + if self.stream.current.value in ( + "with", + "without", + ) and self.stream.look().test("name:context"): + node.with_context = next(self.stream).value == "with" self.stream.skip() return True return False while 
1: if node.names: - self.stream.expect('comma') - if self.stream.current.type == 'name': + self.stream.expect("comma") + if self.stream.current.type == "name": if parse_context(): break target = self.parse_assign_target(name_only=True) - if target.name.startswith('_'): - self.fail('names starting with an underline can not ' - 'be imported', target.lineno, - exc=TemplateAssertionError) - if self.stream.skip_if('name:as'): + if target.name.startswith("_"): + self.fail( + "names starting with an underline can not be imported", + target.lineno, + exc=TemplateAssertionError, + ) + if self.stream.skip_if("name:as"): alias = self.parse_assign_target(name_only=True) node.names.append((target.name, alias.name)) else: node.names.append(target.name) - if parse_context() or self.stream.current.type != 'comma': + if parse_context() or self.stream.current.type != "comma": break else: - self.stream.expect('name') - if not hasattr(node, 'with_context'): + self.stream.expect("name") + if not hasattr(node, "with_context"): node.with_context = False return node def parse_signature(self, node): node.args = args = [] node.defaults = defaults = [] - self.stream.expect('lparen') - while self.stream.current.type != 'rparen': + self.stream.expect("lparen") + while self.stream.current.type != "rparen": if args: - self.stream.expect('comma') + self.stream.expect("comma") arg = self.parse_assign_target(name_only=True) - arg.set_ctx('param') - if self.stream.skip_if('assign'): + arg.set_ctx("param") + if self.stream.skip_if("assign"): defaults.append(self.parse_expression()) elif defaults: - self.fail('non-default argument follows default argument') + self.fail("non-default argument follows default argument") args.append(arg) - self.stream.expect('rparen') + self.stream.expect("rparen") def parse_call_block(self): node = nodes.CallBlock(lineno=next(self.stream).lineno) - if self.stream.current.type == 'lparen': + if self.stream.current.type == "lparen": self.parse_signature(node) else: node.args = [] @@ -365,37 +376,40 @@ def parse_call_block(self): node.call = self.parse_expression() if not isinstance(node.call, nodes.Call): - self.fail('expected call', node.lineno) - node.body = self.parse_statements(('name:endcall',), drop_needle=True) + self.fail("expected call", node.lineno) + node.body = self.parse_statements(("name:endcall",), drop_needle=True) return node def parse_filter_block(self): node = nodes.FilterBlock(lineno=next(self.stream).lineno) node.filter = self.parse_filter(None, start_inline=True) - node.body = self.parse_statements(('name:endfilter',), - drop_needle=True) + node.body = self.parse_statements(("name:endfilter",), drop_needle=True) return node def parse_macro(self): node = nodes.Macro(lineno=next(self.stream).lineno) node.name = self.parse_assign_target(name_only=True).name self.parse_signature(node) - node.body = self.parse_statements(('name:endmacro',), - drop_needle=True) + node.body = self.parse_statements(("name:endmacro",), drop_needle=True) return node def parse_print(self): node = nodes.Output(lineno=next(self.stream).lineno) node.nodes = [] - while self.stream.current.type != 'block_end': + while self.stream.current.type != "block_end": if node.nodes: - self.stream.expect('comma') + self.stream.expect("comma") node.nodes.append(self.parse_expression()) return node - def parse_assign_target(self, with_tuple=True, name_only=False, - extra_end_rules=None, with_namespace=False): - """Parse an assignment target. 
As Jinja2 allows + def parse_assign_target( + self, + with_tuple=True, + name_only=False, + extra_end_rules=None, + with_namespace=False, + ): + """Parse an assignment target. As Jinja allows assignments to tuples, this function can parse all allowed assignment targets. By default assignments to tuples are parsed; that can be disabled, however, by setting `with_tuple` to `False`. If only assignments to names are @@ -403,24 +417,26 @@ def parse_assign_target(self, with_tuple=True, name_only=False, parameter is forwarded to the tuple parsing function. If `with_namespace` is enabled, a namespace assignment may be parsed. """ - if with_namespace and self.stream.look().type == 'dot': - token = self.stream.expect('name') + if with_namespace and self.stream.look().type == "dot": + token = self.stream.expect("name") next(self.stream) # dot - attr = self.stream.expect('name') + attr = self.stream.expect("name") target = nodes.NSRef(token.value, attr.value, lineno=token.lineno) elif name_only: - token = self.stream.expect('name') - target = nodes.Name(token.value, 'store', lineno=token.lineno) + token = self.stream.expect("name") + target = nodes.Name(token.value, "store", lineno=token.lineno) else: if with_tuple: - target = self.parse_tuple(simplified=True, - extra_end_rules=extra_end_rules) + target = self.parse_tuple( + simplified=True, extra_end_rules=extra_end_rules + ) else: target = self.parse_primary() - target.set_ctx('store') + target.set_ctx("store") if not target.can_assign(): - self.fail('can\'t assign to %r' % target.__class__. - __name__.lower(), target.lineno) + self.fail( + "can't assign to %r" % target.__class__.__name__.lower(), target.lineno + ) return target def parse_expression(self, with_condexpr=True): @@ -435,9 +451,9 @@ def parse_expression(self, with_condexpr=True): def parse_condexpr(self): lineno = self.stream.current.lineno expr1 = self.parse_or() - while self.stream.skip_if('name:if'): + while self.stream.skip_if("name:if"): expr2 = self.parse_or() - if self.stream.skip_if('name:else'): + if self.stream.skip_if("name:else"): expr3 = self.parse_condexpr() else: expr3 = None @@ -448,7 +464,7 @@ def parse_condexpr(self): def parse_or(self): lineno = self.stream.current.lineno left = self.parse_and() - while self.stream.skip_if('name:or'): + while self.stream.skip_if("name:or"): right = self.parse_and() left = nodes.Or(left, right, lineno=lineno) lineno = self.stream.current.lineno @@ -457,14 +473,14 @@ def parse_or(self): def parse_and(self): lineno = self.stream.current.lineno left = self.parse_not() - while self.stream.skip_if('name:and'): + while self.stream.skip_if("name:and"): right = self.parse_not() left = nodes.And(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_not(self): - if self.stream.current.test('name:not'): + if self.stream.current.test("name:not"): lineno = next(self.stream).lineno return nodes.Not(self.parse_not(), lineno=lineno) return self.parse_compare() @@ -478,12 +494,13 @@ def parse_compare(self): if token_type in _compare_operators: next(self.stream) ops.append(nodes.Operand(token_type, self.parse_math1())) - elif self.stream.skip_if('name:in'): - ops.append(nodes.Operand('in', self.parse_math1())) - elif (self.stream.current.test('name:not') and - self.stream.look().test('name:in')): + elif self.stream.skip_if("name:in"): + ops.append(nodes.Operand("in", self.parse_math1())) + elif self.stream.current.test("name:not") and self.stream.look().test( + "name:in" + ): self.stream.skip(2) -
ops.append(nodes.Operand('notin', self.parse_math1())) + ops.append(nodes.Operand("notin", self.parse_math1())) else: break lineno = self.stream.current.lineno @@ -494,7 +511,7 @@ def parse_compare(self): def parse_math1(self): lineno = self.stream.current.lineno left = self.parse_concat() - while self.stream.current.type in ('add', 'sub'): + while self.stream.current.type in ("add", "sub"): cls = _math_nodes[self.stream.current.type] next(self.stream) right = self.parse_concat() @@ -505,7 +522,7 @@ def parse_math1(self): def parse_concat(self): lineno = self.stream.current.lineno args = [self.parse_math2()] - while self.stream.current.type == 'tilde': + while self.stream.current.type == "tilde": next(self.stream) args.append(self.parse_math2()) if len(args) == 1: @@ -515,7 +532,7 @@ def parse_concat(self): def parse_math2(self): lineno = self.stream.current.lineno left = self.parse_pow() - while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'): + while self.stream.current.type in ("mul", "div", "floordiv", "mod"): cls = _math_nodes[self.stream.current.type] next(self.stream) right = self.parse_pow() @@ -526,7 +543,7 @@ def parse_math2(self): def parse_pow(self): lineno = self.stream.current.lineno left = self.parse_unary() - while self.stream.current.type == 'pow': + while self.stream.current.type == "pow": next(self.stream) right = self.parse_unary() left = nodes.Pow(left, right, lineno=lineno) @@ -536,10 +553,10 @@ def parse_pow(self): def parse_unary(self, with_filter=True): token_type = self.stream.current.type lineno = self.stream.current.lineno - if token_type == 'sub': + if token_type == "sub": next(self.stream) node = nodes.Neg(self.parse_unary(False), lineno=lineno) - elif token_type == 'add': + elif token_type == "add": next(self.stream) node = nodes.Pos(self.parse_unary(False), lineno=lineno) else: @@ -551,40 +568,44 @@ def parse_unary(self, with_filter=True): def parse_primary(self): token = self.stream.current - if token.type == 'name': - if token.value in ('true', 'false', 'True', 'False'): - node = nodes.Const(token.value in ('true', 'True'), - lineno=token.lineno) - elif token.value in ('none', 'None'): + if token.type == "name": + if token.value in ("true", "false", "True", "False"): + node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno) + elif token.value in ("none", "None"): node = nodes.Const(None, lineno=token.lineno) else: - node = nodes.Name(token.value, 'load', lineno=token.lineno) + node = nodes.Name(token.value, "load", lineno=token.lineno) next(self.stream) - elif token.type == 'string': + elif token.type == "string": next(self.stream) buf = [token.value] lineno = token.lineno - while self.stream.current.type == 'string': + while self.stream.current.type == "string": buf.append(self.stream.current.value) next(self.stream) - node = nodes.Const(''.join(buf), lineno=lineno) - elif token.type in ('integer', 'float'): + node = nodes.Const("".join(buf), lineno=lineno) + elif token.type in ("integer", "float"): next(self.stream) node = nodes.Const(token.value, lineno=token.lineno) - elif token.type == 'lparen': + elif token.type == "lparen": next(self.stream) node = self.parse_tuple(explicit_parentheses=True) - self.stream.expect('rparen') - elif token.type == 'lbracket': + self.stream.expect("rparen") + elif token.type == "lbracket": node = self.parse_list() - elif token.type == 'lbrace': + elif token.type == "lbrace": node = self.parse_dict() else: self.fail("unexpected '%s'" % describe_token(token), token.lineno) return node - def 
parse_tuple(self, simplified=False, with_condexpr=True, - extra_end_rules=None, explicit_parentheses=False): + def parse_tuple( + self, + simplified=False, + with_condexpr=True, + extra_end_rules=None, + explicit_parentheses=False, + ): """Works like `parse_expression` but if multiple expressions are delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created. This method could also return a regular expression instead of a tuple @@ -609,16 +630,19 @@ def parse_tuple(self, simplified=False, with_condexpr=True, elif with_condexpr: parse = self.parse_expression else: - parse = lambda: self.parse_expression(with_condexpr=False) + + def parse(): + return self.parse_expression(with_condexpr=False) + args = [] is_tuple = False while 1: if args: - self.stream.expect('comma') + self.stream.expect("comma") if self.is_tuple_end(extra_end_rules): break args.append(parse()) - if self.stream.current.type == 'comma': + if self.stream.current.type == "comma": is_tuple = True else: break @@ -633,46 +657,48 @@ def parse_tuple(self, simplified=False, with_condexpr=True, # nothing) in the spot of an expression would be an empty # tuple. if not explicit_parentheses: - self.fail('Expected an expression, got \'%s\'' % - describe_token(self.stream.current)) + self.fail( + "Expected an expression, got '%s'" + % describe_token(self.stream.current) + ) - return nodes.Tuple(args, 'load', lineno=lineno) + return nodes.Tuple(args, "load", lineno=lineno) def parse_list(self): - token = self.stream.expect('lbracket') + token = self.stream.expect("lbracket") items = [] - while self.stream.current.type != 'rbracket': + while self.stream.current.type != "rbracket": if items: - self.stream.expect('comma') - if self.stream.current.type == 'rbracket': + self.stream.expect("comma") + if self.stream.current.type == "rbracket": break items.append(self.parse_expression()) - self.stream.expect('rbracket') + self.stream.expect("rbracket") return nodes.List(items, lineno=token.lineno) def parse_dict(self): - token = self.stream.expect('lbrace') + token = self.stream.expect("lbrace") items = [] - while self.stream.current.type != 'rbrace': + while self.stream.current.type != "rbrace": if items: - self.stream.expect('comma') - if self.stream.current.type == 'rbrace': + self.stream.expect("comma") + if self.stream.current.type == "rbrace": break key = self.parse_expression() - self.stream.expect('colon') + self.stream.expect("colon") value = self.parse_expression() items.append(nodes.Pair(key, value, lineno=key.lineno)) - self.stream.expect('rbrace') + self.stream.expect("rbrace") return nodes.Dict(items, lineno=token.lineno) def parse_postfix(self, node): while 1: token_type = self.stream.current.type - if token_type == 'dot' or token_type == 'lbracket': + if token_type == "dot" or token_type == "lbracket": node = self.parse_subscript(node) # calls are valid both after postfix expressions (getattr # and getitem) as well as filters and tests - elif token_type == 'lparen': + elif token_type == "lparen": node = self.parse_call(node) else: break @@ -681,13 +707,13 @@ def parse_postfix(self, node): def parse_filter_expr(self, node): while 1: token_type = self.stream.current.type - if token_type == 'pipe': + if token_type == "pipe": node = self.parse_filter(node) - elif token_type == 'name' and self.stream.current.value == 'is': + elif token_type == "name" and self.stream.current.value == "is": node = self.parse_test(node) # calls are valid both after postfix expressions (getattr # and getitem) as well as filters and tests - elif 
token_type == 'lparen': + elif token_type == "lparen": node = self.parse_call(node) else: break @@ -695,53 +721,54 @@ def parse_filter_expr(self, node): def parse_subscript(self, node): token = next(self.stream) - if token.type == 'dot': + if token.type == "dot": attr_token = self.stream.current next(self.stream) - if attr_token.type == 'name': - return nodes.Getattr(node, attr_token.value, 'load', - lineno=token.lineno) - elif attr_token.type != 'integer': - self.fail('expected name or number', attr_token.lineno) + if attr_token.type == "name": + return nodes.Getattr( + node, attr_token.value, "load", lineno=token.lineno + ) + elif attr_token.type != "integer": + self.fail("expected name or number", attr_token.lineno) arg = nodes.Const(attr_token.value, lineno=attr_token.lineno) - return nodes.Getitem(node, arg, 'load', lineno=token.lineno) - if token.type == 'lbracket': + return nodes.Getitem(node, arg, "load", lineno=token.lineno) + if token.type == "lbracket": args = [] - while self.stream.current.type != 'rbracket': + while self.stream.current.type != "rbracket": if args: - self.stream.expect('comma') + self.stream.expect("comma") args.append(self.parse_subscribed()) - self.stream.expect('rbracket') + self.stream.expect("rbracket") if len(args) == 1: arg = args[0] else: - arg = nodes.Tuple(args, 'load', lineno=token.lineno) - return nodes.Getitem(node, arg, 'load', lineno=token.lineno) - self.fail('expected subscript expression', self.lineno) + arg = nodes.Tuple(args, "load", lineno=token.lineno) + return nodes.Getitem(node, arg, "load", lineno=token.lineno) + self.fail("expected subscript expression", token.lineno) def parse_subscribed(self): lineno = self.stream.current.lineno - if self.stream.current.type == 'colon': + if self.stream.current.type == "colon": next(self.stream) args = [None] else: node = self.parse_expression() - if self.stream.current.type != 'colon': + if self.stream.current.type != "colon": return node next(self.stream) args = [node] - if self.stream.current.type == 'colon': + if self.stream.current.type == "colon": args.append(None) - elif self.stream.current.type not in ('rbracket', 'comma'): + elif self.stream.current.type not in ("rbracket", "comma"): args.append(self.parse_expression()) else: args.append(None) - if self.stream.current.type == 'colon': + if self.stream.current.type == "colon": next(self.stream) - if self.stream.current.type not in ('rbracket', 'comma'): + if self.stream.current.type not in ("rbracket", "comma"): args.append(self.parse_expression()) else: args.append(None) @@ -751,7 +778,7 @@ def parse_subscribed(self): return nodes.Slice(lineno=lineno, *args) def parse_call(self, node): - token = self.stream.expect('lparen') + token = self.stream.expect("lparen") args = [] kwargs = [] dyn_args = dyn_kwargs = None @@ -759,91 +786,100 @@ def parse_call(self, node): def ensure(expr): if not expr: - self.fail('invalid syntax for function call expression', - token.lineno) + self.fail("invalid syntax for function call expression", token.lineno) - while self.stream.current.type != 'rparen': + while self.stream.current.type != "rparen": if require_comma: - self.stream.expect('comma') + self.stream.expect("comma") # support for trailing comma - if self.stream.current.type == 'rparen': + if self.stream.current.type == "rparen": break - if self.stream.current.type == 'mul': + if self.stream.current.type == "mul": ensure(dyn_args is None and dyn_kwargs is None) next(self.stream) dyn_args = self.parse_expression() - elif self.stream.current.type == 'pow': 
+ elif self.stream.current.type == "pow": ensure(dyn_kwargs is None) next(self.stream) dyn_kwargs = self.parse_expression() else: - ensure(dyn_args is None and dyn_kwargs is None) - if self.stream.current.type == 'name' and \ - self.stream.look().type == 'assign': + if ( + self.stream.current.type == "name" + and self.stream.look().type == "assign" + ): + # Parsing a kwarg + ensure(dyn_kwargs is None) key = self.stream.current.value self.stream.skip(2) value = self.parse_expression() - kwargs.append(nodes.Keyword(key, value, - lineno=value.lineno)) + kwargs.append(nodes.Keyword(key, value, lineno=value.lineno)) else: - ensure(not kwargs) + # Parsing an arg + ensure(dyn_args is None and dyn_kwargs is None and not kwargs) args.append(self.parse_expression()) require_comma = True - self.stream.expect('rparen') + self.stream.expect("rparen") if node is None: return args, kwargs, dyn_args, dyn_kwargs - return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, - lineno=token.lineno) + return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) def parse_filter(self, node, start_inline=False): - while self.stream.current.type == 'pipe' or start_inline: + while self.stream.current.type == "pipe" or start_inline: if not start_inline: next(self.stream) - token = self.stream.expect('name') + token = self.stream.expect("name") name = token.value - while self.stream.current.type == 'dot': + while self.stream.current.type == "dot": next(self.stream) - name += '.' + self.stream.expect('name').value - if self.stream.current.type == 'lparen': + name += "." + self.stream.expect("name").value + if self.stream.current.type == "lparen": args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) else: args = [] kwargs = [] dyn_args = dyn_kwargs = None - node = nodes.Filter(node, name, args, kwargs, dyn_args, - dyn_kwargs, lineno=token.lineno) + node = nodes.Filter( + node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno + ) start_inline = False return node def parse_test(self, node): token = next(self.stream) - if self.stream.current.test('name:not'): + if self.stream.current.test("name:not"): next(self.stream) negated = True else: negated = False - name = self.stream.expect('name').value - while self.stream.current.type == 'dot': + name = self.stream.expect("name").value + while self.stream.current.type == "dot": next(self.stream) - name += '.' + self.stream.expect('name').value + name += "." 
+ self.stream.expect("name").value dyn_args = dyn_kwargs = None kwargs = [] - if self.stream.current.type == 'lparen': + if self.stream.current.type == "lparen": args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) - elif (self.stream.current.type in ('name', 'string', 'integer', - 'float', 'lparen', 'lbracket', - 'lbrace') and not - self.stream.current.test_any('name:else', 'name:or', - 'name:and')): - if self.stream.current.test('name:is'): - self.fail('You cannot chain multiple tests with is') - args = [self.parse_primary()] + elif self.stream.current.type in ( + "name", + "string", + "integer", + "float", + "lparen", + "lbracket", + "lbrace", + ) and not self.stream.current.test_any("name:else", "name:or", "name:and"): + if self.stream.current.test("name:is"): + self.fail("You cannot chain multiple tests with is") + arg_node = self.parse_primary() + arg_node = self.parse_postfix(arg_node) + args = [arg_node] else: args = [] - node = nodes.Test(node, name, args, kwargs, dyn_args, - dyn_kwargs, lineno=token.lineno) + node = nodes.Test( + node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno + ) if negated: node = nodes.Not(node, lineno=token.lineno) return node @@ -865,29 +901,29 @@ def flush_data(): try: while self.stream: token = self.stream.current - if token.type == 'data': + if token.type == "data": if token.value: - add_data(nodes.TemplateData(token.value, - lineno=token.lineno)) + add_data(nodes.TemplateData(token.value, lineno=token.lineno)) next(self.stream) - elif token.type == 'variable_begin': + elif token.type == "variable_begin": next(self.stream) add_data(self.parse_tuple(with_condexpr=True)) - self.stream.expect('variable_end') - elif token.type == 'block_begin': + self.stream.expect("variable_end") + elif token.type == "block_begin": flush_data() next(self.stream) - if end_tokens is not None and \ - self.stream.current.test_any(*end_tokens): + if end_tokens is not None and self.stream.current.test_any( + *end_tokens + ): return body rv = self.parse_statement() if isinstance(rv, list): body.extend(rv) else: body.append(rv) - self.stream.expect('block_end') + self.stream.expect("block_end") else: - raise AssertionError('internal parsing error') + raise AssertionError("internal parsing error") flush_data() finally: diff --git a/pipenv/vendor/jinja2/runtime.py b/pipenv/vendor/jinja2/runtime.py index 5e313369ed..527d4b5e4b 100644 --- a/pipenv/vendor/jinja2/runtime.py +++ b/pipenv/vendor/jinja2/runtime.py @@ -1,43 +1,62 @@ # -*- coding: utf-8 -*- -""" - jinja2.runtime - ~~~~~~~~~~~~~~ - - Runtime helpers. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD. 
-""" +"""The runtime functions and state used by compiled templates.""" import sys - from itertools import chain from types import MethodType -from jinja2.nodes import EvalContext, _context_function_types -from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \ - internalcode, object_type_repr, evalcontextfunction, Namespace -from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \ - TemplateNotFound -from jinja2._compat import imap, text_type, iteritems, \ - implements_iterator, implements_to_string, string_types, PY2, \ - with_metaclass, abc - +from markupsafe import escape # noqa: F401 +from markupsafe import Markup +from markupsafe import soft_unicode + +from ._compat import abc +from ._compat import imap +from ._compat import implements_iterator +from ._compat import implements_to_string +from ._compat import iteritems +from ._compat import PY2 +from ._compat import string_types +from ._compat import text_type +from ._compat import with_metaclass +from .exceptions import TemplateNotFound # noqa: F401 +from .exceptions import TemplateRuntimeError # noqa: F401 +from .exceptions import UndefinedError +from .nodes import EvalContext +from .utils import concat +from .utils import evalcontextfunction +from .utils import internalcode +from .utils import missing +from .utils import Namespace # noqa: F401 +from .utils import object_type_repr # these variables are exported to the template runtime -__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup', - 'TemplateRuntimeError', 'missing', 'concat', 'escape', - 'markup_join', 'unicode_join', 'to_string', 'identity', - 'TemplateNotFound', 'Namespace'] +exported = [ + "LoopContext", + "TemplateReference", + "Macro", + "Markup", + "TemplateRuntimeError", + "missing", + "concat", + "escape", + "markup_join", + "unicode_join", + "to_string", + "identity", + "TemplateNotFound", + "Namespace", + "Undefined", +] #: the name of the function that is used to convert something into #: a string. We can just use the text type here. to_string = text_type -#: the identity function. Useful for certain things in the environment -identity = lambda x: x -_first_iteration = object() -_last_iteration = object() +def identity(x): + """Returns its argument. Useful for certain things in the + environment. 
+ """ + return x def markup_join(seq): @@ -46,8 +65,8 @@ def markup_join(seq): iterator = imap(soft_unicode, seq) for arg in iterator: buf.append(arg) - if hasattr(arg, '__html__'): - return Markup(u'').join(chain(buf, iterator)) + if hasattr(arg, "__html__"): + return Markup(u"").join(chain(buf, iterator)) return concat(buf) @@ -56,9 +75,16 @@ def unicode_join(seq): return concat(imap(text_type, seq)) -def new_context(environment, template_name, blocks, vars=None, - shared=None, globals=None, locals=None): - """Internal helper to for context creation.""" +def new_context( + environment, + template_name, + blocks, + vars=None, + shared=None, + globals=None, + locals=None, +): + """Internal helper for context creation.""" if vars is None: vars = {} if shared: @@ -73,8 +99,7 @@ def new_context(environment, template_name, blocks, vars=None, for key, value in iteritems(locals): if value is not missing: parent[key] = value - return environment.context_class(environment, parent, template_name, - blocks) + return environment.context_class(environment, parent, template_name, blocks) class TemplateReference(object): @@ -88,20 +113,16 @@ def __getitem__(self, name): return BlockReference(name, self.__context, blocks, 0) def __repr__(self): - return '<%s %r>' % ( - self.__class__.__name__, - self.__context.name - ) + return "<%s %r>" % (self.__class__.__name__, self.__context.name) def _get_func(x): - return getattr(x, '__func__', x) + return getattr(x, "__func__", x) class ContextMeta(type): - - def __new__(cls, name, bases, d): - rv = type.__new__(cls, name, bases, d) + def __new__(mcs, name, bases, d): + rv = type.__new__(mcs, name, bases, d) if bases == (): return rv @@ -112,11 +133,15 @@ def __new__(cls, name, bases, d): # If we have a changed resolve but no changed default or missing # resolve we invert the call logic. - if resolve is not default_resolve and \ - resolve_or_missing is default_resolve_or_missing: + if ( + resolve is not default_resolve + and resolve_or_missing is default_resolve_or_missing + ): rv._legacy_resolve_mode = True - elif resolve is default_resolve and \ - resolve_or_missing is default_resolve_or_missing: + elif ( + resolve is default_resolve + and resolve_or_missing is default_resolve_or_missing + ): rv._fast_resolve_mode = True return rv @@ -149,6 +174,7 @@ class Context(with_metaclass(ContextMeta)): method that doesn't fail with a `KeyError` but returns an :class:`Undefined` object for missing variables. """ + # XXX: we want to eventually make this be a deprecation warning and # remove it. _legacy_resolve_mode = False @@ -179,9 +205,9 @@ def super(self, name, current): index = blocks.index(current) + 1 blocks[index] except LookupError: - return self.environment.undefined('there is no parent block ' - 'called %r.' % name, - name='super') + return self.environment.undefined( + "there is no parent block called %r." 
% name, name="super" + ) return BlockReference(name, self, blocks, index) def get(self, key, default=None): @@ -232,7 +258,7 @@ def get_all(self): return dict(self.parent, **self.vars) @internalcode - def call(__self, __obj, *args, **kwargs): + def call(__self, __obj, *args, **kwargs): # noqa: B902 """Call the callable with the arguments and keyword arguments provided but inject the active context or environment as first argument if the callable is a :func:`contextfunction` or @@ -242,55 +268,62 @@ def call(__self, __obj, *args, **kwargs): __traceback_hide__ = True # noqa # Allow callable classes to take a context - if hasattr(__obj, '__call__'): + if hasattr(__obj, "__call__"): # noqa: B004 fn = __obj.__call__ - for fn_type in ('contextfunction', - 'evalcontextfunction', - 'environmentfunction'): + for fn_type in ( + "contextfunction", + "evalcontextfunction", + "environmentfunction", + ): if hasattr(fn, fn_type): __obj = fn break - if isinstance(__obj, _context_function_types): - if getattr(__obj, 'contextfunction', 0): + if callable(__obj): + if getattr(__obj, "contextfunction", 0): args = (__self,) + args - elif getattr(__obj, 'evalcontextfunction', 0): + elif getattr(__obj, "evalcontextfunction", 0): args = (__self.eval_ctx,) + args - elif getattr(__obj, 'environmentfunction', 0): + elif getattr(__obj, "environmentfunction", 0): args = (__self.environment,) + args try: return __obj(*args, **kwargs) except StopIteration: - return __self.environment.undefined('value was undefined because ' - 'a callable raised a ' - 'StopIteration exception') + return __self.environment.undefined( + "value was undefined because " + "a callable raised a " + "StopIteration exception" + ) def derived(self, locals=None): """Internal helper function to create a derived context. This is used in situations where the system needs a new context in the same template that is independent. """ - context = new_context(self.environment, self.name, {}, - self.get_all(), True, None, locals) + context = new_context( + self.environment, self.name, {}, self.get_all(), True, None, locals + ) context.eval_ctx = self.eval_ctx context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks)) return context - def _all(meth): - proxy = lambda self: getattr(self.get_all(), meth)() + def _all(meth): # noqa: B902 + def proxy(self): + return getattr(self.get_all(), meth)() + proxy.__doc__ = getattr(dict, meth).__doc__ proxy.__name__ = meth return proxy - keys = _all('keys') - values = _all('values') - items = _all('items') + keys = _all("keys") + values = _all("values") + items = _all("items") # not available on python 3 if PY2: - iterkeys = _all('iterkeys') - itervalues = _all('itervalues') - iteritems = _all('iteritems') + iterkeys = _all("iterkeys") + itervalues = _all("itervalues") + iteritems = _all("iteritems") del _all def __contains__(self, name): @@ -306,10 +339,10 @@ def __getitem__(self, key): return item def __repr__(self): - return '<%s %s of %r>' % ( + return "<%s %s of %r>" % ( self.__class__.__name__, repr(self.get_all()), - self.name + self.name, ) @@ -329,11 +362,10 @@ def __init__(self, name, context, stack, depth): def super(self): """Super the block.""" if self._depth + 1 >= len(self._stack): - return self._context.environment. \ - undefined('there is no parent block called %r.' % - self.name, name='super') - return BlockReference(self.name, self._context, self._stack, - self._depth + 1) + return self._context.environment.undefined( + "there is no parent block called %r." 
% self.name, name="super" + ) + return BlockReference(self.name, self._context, self._stack, self._depth + 1) @internalcode def __call__(self): @@ -343,143 +375,212 @@ def __call__(self): return rv -class LoopContextBase(object): - """A loop context for dynamic iteration.""" +@implements_iterator +class LoopContext: + """A wrapper iterable for dynamic ``for`` loops, with information + about the loop and iteration. + """ + + #: Current iteration of the loop, starting at 0. + index0 = -1 - _before = _first_iteration - _current = _first_iteration - _after = _last_iteration _length = None + _after = missing + _current = missing + _before = missing + _last_changed_value = missing - def __init__(self, undefined, recurse=None, depth0=0): + def __init__(self, iterable, undefined, recurse=None, depth0=0): + """ + :param iterable: Iterable to wrap. + :param undefined: :class:`Undefined` class to use for next and + previous items. + :param recurse: The function to render the loop body when the + loop is marked recursive. + :param depth0: Incremented when looping recursively. + """ + self._iterable = iterable + self._iterator = self._to_iterator(iterable) self._undefined = undefined self._recurse = recurse - self.index0 = -1 + #: How many levels deep a recursive loop currently is, starting at 0. self.depth0 = depth0 - self._last_checked_value = missing - def cycle(self, *args): - """Cycles among the arguments with the current loop index.""" - if not args: - raise TypeError('no items for cycling given') - return args[self.index0 % len(args)] + @staticmethod + def _to_iterator(iterable): + return iter(iterable) - def changed(self, *value): - """Checks whether the value has changed since the last call.""" - if self._last_checked_value != value: - self._last_checked_value = value - return True - return False + @property + def length(self): + """Length of the iterable. - first = property(lambda x: x.index0 == 0) - last = property(lambda x: x._after is _last_iteration) - index = property(lambda x: x.index0 + 1) - revindex = property(lambda x: x.length - x.index0) - revindex0 = property(lambda x: x.length - x.index) - depth = property(lambda x: x.depth0 + 1) + If the iterable is a generator or otherwise does not have a + size, it is eagerly evaluated to get a size. + """ + if self._length is not None: + return self._length - @property - def previtem(self): - if self._before is _first_iteration: - return self._undefined('there is no previous item') - return self._before + try: + self._length = len(self._iterable) + except TypeError: + iterable = list(self._iterator) + self._iterator = self._to_iterator(iterable) + self._length = len(iterable) + self.index + (self._after is not missing) - @property - def nextitem(self): - if self._after is _last_iteration: - return self._undefined('there is no next item') - return self._after + return self._length def __len__(self): return self.length - @internalcode - def loop(self, iterable): - if self._recurse is None: - raise TypeError('Tried to call non recursive loop. Maybe you ' - "forgot the 'recursive' modifier.") - return self._recurse(iterable, self._recurse, self.depth0 + 1) + @property + def depth(self): + """How many levels deep a recursive loop currently is, starting at 1.""" + return self.depth0 + 1 - # a nifty trick to enhance the error message if someone tried to call - # the the loop without or with too many arguments. 
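
# A minimal usage sketch (assuming the jinja2 2.11 template API) of the
# lazy ``loop.length`` shown above: a generator has no ``__len__``, so the
# first access to ``length`` takes the TypeError branch, materializes the
# remaining items, and adds the iterations already performed.
from jinja2 import Environment

env = Environment()
t = env.from_string(
    "{% for c in chars %}{{ loop.index }}/{{ loop.length }} {% endfor %}"
)
print(t.render(chars=(c for c in "abc")))  # -> 1/3 2/3 3/3
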
- __call__ = loop - del loop + @property + def index(self): + """Current iteration of the loop, starting at 1.""" + return self.index0 + 1 - def __repr__(self): - return '<%s %r/%r>' % ( - self.__class__.__name__, - self.index, - self.length - ) + @property + def revindex0(self): + """Number of iterations from the end of the loop, ending at 0. + Requires calculating :attr:`length`. + """ + return self.length - self.index -class LoopContext(LoopContextBase): + @property + def revindex(self): + """Number of iterations from the end of the loop, ending at 1. - def __init__(self, iterable, undefined, recurse=None, depth0=0): - LoopContextBase.__init__(self, undefined, recurse, depth0) - self._iterator = iter(iterable) + Requires calculating :attr:`length`. + """ + return self.length - self.index0 - # try to get the length of the iterable early. This must be done - # here because there are some broken iterators around where there - # __len__ is the number of iterations left (i'm looking at your - # listreverseiterator!). - try: - self._length = len(iterable) - except (TypeError, AttributeError): - self._length = None - self._after = self._safe_next() + @property + def first(self): + """Whether this is the first iteration of the loop.""" + return self.index0 == 0 + + def _peek_next(self): + """Return the next element in the iterable, or :data:`missing` + if the iterable is exhausted. Only peeks one item ahead, caching + the result in :attr:`_last` for use in subsequent checks. The + cache is reset when :meth:`__next__` is called. + """ + if self._after is not missing: + return self._after + + self._after = next(self._iterator, missing) + return self._after @property - def length(self): - if self._length is None: - # if was not possible to get the length of the iterator when - # the loop context was created (ie: iterating over a generator) - # we have to convert the iterable into a sequence and use the - # length of that + the number of iterations so far. - iterable = tuple(self._iterator) - self._iterator = iter(iterable) - iterations_done = self.index0 + 2 - self._length = len(iterable) + iterations_done - return self._length + def last(self): + """Whether this is the last iteration of the loop. - def __iter__(self): - return LoopContextIterator(self) + Causes the iterable to advance early. See + :func:`itertools.groupby` for issues this can cause. + The :func:`groupby` filter avoids that issue. + """ + return self._peek_next() is missing - def _safe_next(self): - try: - return next(self._iterator) - except StopIteration: - return _last_iteration + @property + def previtem(self): + """The item in the previous iteration. Undefined during the + first iteration. + """ + if self.first: + return self._undefined("there is no previous item") + return self._before -@implements_iterator -class LoopContextIterator(object): - """The iterator for a loop context.""" - __slots__ = ('context',) + @property + def nextitem(self): + """The item in the next iteration. Undefined during the last + iteration. - def __init__(self, context): - self.context = context + Causes the iterable to advance early. See + :func:`itertools.groupby` for issues this can cause. + The :func:`groupby` filter avoids that issue. + """ + rv = self._peek_next() + + if rv is missing: + return self._undefined("there is no next item") + + return rv + + def cycle(self, *args): + """Return a value from the given args, cycling through based on + the current :attr:`index0`. + + :param args: One or more values to cycle through. 
+ """ + if not args: + raise TypeError("no items for cycling given") + + return args[self.index0 % len(args)] + + def changed(self, *value): + """Return ``True`` if previously called with a different value + (including when called for the first time). + + :param value: One or more values to compare to the last call. + """ + if self._last_changed_value != value: + self._last_changed_value = value + return True + + return False def __iter__(self): return self def __next__(self): - ctx = self.context - ctx.index0 += 1 - if ctx._after is _last_iteration: - raise StopIteration() - ctx._before = ctx._current - ctx._current = ctx._after - ctx._after = ctx._safe_next() - return ctx._current, ctx + if self._after is not missing: + rv = self._after + self._after = missing + else: + rv = next(self._iterator) + + self.index0 += 1 + self._before = self._current + self._current = rv + return rv, self + + @internalcode + def __call__(self, iterable): + """When iterating over nested data, render the body of the loop + recursively with the given inner iterable data. + + The loop must have the ``recursive`` marker for this to work. + """ + if self._recurse is None: + raise TypeError( + "The loop must have the 'recursive' marker to be called recursively." + ) + + return self._recurse(iterable, self._recurse, depth=self.depth) + + def __repr__(self): + return "<%s %d/%d>" % (self.__class__.__name__, self.index, self.length) class Macro(object): """Wraps a macro function.""" - def __init__(self, environment, func, name, arguments, - catch_kwargs, catch_varargs, caller, - default_autoescape=None): + def __init__( + self, + environment, + func, + name, + arguments, + catch_kwargs, + catch_varargs, + caller, + default_autoescape=None, + ): self._environment = environment self._func = func self._argument_count = len(arguments) @@ -488,7 +589,7 @@ def __init__(self, environment, func, name, arguments, self.catch_kwargs = catch_kwargs self.catch_varargs = catch_varargs self.caller = caller - self.explicit_caller = 'caller' in arguments + self.explicit_caller = "caller" in arguments if default_autoescape is None: default_autoescape = environment.autoescape self._default_autoescape = default_autoescape @@ -500,9 +601,8 @@ def __call__(self, *args, **kwargs): # decide largely based on compile-time information if a macro is # safe or unsafe. While there was a volatile mode it was largely # unused for deciding on escaping. This turns out to be - # problemtic for macros because if a macro is safe or not not so - # much depends on the escape mode when it was defined but when it - # was used. + # problematic for macros because whether a macro is safe depends not + # on the escape mode when it was defined, but rather when it was used. # # Because however we export macros from the module system and # there are historic callers that do not pass an eval context (and @@ -510,7 +610,7 @@ def __call__(self, *args, **kwargs): # check here. # # This is considered safe because an eval context is not a valid - # argument to callables otherwise anwyays. Worst case here is + # argument to callables otherwise anyway. Worst case here is # that if no eval context is passed we fall back to the compile # time autoescape flag. 
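
# A minimal sketch of the fallback described in the comment above,
# assuming the public jinja2 2.11 API: a macro called from Python receives
# no eval context, so the autoescape flag captured at compile time applies.
from jinja2 import Environment

plain = Environment(autoescape=False)
module = plain.from_string(
    "{% macro tag(name) %}<b>{{ name }}</b>{% endmacro %}"
).module
# Compile-time autoescape was False, so the argument is not escaped.
print(module.tag("<x>"))  # -> <b><x></b>
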
if args and isinstance(args[0], EvalContext): @@ -520,7 +620,7 @@ def __call__(self, *args, **kwargs): autoescape = self._default_autoescape # try to consume the positional arguments - arguments = list(args[:self._argument_count]) + arguments = list(args[: self._argument_count]) off = len(arguments) # For information why this is necessary refer to the handling @@ -531,12 +631,12 @@ def __call__(self, *args, **kwargs): # arguments expected we start filling in keyword arguments # and defaults. if off != self._argument_count: - for idx, name in enumerate(self.arguments[len(arguments):]): + for name in self.arguments[len(arguments) :]: try: value = kwargs.pop(name) except KeyError: value = missing - if name == 'caller': + if name == "caller": found_caller = True arguments.append(value) else: @@ -546,26 +646,31 @@ def __call__(self, *args, **kwargs): # if not also changed in the compiler's `function_scoping` method. # the order is caller, keyword arguments, positional arguments! if self.caller and not found_caller: - caller = kwargs.pop('caller', None) + caller = kwargs.pop("caller", None) if caller is None: - caller = self._environment.undefined('No caller defined', - name='caller') + caller = self._environment.undefined("No caller defined", name="caller") arguments.append(caller) if self.catch_kwargs: arguments.append(kwargs) elif kwargs: - if 'caller' in kwargs: - raise TypeError('macro %r was invoked with two values for ' - 'the special caller argument. This is ' - 'most likely a bug.' % self.name) - raise TypeError('macro %r takes no keyword argument %r' % - (self.name, next(iter(kwargs)))) + if "caller" in kwargs: + raise TypeError( + "macro %r was invoked with two values for " + "the special caller argument. This is " + "most likely a bug." % self.name + ) + raise TypeError( + "macro %r takes no keyword argument %r" + % (self.name, next(iter(kwargs))) + ) if self.catch_varargs: - arguments.append(args[self._argument_count:]) + arguments.append(args[self._argument_count :]) elif len(args) > self._argument_count: - raise TypeError('macro %r takes not more than %d argument(s)' % - (self.name, len(self.arguments))) + raise TypeError( + "macro %r takes not more than %d argument(s)" + % (self.name, len(self.arguments)) + ) return self._invoke(arguments, autoescape) @@ -577,16 +682,16 @@ def _invoke(self, arguments, autoescape): return rv def __repr__(self): - return '<%s %s>' % ( + return "<%s %s>" % ( self.__class__.__name__, - self.name is None and 'anonymous' or repr(self.name) + self.name is None and "anonymous" or repr(self.name), ) @implements_to_string class Undefined(object): """The default undefined type. This undefined type can be printed and - iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`: + iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) @@ -598,8 +703,13 @@ class Undefined(object): ... 
jinja2.exceptions.UndefinedError: 'foo' is undefined """ - __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name', - '_undefined_exception') + + __slots__ = ( + "_undefined_hint", + "_undefined_obj", + "_undefined_name", + "_undefined_exception", + ) def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError): self._undefined_hint = hint @@ -607,40 +717,86 @@ def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError): self._undefined_name = name self._undefined_exception = exc + @property + def _undefined_message(self): + """Build a message about the undefined value based on how it was + accessed. + """ + if self._undefined_hint: + return self._undefined_hint + + if self._undefined_obj is missing: + return "%r is undefined" % self._undefined_name + + if not isinstance(self._undefined_name, string_types): + return "%s has no element %r" % ( + object_type_repr(self._undefined_obj), + self._undefined_name, + ) + + return "%r has no attribute %r" % ( + object_type_repr(self._undefined_obj), + self._undefined_name, + ) + @internalcode def _fail_with_undefined_error(self, *args, **kwargs): - """Regular callback function for undefined objects that raises an - `jinja2.exceptions.UndefinedError` on call. + """Raise an :exc:`UndefinedError` when operations are performed + on the undefined value. """ - if self._undefined_hint is None: - if self._undefined_obj is missing: - hint = '%r is undefined' % self._undefined_name - elif not isinstance(self._undefined_name, string_types): - hint = '%s has no element %r' % ( - object_type_repr(self._undefined_obj), - self._undefined_name - ) - else: - hint = '%r has no attribute %r' % ( - object_type_repr(self._undefined_obj), - self._undefined_name - ) - else: - hint = self._undefined_hint - raise self._undefined_exception(hint) + raise self._undefined_exception(self._undefined_message) @internalcode def __getattr__(self, name): - if name[:2] == '__': + if name[:2] == "__": raise AttributeError(name) return self._fail_with_undefined_error() - __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \ - __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \ - __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \ - __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \ - __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \ - __rsub__ = _fail_with_undefined_error + __add__ = ( + __radd__ + ) = ( + __mul__ + ) = ( + __rmul__ + ) = ( + __div__ + ) = ( + __rdiv__ + ) = ( + __truediv__ + ) = ( + __rtruediv__ + ) = ( + __floordiv__ + ) = ( + __rfloordiv__ + ) = ( + __mod__ + ) = ( + __rmod__ + ) = ( + __pos__ + ) = ( + __neg__ + ) = ( + __call__ + ) = ( + __getitem__ + ) = ( + __lt__ + ) = ( + __le__ + ) = ( + __gt__ + ) = ( + __ge__ + ) = ( + __int__ + ) = ( + __float__ + ) = ( + __complex__ + ) = __pow__ = __rpow__ = __sub__ = __rsub__ = _fail_with_undefined_error def __eq__(self, other): return type(self) is type(other) @@ -652,7 +808,7 @@ def __hash__(self): return id(type(self)) def __str__(self): - return u'' + return u"" def __len__(self): return 0 @@ -663,10 +819,11 @@ def __iter__(self): def __nonzero__(self): return False + __bool__ = __nonzero__ def __repr__(self): - return 'Undefined' + return "Undefined" def make_logging_undefined(logger=None, base=None): @@ -691,6 +848,7 @@ def make_logging_undefined(logger=None, base=None): """ if logger is None: import logging + logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stderr)) if base is None: @@ -699,26 +857,27 
@@ def make_logging_undefined(logger=None, base=None): def _log_message(undef): if undef._undefined_hint is None: if undef._undefined_obj is missing: - hint = '%s is undefined' % undef._undefined_name + hint = "%s is undefined" % undef._undefined_name elif not isinstance(undef._undefined_name, string_types): - hint = '%s has no element %s' % ( + hint = "%s has no element %s" % ( object_type_repr(undef._undefined_obj), - undef._undefined_name) + undef._undefined_name, + ) else: - hint = '%s has no attribute %s' % ( + hint = "%s has no attribute %s" % ( object_type_repr(undef._undefined_obj), - undef._undefined_name) + undef._undefined_name, + ) else: hint = undef._undefined_hint - logger.warning('Template variable warning: %s', hint) + logger.warning("Template variable warning: %s", hint) class LoggingUndefined(base): - def _fail_with_undefined_error(self, *args, **kwargs): try: return base._fail_with_undefined_error(self, *args, **kwargs) except self._undefined_exception as e: - logger.error('Template variable error: %s', str(e)) + logger.error("Template variable error: %s", str(e)) raise e def __str__(self): @@ -732,6 +891,7 @@ def __iter__(self): return rv if PY2: + def __nonzero__(self): rv = base.__nonzero__(self) _log_message(self) @@ -741,7 +901,9 @@ def __unicode__(self): rv = base.__unicode__(self) _log_message(self) return rv + else: + def __bool__(self): rv = base.__bool__(self) _log_message(self) @@ -750,6 +912,36 @@ def __bool__(self): return LoggingUndefined +# No @implements_to_string decorator here because __str__ +# is not overwritten from Undefined in this class. +# This would cause a recursion error in Python 2. +class ChainableUndefined(Undefined): + """An undefined that is chainable, where both ``__getattr__`` and + ``__getitem__`` return itself rather than raising an + :exc:`UndefinedError`. + + >>> foo = ChainableUndefined(name='foo') + >>> str(foo.bar['baz']) + '' + >>> foo.bar['baz'] + 42 + Traceback (most recent call last): + ... + jinja2.exceptions.UndefinedError: 'foo' is undefined + + .. versionadded:: 2.11.0 + """ + + __slots__ = () + + def __html__(self): + return self.__str__() + + def __getattr__(self, _): + return self + + __getitem__ = __getattr__ + + @implements_to_string class DebugUndefined(Undefined): """An undefined that returns the debug info when printed. @@ -764,17 +956,18 @@ class DebugUndefined(Undefined): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ + __slots__ = () def __str__(self): if self._undefined_hint is None: if self._undefined_obj is missing: - return u'{{ %s }}' % self._undefined_name - return '{{ no such element: %s[%r] }}' % ( + return u"{{ %s }}" % self._undefined_name + return "{{ no such element: %s[%r] }}" % ( object_type_repr(self._undefined_obj), - self._undefined_name + self._undefined_name, ) - return u'{{ undefined value printed: %s }}' % self._undefined_hint + return u"{{ undefined value printed: %s }}" % self._undefined_hint @implements_to_string @@ -797,12 +990,22 @@ class StrictUndefined(Undefined): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ + __slots__ = () - __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \ - __ne__ = __bool__ = __hash__ = \ - Undefined._fail_with_undefined_error + __iter__ = ( + __str__ + ) = ( + __len__ + ) = ( + __nonzero__ + ) = __eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error # remove remaining slots attributes, after the metaclass did the magic they # are unneeded and irritating as they contain wrong data for the subclasses. 
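
# A minimal sketch of the ChainableUndefined type added above, assuming
# this vendored jinja2 2.11: chained attribute/item access stays silent
# until the value is rendered or otherwise used.
from jinja2 import Environment
from jinja2.runtime import ChainableUndefined

env = Environment(undefined=ChainableUndefined)
# With the default Undefined, ``config.site`` would already raise an
# UndefinedError; here the chain defers, so the default filter applies.
print(env.from_string("{{ config.site.theme | default('plain') }}").render())
# -> plain
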
-del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__ +del ( + Undefined.__slots__, + ChainableUndefined.__slots__, + DebugUndefined.__slots__, + StrictUndefined.__slots__, +) diff --git a/pipenv/vendor/jinja2/sandbox.py b/pipenv/vendor/jinja2/sandbox.py index 08c22f4f13..cfd7993aee 100644 --- a/pipenv/vendor/jinja2/sandbox.py +++ b/pipenv/vendor/jinja2/sandbox.py @@ -1,70 +1,66 @@ # -*- coding: utf-8 -*- +"""A sandbox layer that ensures unsafe operations cannot be performed. +Useful when the template itself comes from an untrusted source. """ - jinja2.sandbox - ~~~~~~~~~~~~~~ - - Adds a sandbox layer to Jinja as it was the default behavior in the old - Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the - default behavior is easier to use. - - The behavior can be changed by subclassing the environment. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD. -""" -import types import operator -from jinja2.environment import Environment -from jinja2.exceptions import SecurityError -from jinja2._compat import string_types, PY2, abc, range_type -from jinja2.utils import Markup +import types +import warnings +from collections import deque +from string import Formatter from markupsafe import EscapeFormatter -from string import Formatter +from markupsafe import Markup +from ._compat import abc +from ._compat import PY2 +from ._compat import range_type +from ._compat import string_types +from .environment import Environment +from .exceptions import SecurityError #: maximum number of items a range may produce MAX_RANGE = 100000 #: attributes of function objects that are considered unsafe. if PY2: - UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict', - 'func_defaults', 'func_globals']) + UNSAFE_FUNCTION_ATTRIBUTES = { + "func_closure", + "func_code", + "func_dict", + "func_defaults", + "func_globals", + } else: # On versions > python 2 the special attributes on functions are gone, # but they remain on methods and generators for whatever reason. UNSAFE_FUNCTION_ATTRIBUTES = set() - #: unsafe method attributes. function attributes are unsafe for methods too -UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self']) +UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"} -#: unsafe generator attirbutes. -UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code']) +#: unsafe generator attributes. 
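
# A quick sketch of what the unsafe-attribute sets here and just below
# guard, using the module's own ``is_internal_attribute`` helper (defined
# later in this file): frame and code objects reachable from a generator
# are treated as internal and blocked.
from jinja2.sandbox import is_internal_attribute

def gen():
    yield 1

print(is_internal_attribute(gen(), "gi_frame"))  # True: blocked
print(is_internal_attribute(gen(), "send"))      # False: allowed
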
+UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"} #: unsafe attributes on coroutines -UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code']) +UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"} #: unsafe attributes on async generators -UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame']) - -import warnings +UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"} # make sure we don't warn in python 2.6 about stuff we don't care about -warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning, - module='jinja2.sandbox') - -from collections import deque +warnings.filterwarnings( + "ignore", "the sets module", DeprecationWarning, module=__name__ +) _mutable_set_types = (set,) _mutable_mapping_types = (dict,) _mutable_sequence_types = (list,) - # on python 2.x we can register the user collection types try: from UserDict import UserDict, DictMixin from UserList import UserList + _mutable_mapping_types += (UserDict, DictMixin) _mutable_set_types += (UserList,) except ImportError: @@ -73,6 +69,7 @@ # if sets is still available, register the mutable set from there as well try: from sets import Set + _mutable_set_types += (Set,) except ImportError: pass @@ -82,22 +79,46 @@ _mutable_mapping_types += (abc.MutableMapping,) _mutable_sequence_types += (abc.MutableSequence,) - _mutable_spec = ( - (_mutable_set_types, frozenset([ - 'add', 'clear', 'difference_update', 'discard', 'pop', 'remove', - 'symmetric_difference_update', 'update' - ])), - (_mutable_mapping_types, frozenset([ - 'clear', 'pop', 'popitem', 'setdefault', 'update' - ])), - (_mutable_sequence_types, frozenset([ - 'append', 'reverse', 'insert', 'sort', 'extend', 'remove' - ])), - (deque, frozenset([ - 'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop', - 'popleft', 'remove', 'rotate' - ])) + ( + _mutable_set_types, + frozenset( + [ + "add", + "clear", + "difference_update", + "discard", + "pop", + "remove", + "symmetric_difference_update", + "update", + ] + ), + ), + ( + _mutable_mapping_types, + frozenset(["clear", "pop", "popitem", "setdefault", "update"]), + ), + ( + _mutable_sequence_types, + frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]), + ), + ( + deque, + frozenset( + [ + "append", + "appendleft", + "clear", + "extend", + "extendleft", + "pop", + "popleft", + "remove", + "rotate", + ] + ), + ), ) @@ -115,7 +136,7 @@ def __init__(self, args, kwargs): self._last_index = 0 def __getitem__(self, key): - if key == '': + if key == "": idx = self._last_index self._last_index += 1 try: @@ -133,9 +154,9 @@ def __len__(self): def inspect_format_method(callable): - if not isinstance(callable, (types.MethodType, - types.BuiltinMethodType)) or \ - callable.__name__ not in ('format', 'format_map'): + if not isinstance( + callable, (types.MethodType, types.BuiltinMethodType) + ) or callable.__name__ not in ("format", "format_map"): return None obj = callable.__self__ if isinstance(obj, string_types): @@ -186,24 +207,25 @@ def is_internal_attribute(obj, attr): if attr in UNSAFE_FUNCTION_ATTRIBUTES: return True elif isinstance(obj, types.MethodType): - if attr in UNSAFE_FUNCTION_ATTRIBUTES or \ - attr in UNSAFE_METHOD_ATTRIBUTES: + if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES: return True elif isinstance(obj, type): - if attr == 'mro': + if attr == "mro": return True elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)): return True elif isinstance(obj, types.GeneratorType): if attr in UNSAFE_GENERATOR_ATTRIBUTES: return 
True - elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType): + elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType): if attr in UNSAFE_COROUTINE_ATTRIBUTES: return True - elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType): + elif hasattr(types, "AsyncGeneratorType") and isinstance( + obj, types.AsyncGeneratorType + ): if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES: return True - return attr.startswith('__') + return attr.startswith("__") def modifies_known_mutable(obj, attr): @@ -244,28 +266,26 @@ class SandboxedEnvironment(Environment): raised. However also other exceptions may occur during the rendering so the caller has to ensure that all exceptions are caught. """ + sandboxed = True #: default callback table for the binary operators. A copy of this is #: available on each instance of a sandboxed environment as #: :attr:`binop_table` default_binop_table = { - '+': operator.add, - '-': operator.sub, - '*': operator.mul, - '/': operator.truediv, - '//': operator.floordiv, - '**': operator.pow, - '%': operator.mod + "+": operator.add, + "-": operator.sub, + "*": operator.mul, + "/": operator.truediv, + "//": operator.floordiv, + "**": operator.pow, + "%": operator.mod, } #: default callback table for the unary operators. A copy of this is #: available on each instance of a sandboxed environment as #: :attr:`unop_table` - default_unop_table = { - '+': operator.pos, - '-': operator.neg - } + default_unop_table = {"+": operator.pos, "-": operator.neg} #: a set of binary operators that should be intercepted. Each operator #: that is added to this set (empty by default) is delegated to the @@ -301,7 +321,7 @@ class SandboxedEnvironment(Environment): def intercept_unop(self, operator): """Called during template compilation with the name of a unary operator to check if it should be intercepted at runtime. If this - method returns `True`, :meth:`call_unop` is excuted for this unary + method returns `True`, :meth:`call_unop` is executed for this unary operator. The default implementation of :meth:`call_unop` will use the :attr:`unop_table` dictionary to perform the operator with the same logic as the builtin one. @@ -315,10 +335,9 @@ def intercept_unop(self, operator): """ return False - def __init__(self, *args, **kwargs): Environment.__init__(self, *args, **kwargs) - self.globals['range'] = safe_range + self.globals["range"] = safe_range self.binop_table = self.default_binop_table.copy() self.unop_table = self.default_unop_table.copy() @@ -329,7 +348,7 @@ def is_safe_attribute(self, obj, attr, value): special attributes of internal python objects as returned by the :func:`is_internal_attribute` function. """ - return not (attr.startswith('_') or is_internal_attribute(obj, attr)) + return not (attr.startswith("_") or is_internal_attribute(obj, attr)) def is_safe_callable(self, obj): """Check if an object is safely callable. Per default a function is @@ -337,8 +356,9 @@ def is_safe_callable(self, obj): True. Override this method to alter the behavior, but this won't affect the `unsafe` decorator from this module. 
""" - return not (getattr(obj, 'unsafe_callable', False) or - getattr(obj, 'alters_data', False)) + return not ( + getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False) + ) def call_binop(self, context, operator, left, right): """For intercepted binary operator calls (:meth:`intercepted_binops`) @@ -398,11 +418,13 @@ def getattr(self, obj, attribute): def unsafe_undefined(self, obj, attribute): """Return an undefined object for unsafe attributes.""" - return self.undefined('access to attribute %r of %r ' - 'object is unsafe.' % ( - attribute, - obj.__class__.__name__ - ), name=attribute, obj=obj, exc=SecurityError) + return self.undefined( + "access to attribute %r of %r " + "object is unsafe." % (attribute, obj.__class__.__name__), + name=attribute, + obj=obj, + exc=SecurityError, + ) def format_string(self, s, args, kwargs, format_func=None): """If a format call is detected, then this is routed through this @@ -413,10 +435,10 @@ def format_string(self, s, args, kwargs, format_func=None): else: formatter = SandboxedFormatter(self) - if format_func is not None and format_func.__name__ == 'format_map': + if format_func is not None and format_func.__name__ == "format_map": if len(args) != 1 or kwargs: raise TypeError( - 'format_map() takes exactly one argument %d given' + "format_map() takes exactly one argument %d given" % (len(args) + (kwargs is not None)) ) @@ -427,7 +449,7 @@ def format_string(self, s, args, kwargs, format_func=None): rv = formatter.vformat(s, args, kwargs) return type(s)(rv) - def call(__self, __context, __obj, *args, **kwargs): + def call(__self, __context, __obj, *args, **kwargs): # noqa: B902 """Call an object from sandboxed code.""" fmt = inspect_format_method(__obj) if fmt is not None: @@ -436,7 +458,7 @@ def call(__self, __context, __obj, *args, **kwargs): # the double prefixes are to avoid double keyword argument # errors when proxying the call. if not __self.is_safe_callable(__obj): - raise SecurityError('%r is not safely callable' % (__obj,)) + raise SecurityError("%r is not safely callable" % (__obj,)) return __context.call(__obj, *args, **kwargs) @@ -452,16 +474,16 @@ def is_safe_attribute(self, obj, attr, value): return not modifies_known_mutable(obj, attr) -# This really is not a public API apparenlty. +# This really is not a public API apparently. try: from _string import formatter_field_name_split except ImportError: + def formatter_field_name_split(field_name): return field_name._formatter_field_name_split() class SandboxedFormatterMixin(object): - def __init__(self, env): self._env = env @@ -475,14 +497,14 @@ def get_field(self, field_name, args, kwargs): obj = self._env.getitem(obj, i) return obj, first -class SandboxedFormatter(SandboxedFormatterMixin, Formatter): +class SandboxedFormatter(SandboxedFormatterMixin, Formatter): def __init__(self, env): SandboxedFormatterMixin.__init__(self, env) Formatter.__init__(self) -class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter): +class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter): def __init__(self, env, escape): SandboxedFormatterMixin.__init__(self, env) EscapeFormatter.__init__(self, escape) diff --git a/pipenv/vendor/jinja2/tests.py b/pipenv/vendor/jinja2/tests.py index bc99d66c83..fabd4ce51b 100644 --- a/pipenv/vendor/jinja2/tests.py +++ b/pipenv/vendor/jinja2/tests.py @@ -1,23 +1,17 @@ # -*- coding: utf-8 -*- -""" - jinja2.tests - ~~~~~~~~~~~~ - - Jinja test functions. Used with the "is" operator. 
- - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" +"""Built-in template tests used with the ``is`` operator.""" +import decimal import operator import re -from jinja2.runtime import Undefined -from jinja2._compat import text_type, string_types, integer_types, abc -import decimal - -number_re = re.compile(r'^-?\d+(\.\d+)?$') -regex_type = type(number_re) +from ._compat import abc +from ._compat import integer_types +from ._compat import string_types +from ._compat import text_type +from .runtime import Undefined +number_re = re.compile(r"^-?\d+(\.\d+)?$") +regex_type = type(number_re) test_callable = callable @@ -63,6 +57,48 @@ def test_none(value): return value is None +def test_boolean(value): + """Return true if the object is a boolean value. + + .. versionadded:: 2.11 + """ + return value is True or value is False + + +def test_false(value): + """Return true if the object is False. + + .. versionadded:: 2.11 + """ + return value is False + + +def test_true(value): + """Return true if the object is True. + + .. versionadded:: 2.11 + """ + return value is True + + +# NOTE: The existing 'number' test matches booleans and floats +def test_integer(value): + """Return true if the object is an integer. + + .. versionadded:: 2.11 + """ + return isinstance(value, integer_types) and value is not True and value is not False + + +# NOTE: The existing 'number' test matches booleans and integers +def test_float(value): + """Return true if the object is a float. + + .. versionadded:: 2.11 + """ + return isinstance(value, float) + + def test_lower(value): """Return true if the variable is lowercased.""" return text_type(value).islower() @@ -98,7 +134,7 @@ def test_sequence(value): try: len(value) value.__getitem__ - except: + except Exception: return False return True @@ -127,7 +163,7 @@ def test_iterable(value): def test_escaped(value): """Check if the value is escaped.""" - return hasattr(value, '__html__') + return hasattr(value, "__html__") def test_in(value, seq): @@ -139,36 +175,41 @@ def test_in(value, seq): TESTS = { - 'odd': test_odd, - 'even': test_even, - 'divisibleby': test_divisibleby, - 'defined': test_defined, - 'undefined': test_undefined, - 'none': test_none, - 'lower': test_lower, - 'upper': test_upper, - 'string': test_string, - 'mapping': test_mapping, - 'number': test_number, - 'sequence': test_sequence, - 'iterable': test_iterable, - 'callable': test_callable, - 'sameas': test_sameas, - 'escaped': test_escaped, - 'in': test_in, - '==': operator.eq, - 'eq': operator.eq, - 'equalto': operator.eq, - '!=': operator.ne, - 'ne': operator.ne, - '>': operator.gt, - 'gt': operator.gt, - 'greaterthan': operator.gt, - 'ge': operator.ge, - '>=': operator.ge, - '<': operator.lt, - 'lt': operator.lt, - 'lessthan': operator.lt, - '<=': operator.le, - 'le': operator.le, + "odd": test_odd, + "even": test_even, + "divisibleby": test_divisibleby, + "defined": test_defined, + "undefined": test_undefined, + "none": test_none, + "boolean": test_boolean, + "false": test_false, + "true": test_true, + "integer": test_integer, + "float": test_float, + "lower": test_lower, + "upper": test_upper, + "string": test_string, + "mapping": test_mapping, + "number": test_number, + "sequence": test_sequence, + "iterable": test_iterable, + "callable": test_callable, + "sameas": test_sameas, + "escaped": test_escaped, + "in": test_in, + "==": operator.eq, + "eq": operator.eq, + "equalto": operator.eq, + "!=": operator.ne, + "ne": operator.ne, + ">": operator.gt, + "gt": 
operator.gt, + "greaterthan": operator.gt, + "ge": operator.ge, + ">=": operator.ge, + "<": operator.lt, + "lt": operator.lt, + "lessthan": operator.lt, + "<=": operator.le, + "le": operator.le, } diff --git a/pipenv/vendor/jinja2/utils.py b/pipenv/vendor/jinja2/utils.py index db9c5d062d..e3285e8edb 100644 --- a/pipenv/vendor/jinja2/utils.py +++ b/pipenv/vendor/jinja2/utils.py @@ -1,44 +1,44 @@ # -*- coding: utf-8 -*- -""" - jinja2.utils - ~~~~~~~~~~~~ - - Utility functions. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" -import re import json -import errno +import os +import re +import warnings from collections import deque +from random import choice +from random import randrange from threading import Lock -from jinja2._compat import text_type, string_types, implements_iterator, \ - url_quote, abc +from markupsafe import escape +from markupsafe import Markup -_word_split_re = re.compile(r'(\s+)') +from ._compat import abc +from ._compat import string_types +from ._compat import text_type +from ._compat import url_quote + +_word_split_re = re.compile(r"(\s+)") _punctuation_re = re.compile( - '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % ( - '|'.join(map(re.escape, ('(', '<', '<'))), - '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '>'))) + "^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$" + % ( + "|".join(map(re.escape, ("(", "<", "<"))), + "|".join(map(re.escape, (".", ",", ")", ">", "\n", ">"))), ) ) -_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$') -_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)') -_entity_re = re.compile(r'&([^;]+);') -_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' -_digits = '0123456789' +_simple_email_re = re.compile(r"^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$") +_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)") +_entity_re = re.compile(r"&([^;]+);") +_letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" +_digits = "0123456789" # special singleton representing missing values for the runtime -missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})() +missing = type("MissingType", (), {"__repr__": lambda x: "missing"})() # internal code internal_code = set() -concat = u''.join +concat = u"".join -_slash_escape = '\\/' not in json.dumps('/') +_slash_escape = "\\/" not in json.dumps("/") def contextfunction(f): @@ -98,24 +98,26 @@ def default(var, default=''): return default return var """ - from jinja2.runtime import Undefined + from .runtime import Undefined + return isinstance(obj, Undefined) def consume(iterable): """Consumes an iterable without doing anything with it.""" - for event in iterable: + for _ in iterable: pass def clear_caches(): - """Jinja2 keeps internal caches for environments and lexers. These are - used so that Jinja2 doesn't have to recreate environments and lexers all + """Jinja keeps internal caches for environments and lexers. These are + used so that Jinja doesn't have to recreate environments and lexers all the time. Normally you don't have to care about that but if you are measuring memory consumption you may want to clean the caches. 
""" - from jinja2.environment import _spontaneous_environments - from jinja2.lexer import _lexer_cache + from .environment import _spontaneous_environments + from .lexer import _lexer_cache + _spontaneous_environments.clear() _lexer_cache.clear() @@ -132,12 +134,10 @@ def import_string(import_name, silent=False): :return: imported object """ try: - if ':' in import_name: - module, obj = import_name.split(':', 1) - elif '.' in import_name: - items = import_name.split('.') - module = '.'.join(items[:-1]) - obj = items[-1] + if ":" in import_name: + module, obj = import_name.split(":", 1) + elif "." in import_name: + module, _, obj = import_name.rpartition(".") else: return __import__(import_name) return getattr(__import__(module, None, None, [obj]), obj) @@ -146,15 +146,14 @@ def import_string(import_name, silent=False): raise -def open_if_exists(filename, mode='rb'): +def open_if_exists(filename, mode="rb"): """Returns a file descriptor for the filename if that file exists, - otherwise `None`. + otherwise ``None``. """ - try: - return open(filename, mode) - except IOError as e: - if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL): - raise + if not os.path.isfile(filename): + return None + + return open(filename, mode) def object_type_repr(obj): @@ -163,15 +162,15 @@ def object_type_repr(obj): example for `None` and `Ellipsis`). """ if obj is None: - return 'None' + return "None" elif obj is Ellipsis: - return 'Ellipsis' + return "Ellipsis" # __builtin__ in 2.x, builtins in 3.x - if obj.__class__.__module__ in ('__builtin__', 'builtins'): + if obj.__class__.__module__ in ("__builtin__", "builtins"): name = obj.__class__.__name__ else: - name = obj.__class__.__module__ + '.' + obj.__class__.__name__ - return '%s object' % name + name = obj.__class__.__module__ + "." + obj.__class__.__name__ + return "%s object" % name def pformat(obj, verbose=False): @@ -180,9 +179,11 @@ def pformat(obj, verbose=False): """ try: from pretty import pretty + return pretty(obj, verbose=verbose) except ImportError: from pprint import pformat + return pformat(obj) @@ -200,45 +201,60 @@ def urlize(text, trim_url_limit=None, rel=None, target=None): If target is not None, a target attribute will be added to the link. """ - trim_url = lambda x, limit=trim_url_limit: limit is not None \ - and (x[:limit] + (len(x) >=limit and '...' - or '')) or x + trim_url = ( + lambda x, limit=trim_url_limit: limit is not None + and (x[:limit] + (len(x) >= limit and "..." 
or "")) + or x + ) words = _word_split_re.split(text_type(escape(text))) - rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or '' - target_attr = target and ' target="%s"' % escape(target) or '' + rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or "" + target_attr = target and ' target="%s"' % escape(target) or "" for i, word in enumerate(words): match = _punctuation_re.match(word) if match: lead, middle, trail = match.groups() - if middle.startswith('www.') or ( - '@' not in middle and - not middle.startswith('http://') and - not middle.startswith('https://') and - len(middle) > 0 and - middle[0] in _letters + _digits and ( - middle.endswith('.org') or - middle.endswith('.net') or - middle.endswith('.com') - )): - middle = '<a href="http://%s"%s%s>%s</a>' % (middle, - rel_attr, target_attr, trim_url(middle)) - if middle.startswith('http://') or \ - middle.startswith('https://'): - middle = '<a href="%s"%s%s>%s</a>' % (middle, - rel_attr, target_attr, trim_url(middle)) - if '@' in middle and not middle.startswith('www.') and \ - not ':' in middle and _simple_email_re.match(middle): + if middle.startswith("www.") or ( + "@" not in middle + and not middle.startswith("http://") + and not middle.startswith("https://") + and len(middle) > 0 + and middle[0] in _letters + _digits + and ( + middle.endswith(".org") + or middle.endswith(".net") + or middle.endswith(".com") + ) + ): + middle = '<a href="http://%s"%s%s>%s</a>' % ( + middle, + rel_attr, + target_attr, + trim_url(middle), + ) + if middle.startswith("http://") or middle.startswith("https://"): + middle = '<a href="%s"%s%s>%s</a>' % ( + middle, + rel_attr, + target_attr, + trim_url(middle), + ) + if ( + "@" in middle + and not middle.startswith("www.") + and ":" not in middle + and _simple_email_re.match(middle) + ): middle = '<a href="mailto:%s">%s</a>' % (middle, middle) if lead + middle + trail != word: words[i] = lead + middle + trail - return u''.join(words) + return u"".join(words) def generate_lorem_ipsum(n=5, html=True, min=20, max=100): """Generate some lorem ipsum for the template.""" - from jinja2.constants import LOREM_IPSUM_WORDS - from random import choice, randrange + from .constants import LOREM_IPSUM_WORDS + words = LOREM_IPSUM_WORDS.split() result = [] @@ -263,43 +279,53 @@ def generate_lorem_ipsum(n=5, html=True, min=20, max=100): if idx - randrange(3, 8) > last_comma: last_comma = idx last_fullstop += 2 - word += ',' + word += "," # add end of sentences if idx - randrange(10, 20) > last_fullstop: last_comma = last_fullstop = idx - word += '.' + word += "." next_capitalized = True p.append(word) # ensure that the paragraph ends with a dot. - p = u' '.join(p) - if p.endswith(','): - p = p[:-1] + '.' - elif not p.endswith('.'): - p += '.' + p = u" ".join(p) + if p.endswith(","): + p = p[:-1] + "." + elif not p.endswith("."): + p += "." result.append(p) if not html: - return u'\n\n'.join(result) - return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result)) + return u"\n\n".join(result) + return Markup(u"\n".join(u"<p>%s</p>" % escape(x) for x in result)) + +def unicode_urlencode(obj, charset="utf-8", for_qs=False): + """Quote a string for use in a URL using the given charset. -def unicode_urlencode(obj, charset='utf-8', for_qs=False): - """URL escapes a single bytestring or unicode string with the - given charset if applicable to URL safe quoting under all rules - that need to be considered under all supported Python versions. 
+ This function is misnamed, it is a wrapper around + :func:`urllib.parse.quote`. - If non strings are provided they are converted to their unicode - representation first. + :param obj: String or bytes to quote. Other types are converted to + string then encoded to bytes using the given charset. + :param charset: Encode text to bytes using this charset. + :param for_qs: Quote "/" and use "+" for spaces. """ if not isinstance(obj, string_types): obj = text_type(obj) + if isinstance(obj, text_type): obj = obj.encode(charset) - safe = not for_qs and b'/' or b'' - rv = text_type(url_quote(obj, safe)) + + safe = b"" if for_qs else b"/" + rv = url_quote(obj, safe) + + if not isinstance(rv, text_type): + rv = rv.decode("utf-8") + if for_qs: - rv = rv.replace('%20', '+') + rv = rv.replace("%20", "+") + return rv @@ -326,9 +352,9 @@ def _postinit(self): def __getstate__(self): return { - 'capacity': self.capacity, - '_mapping': self._mapping, - '_queue': self._queue + "capacity": self.capacity, + "_mapping": self._mapping, + "_queue": self._queue, } def __setstate__(self, d): @@ -342,7 +368,7 @@ def copy(self): """Return a shallow copy of the instance.""" rv = self.__class__(self.capacity) rv._mapping.update(self._mapping) - rv._queue = deque(self._queue) + rv._queue.extend(self._queue) return rv def get(self, key, default=None): @@ -356,15 +382,11 @@ def setdefault(self, key, default=None): """Set `default` if the key is not in the cache otherwise leave unchanged. Return the value of this key. """ - self._wlock.acquire() try: - try: - return self[key] - except KeyError: - self[key] = default - return default - finally: - self._wlock.release() + return self[key] + except KeyError: + self[key] = default + return default def clear(self): """Clear the cache.""" @@ -384,10 +406,7 @@ def __len__(self): return len(self._mapping) def __repr__(self): - return '<%s %r>' % ( - self.__class__.__name__, - self._mapping - ) + return "<%s %r>" % (self.__class__.__name__, self._mapping) def __getitem__(self, key): """Get an item from the cache. Moves the item up so that it has the @@ -436,7 +455,6 @@ def __delitem__(self, key): try: self._remove(key) except ValueError: - # __getitem__ is not locked, it might happen pass finally: self._wlock.release() @@ -449,6 +467,12 @@ def items(self): def iteritems(self): """Iterate over all items.""" + warnings.warn( + "'iteritems()' will be removed in version 3.0. Use" + " 'iter(cache.items())' instead.", + DeprecationWarning, + stacklevel=2, + ) return iter(self.items()) def values(self): @@ -457,6 +481,22 @@ def values(self): def itervalue(self): """Iterate over all values.""" + warnings.warn( + "'itervalue()' will be removed in version 3.0. Use" + " 'iter(cache.values())' instead.", + DeprecationWarning, + stacklevel=2, + ) + return iter(self.values()) + + def itervalues(self): + """Iterate over all values.""" + warnings.warn( + "'itervalues()' will be removed in version 3.0. Use" + " 'iter(cache.values())' instead.", + DeprecationWarning, + stacklevel=2, + ) return iter(self.values()) def keys(self): @@ -467,12 +507,19 @@ def iterkeys(self): """Iterate over all keys in the cache dict, ordered by the most recent usage. """ - return reversed(tuple(self._queue)) + warnings.warn( + "'iterkeys()' will be removed in version 3.0. 
Use" + " 'iter(cache.keys())' instead.", + DeprecationWarning, + stacklevel=2, + ) + return iter(self) - __iter__ = iterkeys + def __iter__(self): + return reversed(tuple(self._queue)) def __reversed__(self): - """Iterate over the values in the cache dict, oldest items + """Iterate over the keys in the cache dict, oldest items coming first. """ return iter(tuple(self._queue)) @@ -483,10 +530,12 @@ def __reversed__(self): abc.MutableMapping.register(LRUCache) -def select_autoescape(enabled_extensions=('html', 'htm', 'xml'), - disabled_extensions=(), - default_for_string=True, - default=False): +def select_autoescape( + enabled_extensions=("html", "htm", "xml"), + disabled_extensions=(), + default_for_string=True, + default=False, +): """Intelligently sets the initial value of autoescaping based on the filename of the template. This is the recommended way to configure autoescaping if you do not want to write a custom function yourself. @@ -521,10 +570,9 @@ def select_autoescape(enabled_extensions=('html', 'htm', 'xml'), .. versionadded:: 2.9 """ - enabled_patterns = tuple('.' + x.lstrip('.').lower() - for x in enabled_extensions) - disabled_patterns = tuple('.' + x.lstrip('.').lower() - for x in disabled_extensions) + enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions) + disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions) + def autoescape(template_name): if template_name is None: return default_for_string @@ -534,6 +582,7 @@ def autoescape(template_name): if template_name.endswith(disabled_patterns): return False return default + return autoescape @@ -557,35 +606,63 @@ def htmlsafe_json_dumps(obj, dumper=None, **kwargs): """ if dumper is None: dumper = json.dumps - rv = dumper(obj, **kwargs) \ - .replace(u'<', u'\\u003c') \ - .replace(u'>', u'\\u003e') \ - .replace(u'&', u'\\u0026') \ - .replace(u"'", u'\\u0027') + rv = ( + dumper(obj, **kwargs) + .replace(u"<", u"\\u003c") + .replace(u">", u"\\u003e") + .replace(u"&", u"\\u0026") + .replace(u"'", u"\\u0027") + ) return Markup(rv) -@implements_iterator class Cycler(object): - """A cycle helper for templates.""" + """Cycle through values by yield them one at a time, then restarting + once the end is reached. Available as ``cycler`` in templates. + + Similar to ``loop.cycle``, but can be used outside loops or across + multiple loops. For example, render a list of folders and files in a + list, alternating giving them "odd" and "even" classes. + + .. code-block:: html+jinja + + {% set row_class = cycler("odd", "even") %} + <ul class="browser"> + {% for folder in folders %} + <li class="folder {{ row_class.next() }}">{{ folder }} + {% endfor %} + {% for file in files %} + <li class="file {{ row_class.next() }}">{{ file }} + {% endfor %} + </ul> + + :param items: Each positional argument will be yielded in the order + given for each cycle. + + .. versionadded:: 2.1 + """ def __init__(self, *items): if not items: - raise RuntimeError('at least one item has to be provided') + raise RuntimeError("at least one item has to be provided") self.items = items - self.reset() + self.pos = 0 def reset(self): - """Resets the cycle.""" + """Resets the current item to the first item.""" self.pos = 0 @property def current(self): - """Returns the current item.""" + """Return the current item. Equivalent to the item that will be + returned next time :meth:`next` is called. 
+ """ return self.items[self.pos] def next(self): - """Goes one item ahead and returns it.""" + """Return the current item, then advance :attr:`current` to the + next item. + """ rv = self.current self.pos = (self.pos + 1) % len(self.items) return rv @@ -596,27 +673,27 @@ def next(self): class Joiner(object): """A joining helper for templates.""" - def __init__(self, sep=u', '): + def __init__(self, sep=u", "): self.sep = sep self.used = False def __call__(self): if not self.used: self.used = True - return u'' + return u"" return self.sep class Namespace(object): """A namespace object that can hold arbitrary attributes. It may be - initialized from a dictionary or with keyword argments.""" + initialized from a dictionary or with keyword arguments.""" - def __init__(*args, **kwargs): + def __init__(*args, **kwargs): # noqa: B902 self, args = args[0], args[1:] self.__attrs = dict(*args, **kwargs) def __getattribute__(self, name): - if name == '_Namespace__attrs': + if name == "_Namespace__attrs": return object.__getattribute__(self, name) try: return self.__attrs[name] @@ -627,16 +704,24 @@ def __setitem__(self, name, value): self.__attrs[name] = value def __repr__(self): - return '<Namespace %r>' % self.__attrs + return "<Namespace %r>" % self.__attrs # does this python version support async for in and async generators? try: - exec('async def _():\n async for _ in ():\n yield _') + exec("async def _():\n async for _ in ():\n yield _") have_async_gen = True except SyntaxError: have_async_gen = False -# Imported here because that's where it was in the past -from markupsafe import Markup, escape, soft_unicode +def soft_unicode(s): + from markupsafe import soft_unicode + + warnings.warn( + "'jinja2.utils.soft_unicode' will be removed in version 3.0." + " Use 'markupsafe.soft_unicode' instead.", + DeprecationWarning, + stacklevel=2, + ) + return soft_unicode(s) diff --git a/pipenv/vendor/jinja2/visitor.py b/pipenv/vendor/jinja2/visitor.py index ba526dfac9..d1365bf10e 100644 --- a/pipenv/vendor/jinja2/visitor.py +++ b/pipenv/vendor/jinja2/visitor.py @@ -1,14 +1,8 @@ # -*- coding: utf-8 -*- +"""API for traversing the AST nodes. Implemented by the compiler and +meta introspection. """ - jinja2.visitor - ~~~~~~~~~~~~~~ - - This module implements a visitor for the nodes. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD. -""" -from jinja2.nodes import Node +from .nodes import Node class NodeVisitor(object): @@ -28,7 +22,7 @@ def get_visitor(self, node): exists for this node. In that case the generic visit function is used instead. """ - method = 'visit_' + node.__class__.__name__ + method = "visit_" + node.__class__.__name__ return getattr(self, method, None) def visit(self, node, *args, **kwargs): diff --git a/pipenv/vendor/packaging/LICENSE.APACHE b/pipenv/vendor/packaging/LICENSE.APACHE index 4947287f7b..f433b1a53f 100644 --- a/pipenv/vendor/packaging/LICENSE.APACHE +++ b/pipenv/vendor/packaging/LICENSE.APACHE @@ -174,4 +174,4 @@ incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
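
# A minimal sketch of the reflowed ``select_autoescape`` helper above,
# assuming the jinja2 2.9+ API: the returned callable decides autoescaping
# from the template name alone.
from jinja2.utils import select_autoescape

autoescape = select_autoescape(enabled_extensions=("html", "xml"))
print(autoescape("page.html"))  # True: matches an enabled extension
print(autoescape("notes.txt"))  # False: falls through to ``default``
print(autoescape(None))         # True: ``default_for_string`` applies
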
- END OF TERMS AND CONDITIONS \ No newline at end of file + END OF TERMS AND CONDITIONS diff --git a/pipenv/vendor/packaging/__about__.py b/pipenv/vendor/packaging/__about__.py index dc95138d04..5161d141be 100644 --- a/pipenv/vendor/packaging/__about__.py +++ b/pipenv/vendor/packaging/__about__.py @@ -18,7 +18,7 @@ __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "19.2" +__version__ = "20.3" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/pipenv/vendor/packaging/_compat.py b/pipenv/vendor/packaging/_compat.py index 25da473c19..a145f7eeb3 100644 --- a/pipenv/vendor/packaging/_compat.py +++ b/pipenv/vendor/packaging/_compat.py @@ -5,6 +5,11 @@ import sys +from ._typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: # pragma: no cover + from typing import Any, Dict, Tuple, Type + PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 @@ -18,14 +23,16 @@ def with_metaclass(meta, *bases): + # type: (Type[Any], Tuple[Type[Any], ...]) -> Any """ Create a base class with a metaclass. """ # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. - class metaclass(meta): + class metaclass(meta): # type: ignore def __new__(cls, name, this_bases, d): + # type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any return meta(name, bases, d) return type.__new__(metaclass, "temporary_class", (), {}) diff --git a/pipenv/vendor/packaging/_structures.py b/pipenv/vendor/packaging/_structures.py index 68dcca634d..800d5c5588 100644 --- a/pipenv/vendor/packaging/_structures.py +++ b/pipenv/vendor/packaging/_structures.py @@ -4,65 +4,83 @@ from __future__ import absolute_import, division, print_function -class Infinity(object): +class InfinityType(object): def __repr__(self): + # type: () -> str return "Infinity" def __hash__(self): + # type: () -> int return hash(repr(self)) def __lt__(self, other): + # type: (object) -> bool return False def __le__(self, other): + # type: (object) -> bool return False def __eq__(self, other): + # type: (object) -> bool return isinstance(other, self.__class__) def __ne__(self, other): + # type: (object) -> bool return not isinstance(other, self.__class__) def __gt__(self, other): + # type: (object) -> bool return True def __ge__(self, other): + # type: (object) -> bool return True def __neg__(self): + # type: (object) -> NegativeInfinityType return NegativeInfinity -Infinity = Infinity() +Infinity = InfinityType() -class NegativeInfinity(object): +class NegativeInfinityType(object): def __repr__(self): + # type: () -> str return "-Infinity" def __hash__(self): + # type: () -> int return hash(repr(self)) def __lt__(self, other): + # type: (object) -> bool return True def __le__(self, other): + # type: (object) -> bool return True def __eq__(self, other): + # type: (object) -> bool return isinstance(other, self.__class__) def __ne__(self, other): + # type: (object) -> bool return not isinstance(other, self.__class__) def __gt__(self, other): + # type: (object) -> bool return False def __ge__(self, other): + # type: (object) -> bool return False def __neg__(self): + # type: (object) -> InfinityType return Infinity -NegativeInfinity = NegativeInfinity() +NegativeInfinity = NegativeInfinityType() diff --git a/pipenv/vendor/packaging/_typing.py b/pipenv/vendor/packaging/_typing.py new file mode 100644 index 0000000000..dc6dfce7ad --- /dev/null +++ 
b/pipenv/vendor/packaging/_typing.py @@ -0,0 +1,39 @@ +"""For neatly implementing static typing in packaging. + +`mypy` - the static type analysis tool we use - uses the `typing` module, which +provides core functionality fundamental to mypy's functioning. + +Generally, `typing` would be imported at runtime and used in that fashion - +it acts as a no-op at runtime and does not have any run-time overhead by +design. + +As it turns out, `typing` is not vendorable - it uses separate sources for +Python 2/Python 3. Thus, this codebase can not expect it to be present. +To work around this, mypy allows the typing import to be behind a False-y +optional to prevent it from running at runtime and type-comments can be used +to remove the need for the types to be accessible directly during runtime. + +This module provides the False-y guard in a nicely named fashion so that a +curious maintainer can reach here to read this. + +In packaging, all static-typing related imports should be guarded as follows: + + from packaging._typing import MYPY_CHECK_RUNNING + + if MYPY_CHECK_RUNNING: + from typing import ... + +Ref: https://github.com/python/mypy/issues/3216 +""" + +MYPY_CHECK_RUNNING = False + +if MYPY_CHECK_RUNNING: # pragma: no cover + import typing + + cast = typing.cast +else: + # typing's cast() is needed at runtime, but we don't want to import typing. + # Thus, we use a dummy no-op version, which we tell mypy to ignore. + def cast(type_, value): # type: ignore + return value diff --git a/pipenv/vendor/packaging/markers.py b/pipenv/vendor/packaging/markers.py index 3b8af3242e..f017471139 100644 --- a/pipenv/vendor/packaging/markers.py +++ b/pipenv/vendor/packaging/markers.py @@ -13,8 +13,14 @@ from pyparsing import Literal as L # noqa from ._compat import string_types +from ._typing import MYPY_CHECK_RUNNING from .specifiers import Specifier, InvalidSpecifier +if MYPY_CHECK_RUNNING: # pragma: no cover + from typing import Any, Callable, Dict, List, Optional, Tuple, Union + + Operator = Callable[[str, str], bool] + __all__ = [ "InvalidMarker", @@ -46,30 +52,37 @@ class UndefinedEnvironmentName(ValueError): class Node(object): def __init__(self, value): + # type: (Any) -> None self.value = value def __str__(self): + # type: () -> str return str(self.value) def __repr__(self): + # type: () -> str return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) def serialize(self): + # type: () -> str raise NotImplementedError class Variable(Node): def serialize(self): + # type: () -> str return str(self) class Value(Node): def serialize(self): + # type: () -> str return '"{0}"'.format(self) class Op(Node): def serialize(self): + # type: () -> str return str(self) @@ -85,13 +98,13 @@ def serialize(self): | L("python_version") | L("sys_platform") | L("os_name") - | L("os.name") + | L("os.name") # PEP-345 | L("sys.platform") # PEP-345 | L("platform.version") # PEP-345 | L("platform.machine") # PEP-345 | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # PEP-345 - | L("extra") # undocumented setuptools legacy + | L("python_implementation") # undocumented setuptools legacy + | L("extra") # PEP-508 ) ALIASES = { "os.name": "os_name", @@ -131,6 +144,7 @@ def serialize(self): def _coerce_parse_result(results): + # type: (Union[ParseResults, List[Any]]) -> List[Any] if isinstance(results, ParseResults): return [_coerce_parse_result(i) for i in results] else: @@ -138,6 +152,8 @@ def _coerce_parse_result(results): def _format_marker(marker, first=True): + # type: (Union[List[str], 
Tuple[Node, ...], str], Optional[bool]) -> str + assert isinstance(marker, (list, tuple, string_types)) # Sometimes we have a structure like [[...]] which is a single item list @@ -172,10 +188,11 @@ def _format_marker(marker, first=True): "!=": operator.ne, ">=": operator.ge, ">": operator.gt, -} +} # type: Dict[str, Operator] def _eval_op(lhs, op, rhs): + # type: (str, Op, str) -> bool try: spec = Specifier("".join([op.serialize(), rhs])) except InvalidSpecifier: @@ -183,7 +200,7 @@ def _eval_op(lhs, op, rhs): else: return spec.contains(lhs) - oper = _operators.get(op.serialize()) + oper = _operators.get(op.serialize()) # type: Optional[Operator] if oper is None: raise UndefinedComparison( "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) @@ -192,13 +209,18 @@ def _eval_op(lhs, op, rhs): return oper(lhs, rhs) -_undefined = object() +class Undefined(object): + pass + + +_undefined = Undefined() def _get_env(environment, name): - value = environment.get(name, _undefined) + # type: (Dict[str, str], str) -> str + value = environment.get(name, _undefined) # type: Union[str, Undefined] - if value is _undefined: + if isinstance(value, Undefined): raise UndefinedEnvironmentName( "{0!r} does not exist in evaluation environment.".format(name) ) @@ -207,7 +229,8 @@ def _get_env(environment, name): def _evaluate_markers(markers, environment): - groups = [[]] + # type: (List[Any], Dict[str, str]) -> bool + groups = [[]] # type: List[List[bool]] for marker in markers: assert isinstance(marker, (list, tuple, string_types)) @@ -234,6 +257,7 @@ def _evaluate_markers(markers, environment): def format_full_version(info): + # type: (sys._version_info) -> str version = "{0.major}.{0.minor}.{0.micro}".format(info) kind = info.releaselevel if kind != "final": @@ -242,9 +266,13 @@ def format_full_version(info): def default_environment(): + # type: () -> Dict[str, str] if hasattr(sys, "implementation"): - iver = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name + # Ignoring the `sys.implementation` reference for type checking due to + # mypy not liking that the attribute doesn't exist in Python 2.7 when + # run with the `--py27` flag. + iver = format_full_version(sys.implementation.version) # type: ignore + implementation_name = sys.implementation.name # type: ignore else: iver = "0" implementation_name = "" @@ -266,6 +294,7 @@ def default_environment(): class Marker(object): def __init__(self, marker): + # type: (str) -> None try: self._markers = _coerce_parse_result(MARKER.parseString(marker)) except ParseException as e: @@ -275,12 +304,15 @@ def __init__(self, marker): raise InvalidMarker(err_str) def __str__(self): + # type: () -> str return _format_marker(self._markers) def __repr__(self): + # type: () -> str return "<Marker({0!r})>".format(str(self)) def evaluate(self, environment=None): + # type: (Optional[Dict[str, str]]) -> bool """Evaluate a marker. 
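# Aside: a minimal sketch of how the Marker machinery above is driven, assuming
# the stand-alone `packaging` distribution (vendored here as
# pipenv.vendor.packaging) is importable; printed values are illustrative.
from packaging.markers import Marker

marker = Marker('python_version >= "2.7" and os_name == "posix"')
print(marker.evaluate())                     # evaluated against default_environment()
print(marker.evaluate({"os_name": "java"}))  # overrides merge into the default env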
Return the boolean from evaluating the given marker against the diff --git a/pipenv/vendor/packaging/py.typed b/pipenv/vendor/packaging/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/packaging/requirements.py b/pipenv/vendor/packaging/requirements.py index 4d9688b932..1b547927df 100644 --- a/pipenv/vendor/packaging/requirements.py +++ b/pipenv/vendor/packaging/requirements.py @@ -11,9 +11,13 @@ from pyparsing import Literal as L # noqa from six.moves.urllib import parse as urlparse +from ._typing import MYPY_CHECK_RUNNING from .markers import MARKER_EXPR, Marker from .specifiers import LegacySpecifier, Specifier, SpecifierSet +if MYPY_CHECK_RUNNING: # pragma: no cover + from typing import List + class InvalidRequirement(ValueError): """ @@ -89,6 +93,7 @@ class Requirement(object): # TODO: Can we normalize the name and extra name? def __init__(self, requirement_string): + # type: (str) -> None try: req = REQUIREMENT.parseString(requirement_string) except ParseException as e: @@ -116,7 +121,8 @@ def __init__(self, requirement_string): self.marker = req.marker if req.marker else None def __str__(self): - parts = [self.name] + # type: () -> str + parts = [self.name] # type: List[str] if self.extras: parts.append("[{0}]".format(",".join(sorted(self.extras)))) @@ -135,4 +141,5 @@ def __str__(self): return "".join(parts) def __repr__(self): + # type: () -> str return "<Requirement({0!r})>".format(str(self)) diff --git a/pipenv/vendor/packaging/specifiers.py b/pipenv/vendor/packaging/specifiers.py index 743576a080..94987486d4 100644 --- a/pipenv/vendor/packaging/specifiers.py +++ b/pipenv/vendor/packaging/specifiers.py @@ -9,8 +9,26 @@ import re from ._compat import string_types, with_metaclass +from ._typing import MYPY_CHECK_RUNNING from .version import Version, LegacyVersion, parse +if MYPY_CHECK_RUNNING: # pragma: no cover + from typing import ( + List, + Dict, + Union, + Iterable, + Iterator, + Optional, + Callable, + Tuple, + FrozenSet, + ) + + ParsedVersion = Union[Version, LegacyVersion] + UnparsedVersion = Union[Version, LegacyVersion, str] + CallableOperator = Callable[[ParsedVersion, str], bool] + class InvalidSpecifier(ValueError): """ @@ -18,9 +36,10 @@ class InvalidSpecifier(ValueError): """ -class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): +class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): # type: ignore @abc.abstractmethod def __str__(self): + # type: () -> str """ Returns the str representation of this Specifier like object. This should be representative of the Specifier itself. @@ -28,12 +47,14 @@ def __str__(self): @abc.abstractmethod def __hash__(self): + # type: () -> int """ Returns a hash value for this Specifier like object. """ @abc.abstractmethod def __eq__(self, other): + # type: (object) -> bool """ Returns a boolean representing whether or not the two Specifier like objects are equal. @@ -41,6 +62,7 @@ def __eq__(self, other): @abc.abstractmethod def __ne__(self, other): + # type: (object) -> bool """ Returns a boolean representing whether or not the two Specifier like objects are not equal. @@ -48,6 +70,7 @@ def __ne__(self, other): @abc.abstractproperty def prereleases(self): + # type: () -> Optional[bool] """ Returns whether or not pre-releases as a whole are allowed by this specifier. @@ -55,6 +78,7 @@ def prereleases(self): @prereleases.setter def prereleases(self, value): + # type: (bool) -> None """ Sets whether or not pre-releases as a whole are allowed by this specifier. 
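# Aside: a hedged usage sketch for the Requirement class typed above (standard
# `packaging` API; the requirement string is an arbitrary PEP 508 example).
from packaging.requirements import Requirement

req = Requirement('requests[security]>=2.8.1,==2.8.*; python_version < "2.7"')
print(req.name)            # requests
print(req.extras)          # {'security'}
print(str(req.specifier))  # ==2.8.*,>=2.8.1
print(str(req.marker))     # python_version < "2.7"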
@@ -62,12 +86,14 @@ def prereleases(self, value): @abc.abstractmethod def contains(self, item, prereleases=None): + # type: (str, Optional[bool]) -> bool """ Determines if the given item is contained within this specifier. """ @abc.abstractmethod def filter(self, iterable, prereleases=None): + # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion] """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. @@ -76,19 +102,24 @@ def filter(self, iterable, prereleases=None): class _IndividualSpecifier(BaseSpecifier): - _operators = {} + _operators = {} # type: Dict[str, str] def __init__(self, spec="", prereleases=None): + # type: (str, Optional[bool]) -> None match = self._regex.search(spec) if not match: raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) - self._spec = (match.group("operator").strip(), match.group("version").strip()) + self._spec = ( + match.group("operator").strip(), + match.group("version").strip(), + ) # type: Tuple[str, str] # Store whether or not this Specifier should accept prereleases self._prereleases = prereleases def __repr__(self): + # type: () -> str pre = ( ", prereleases={0!r}".format(self.prereleases) if self._prereleases is not None @@ -98,15 +129,18 @@ def __repr__(self): return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre) def __str__(self): + # type: () -> str return "{0}{1}".format(*self._spec) def __hash__(self): + # type: () -> int return hash(self._spec) def __eq__(self, other): + # type: (object) -> bool if isinstance(other, string_types): try: - other = self.__class__(other) + other = self.__class__(str(other)) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): @@ -115,9 +149,10 @@ def __eq__(self, other): return self._spec == other._spec def __ne__(self, other): + # type: (object) -> bool if isinstance(other, string_types): try: - other = self.__class__(other) + other = self.__class__(str(other)) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): @@ -126,52 +161,67 @@ def __ne__(self, other): return self._spec != other._spec def _get_operator(self, op): - return getattr(self, "_compare_{0}".format(self._operators[op])) + # type: (str) -> CallableOperator + operator_callable = getattr( + self, "_compare_{0}".format(self._operators[op]) + ) # type: CallableOperator + return operator_callable def _coerce_version(self, version): + # type: (UnparsedVersion) -> ParsedVersion if not isinstance(version, (LegacyVersion, Version)): version = parse(version) return version @property def operator(self): + # type: () -> str return self._spec[0] @property def version(self): + # type: () -> str return self._spec[1] @property def prereleases(self): + # type: () -> Optional[bool] return self._prereleases @prereleases.setter def prereleases(self, value): + # type: (bool) -> None self._prereleases = value def __contains__(self, item): + # type: (str) -> bool return self.contains(item) def contains(self, item, prereleases=None): + # type: (UnparsedVersion, Optional[bool]) -> bool + # Determine if prereleases are to be allowed or not. 
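# Aside: the effect of the prerelease gate at this step of contains(), sketched
# with the public API (assuming the stand-alone `packaging` distribution):
from packaging.specifiers import Specifier

spec = Specifier(">=1.0")
print(spec.contains("1.3"))                      # True
print(spec.contains("2.0a1"))                    # False: prereleases excluded by default
print(spec.contains("2.0a1", prereleases=True))  # True: gate overridden per call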
if prereleases is None: prereleases = self.prereleases # Normalize item to a Version or LegacyVersion, this allows us to have # a shortcut for ``"2.0" in Specifier(">=2") - item = self._coerce_version(item) + normalized_item = self._coerce_version(item) # Determine if we should be supporting prereleases in this specifier # or not, if we do not support prereleases than we can short circuit # logic if this version is a prereleases. - if item.is_prerelease and not prereleases: + if normalized_item.is_prerelease and not prereleases: return False # Actually do the comparison to determine if this item is contained # within this Specifier or not. - return self._get_operator(self.operator)(item, self.version) + operator_callable = self._get_operator(self.operator) # type: CallableOperator + return operator_callable(normalized_item, self.version) def filter(self, iterable, prereleases=None): + # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion] + yielded = False found_prereleases = [] @@ -230,32 +280,43 @@ class LegacySpecifier(_IndividualSpecifier): } def _coerce_version(self, version): + # type: (Union[ParsedVersion, str]) -> LegacyVersion if not isinstance(version, LegacyVersion): version = LegacyVersion(str(version)) return version def _compare_equal(self, prospective, spec): + # type: (LegacyVersion, str) -> bool return prospective == self._coerce_version(spec) def _compare_not_equal(self, prospective, spec): + # type: (LegacyVersion, str) -> bool return prospective != self._coerce_version(spec) def _compare_less_than_equal(self, prospective, spec): + # type: (LegacyVersion, str) -> bool return prospective <= self._coerce_version(spec) def _compare_greater_than_equal(self, prospective, spec): + # type: (LegacyVersion, str) -> bool return prospective >= self._coerce_version(spec) def _compare_less_than(self, prospective, spec): + # type: (LegacyVersion, str) -> bool return prospective < self._coerce_version(spec) def _compare_greater_than(self, prospective, spec): + # type: (LegacyVersion, str) -> bool return prospective > self._coerce_version(spec) -def _require_version_compare(fn): +def _require_version_compare( + fn # type: (Callable[[Specifier, ParsedVersion, str], bool]) +): + # type: (...) -> Callable[[Specifier, ParsedVersion, str], bool] @functools.wraps(fn) def wrapped(self, prospective, spec): + # type: (Specifier, ParsedVersion, str) -> bool if not isinstance(prospective, Version): return False return fn(self, prospective, spec) @@ -373,6 +434,8 @@ class Specifier(_IndividualSpecifier): @_require_version_compare def _compare_compatible(self, prospective, spec): + # type: (ParsedVersion, str) -> bool + # Compatible releases have an equivalent combination of >= and ==. That # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to # implement this in terms of the other specifiers instead of @@ -400,56 +463,67 @@ def _compare_compatible(self, prospective, spec): @_require_version_compare def _compare_equal(self, prospective, spec): + # type: (ParsedVersion, str) -> bool + # We need special logic to handle prefix matching if spec.endswith(".*"): # In the case of prefix matching we want to ignore local segment. prospective = Version(prospective.public) # Split the spec out by dots, and pretend that there is an implicit # dot in between a release segment and a pre-release segment. 
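# Aside: the "~=" decomposition described in the comments above, observed
# through the public API (version strings are arbitrary examples):
from packaging.specifiers import Specifier

compatible = Specifier("~=2.2")    # behaves like ">=2.2" combined with "==2.*"
print(compatible.contains("2.9"))  # True:  >=2.2 and matches the 2.* prefix
print(compatible.contains("3.0"))  # False: fails the ==2.* prefix check
print(compatible.contains("2.1"))  # False: fails the >=2.2 lower bound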
- spec = _version_split(spec[:-2]) # Remove the trailing .* + split_spec = _version_split(spec[:-2]) # Remove the trailing .* # Split the prospective version out by dots, and pretend that there # is an implicit dot in between a release segment and a pre-release # segment. - prospective = _version_split(str(prospective)) + split_prospective = _version_split(str(prospective)) # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. - prospective = prospective[: len(spec)] + shortened_prospective = split_prospective[: len(split_spec)] # Pad out our two sides with zeros so that they both equal the same # length. - spec, prospective = _pad_version(spec, prospective) + padded_spec, padded_prospective = _pad_version( + split_spec, shortened_prospective + ) + + return padded_prospective == padded_spec else: # Convert our spec string into a Version - spec = Version(spec) + spec_version = Version(spec) # If the specifier does not have a local segment, then we want to # act as if the prospective version also does not have a local # segment. - if not spec.local: + if not spec_version.local: prospective = Version(prospective.public) - return prospective == spec + return prospective == spec_version @_require_version_compare def _compare_not_equal(self, prospective, spec): + # type: (ParsedVersion, str) -> bool return not self._compare_equal(prospective, spec) @_require_version_compare def _compare_less_than_equal(self, prospective, spec): + # type: (ParsedVersion, str) -> bool return prospective <= Version(spec) @_require_version_compare def _compare_greater_than_equal(self, prospective, spec): + # type: (ParsedVersion, str) -> bool return prospective >= Version(spec) @_require_version_compare - def _compare_less_than(self, prospective, spec): + def _compare_less_than(self, prospective, spec_str): + # type: (ParsedVersion, str) -> bool + # Convert our spec to a Version instance, since we'll want to work with # it as a version. - spec = Version(spec) + spec = Version(spec_str) # Check to see if the prospective version is less than the spec # version. If it's not we can short circuit and just return False now @@ -471,10 +545,12 @@ def _compare_less_than(self, prospective, spec): return True @_require_version_compare - def _compare_greater_than(self, prospective, spec): + def _compare_greater_than(self, prospective, spec_str): + # type: (ParsedVersion, str) -> bool + # Convert our spec to a Version instance, since we'll want to work with # it as a version. - spec = Version(spec) + spec = Version(spec_str) # Check to see if the prospective version is greater than the spec # version. If it's not we can short circuit and just return False now @@ -502,10 +578,13 @@ def _compare_greater_than(self, prospective, spec): return True def _compare_arbitrary(self, prospective, spec): + # type: (Version, str) -> bool return str(prospective).lower() == str(spec).lower() @property def prereleases(self): + # type: () -> bool + # If there is an explicit prereleases set for this, then we'll just # blindly use that. 
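# Aside: how the explicit/implicit prerelease decision above plays out in
# practice (a sketch against the public API; values are illustrative):
from packaging.specifiers import Specifier

print(Specifier(">=1.0").prereleases)       # False: nothing explicit, no prerelease in spec
print(Specifier(">=1.0.dev1").prereleases)  # True: inferred from the prerelease version
print(Specifier(">=1.0", prereleases=True).prereleases)  # True: explicit flag wins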
if self._prereleases is not None: @@ -530,6 +609,7 @@ def prereleases(self): @prereleases.setter def prereleases(self, value): + # type: (bool) -> None self._prereleases = value @@ -537,7 +617,8 @@ def prereleases(self, value): def _version_split(version): - result = [] + # type: (str) -> List[str] + result = [] # type: List[str] for item in version.split("."): match = _prefix_regex.search(item) if match: @@ -548,6 +629,7 @@ def _version_split(version): def _pad_version(left, right): + # type: (List[str], List[str]) -> Tuple[List[str], List[str]] left_split, right_split = [], [] # Get the release segment of our versions @@ -567,14 +649,16 @@ def _pad_version(left, right): class SpecifierSet(BaseSpecifier): def __init__(self, specifiers="", prereleases=None): - # Split on , to break each indidivual specifier into it's own item, and + # type: (str, Optional[bool]) -> None + + # Split on , to break each individual specifier into its own item, and # strip each item to remove leading/trailing whitespace. - specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] # Parsed each individual specifier, attempting first to make it a # Specifier and falling back to a LegacySpecifier. parsed = set() - for specifier in specifiers: + for specifier in split_specifiers: try: parsed.add(Specifier(specifier)) except InvalidSpecifier: @@ -588,6 +672,7 @@ def __init__(self, specifiers="", prereleases=None): self._prereleases = prereleases def __repr__(self): + # type: () -> str pre = ( ", prereleases={0!r}".format(self.prereleases) if self._prereleases is not None @@ -597,12 +682,15 @@ def __repr__(self): return "<SpecifierSet({0!r}{1})>".format(str(self), pre) def __str__(self): + # type: () -> str return ",".join(sorted(str(s) for s in self._specs)) def __hash__(self): + # type: () -> int return hash(self._specs) def __and__(self, other): + # type: (Union[SpecifierSet, str]) -> SpecifierSet if isinstance(other, string_types): other = SpecifierSet(other) elif not isinstance(other, SpecifierSet): @@ -626,9 +714,8 @@ def __and__(self, other): return specifier def __eq__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): + # type: (object) -> bool + if isinstance(other, (string_types, _IndividualSpecifier)): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented @@ -636,9 +723,8 @@ def __eq__(self, other): return self._specs == other._specs def __ne__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): + # type: (object) -> bool + if isinstance(other, (string_types, _IndividualSpecifier)): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented @@ -646,13 +732,17 @@ def __ne__(self, other): return self._specs != other._specs def __len__(self): + # type: () -> int return len(self._specs) def __iter__(self): + # type: () -> Iterator[FrozenSet[_IndividualSpecifier]] return iter(self._specs) @property def prereleases(self): + # type: () -> Optional[bool] + # If we have been given an explicit prerelease modifier, then we'll # pass that through here.
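# Aside: a brief sketch of SpecifierSet composition via the __and__ overload
# above (stand-alone `packaging` API; version strings are arbitrary examples):
from packaging.specifiers import SpecifierSet

combined = SpecifierSet(">=1.0") & SpecifierSet("<2.0")
print(str(combined))      # <2.0,>=1.0 (members are sorted when rendered)
print("1.5" in combined)  # True, via __contains__ -> contains()
print("2.1" in combined)  # False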
if self._prereleases is not None: @@ -670,12 +760,16 @@ def prereleases(self): @prereleases.setter def prereleases(self, value): + # type: (bool) -> None self._prereleases = value def __contains__(self, item): + # type: (Union[ParsedVersion, str]) -> bool return self.contains(item) def contains(self, item, prereleases=None): + # type: (Union[ParsedVersion, str], Optional[bool]) -> bool + # Ensure that our item is a Version or LegacyVersion instance. if not isinstance(item, (LegacyVersion, Version)): item = parse(item) @@ -701,7 +795,13 @@ def contains(self, item, prereleases=None): # will always return True, this is an explicit design decision. return all(s.contains(item, prereleases=prereleases) for s in self._specs) - def filter(self, iterable, prereleases=None): + def filter( + self, + iterable, # type: Iterable[Union[ParsedVersion, str]] + prereleases=None, # type: Optional[bool] + ): + # type: (...) -> Iterable[Union[ParsedVersion, str]] + # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. @@ -719,8 +819,8 @@ def filter(self, iterable, prereleases=None): # which will filter out any pre-releases, unless there are no final # releases, and which will filter out LegacyVersion in general. else: - filtered = [] - found_prereleases = [] + filtered = [] # type: List[Union[ParsedVersion, str]] + found_prereleases = [] # type: List[Union[ParsedVersion, str]] for item in iterable: # Ensure that we some kind of Version class for this item. diff --git a/pipenv/vendor/packaging/tags.py b/pipenv/vendor/packaging/tags.py index ec9942f0f6..300faab847 100644 --- a/pipenv/vendor/packaging/tags.py +++ b/pipenv/vendor/packaging/tags.py @@ -13,12 +13,37 @@ EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()] del imp +import logging +import os import platform import re +import struct import sys import sysconfig import warnings +from ._typing import MYPY_CHECK_RUNNING, cast + +if MYPY_CHECK_RUNNING: # pragma: no cover + from typing import ( + Dict, + FrozenSet, + IO, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, + ) + + PythonVersion = Sequence[int] + MacVersion = Tuple[int, int] + GlibcVersion = Tuple[int, int] + + +logger = logging.getLogger(__name__) INTERPRETER_SHORT_NAMES = { "python": "py", # Generic. 
@@ -26,7 +51,7 @@ "pypy": "pp", "ironpython": "ip", "jython": "jy", -} +} # type: Dict[str, str] _32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 @@ -37,23 +62,31 @@ class Tag(object): __slots__ = ["_interpreter", "_abi", "_platform"] def __init__(self, interpreter, abi, platform): + # type: (str, str, str) -> None self._interpreter = interpreter.lower() self._abi = abi.lower() self._platform = platform.lower() @property def interpreter(self): + # type: () -> str return self._interpreter @property def abi(self): + # type: () -> str return self._abi @property def platform(self): + # type: () -> str return self._platform def __eq__(self, other): + # type: (object) -> bool + if not isinstance(other, Tag): + return NotImplemented + return ( (self.platform == other.platform) and (self.abi == other.abi) @@ -61,16 +94,20 @@ def __eq__(self, other): ) def __hash__(self): + # type: () -> int return hash((self._interpreter, self._abi, self._platform)) def __str__(self): + # type: () -> str return "{}-{}-{}".format(self._interpreter, self._abi, self._platform) def __repr__(self): + # type: () -> str return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) def parse_tag(tag): + # type: (str) -> FrozenSet[Tag] tags = set() interpreters, abis, platforms = tag.split("-") for interpreter in interpreters.split("."): @@ -80,20 +117,54 @@ def parse_tag(tag): return frozenset(tags) +def _warn_keyword_parameter(func_name, kwargs): + # type: (str, Dict[str, bool]) -> bool + """ + Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only. + """ + if not kwargs: + return False + elif len(kwargs) > 1 or "warn" not in kwargs: + kwargs.pop("warn", None) + arg = next(iter(kwargs.keys())) + raise TypeError( + "{}() got an unexpected keyword argument {!r}".format(func_name, arg) + ) + return kwargs["warn"] + + +def _get_config_var(name, warn=False): + # type: (str, bool) -> Union[int, str, None] + value = sysconfig.get_config_var(name) + if value is None and warn: + logger.debug( + "Config variable '%s' is unset, Python ABI tag may be incorrect", name + ) + return value + + def _normalize_string(string): + # type: (str) -> str return string.replace(".", "_").replace("-", "_") -def _cpython_interpreter(py_version): - # TODO: Is using py_version_nodot for interpreter version critical? - return "cp{major}{minor}".format(major=py_version[0], minor=py_version[1]) +def _abi3_applies(python_version): + # type: (PythonVersion) -> bool + """ + Determine if the Python version supports abi3. + + PEP 384 was first implemented in Python 3.2. + """ + return len(python_version) > 1 and tuple(python_version) >= (3, 2) -def _cpython_abis(py_version): +def _cpython_abis(py_version, warn=False): + # type: (PythonVersion, bool) -> List[str] + py_version = tuple(py_version) # To allow for version comparison. abis = [] - version = "{}{}".format(*py_version[:2]) + version = _version_nodot(py_version[:2]) debug = pymalloc = ucs4 = "" - with_debug = sysconfig.get_config_var("Py_DEBUG") + with_debug = _get_config_var("Py_DEBUG", warn) has_refcount = hasattr(sys, "gettotalrefcount") # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled # extension modules is the best option. 
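# Aside: Tag and parse_tag from the hunk above, exercised directly (a sketch
# assuming the stand-alone `packaging` distribution; the tag string is an
# arbitrary example of a compressed wheel tag set):
from packaging.tags import parse_tag

tags = parse_tag("cp38-cp38-manylinux1_x86_64.manylinux2010_x86_64")
for tag in sorted(str(t) for t in tags):
    print(tag)
# cp38-cp38-manylinux1_x86_64
# cp38-cp38-manylinux2010_x86_64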
@@ -102,11 +173,11 @@ def _cpython_abis(py_version): if with_debug or (with_debug is None and (has_refcount or has_ext)): debug = "d" if py_version < (3, 8): - with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC") + with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) if with_pymalloc or with_pymalloc is None: pymalloc = "m" if py_version < (3, 3): - unicode_size = sysconfig.get_config_var("Py_UNICODE_SIZE") + unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) if unicode_size == 4 or ( unicode_size is None and sys.maxunicode == 0x10FFFF ): @@ -124,86 +195,148 @@ def _cpython_abis(py_version): return abis -def _cpython_tags(py_version, interpreter, abis, platforms): +def cpython_tags( + python_version=None, # type: Optional[PythonVersion] + abis=None, # type: Optional[Iterable[str]] + platforms=None, # type: Optional[Iterable[str]] + **kwargs # type: bool +): + # type: (...) -> Iterator[Tag] + """ + Yields the tags for a CPython interpreter. + + The tags consist of: + - cp<python_version>-<abi>-<platform> + - cp<python_version>-abi3-<platform> + - cp<python_version>-none-<platform> + - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2. + + If python_version only specifies a major version then user-provided ABIs and + the 'none' ABI tag will be used. + + If 'abi3' or 'none' are specified in 'abis' then they will be yielded at + their normal position and not at the beginning. + """ + warn = _warn_keyword_parameter("cpython_tags", kwargs) + if not python_version: + python_version = sys.version_info[:2] + + interpreter = "cp{}".format(_version_nodot(python_version[:2])) + + if abis is None: + if len(python_version) > 1: + abis = _cpython_abis(python_version, warn) + else: + abis = [] + abis = list(abis) + # 'abi3' and 'none' are explicitly handled later. + for explicit_abi in ("abi3", "none"): + try: + abis.remove(explicit_abi) + except ValueError: + pass + + platforms = list(platforms or _platform_tags()) for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) - for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms): - yield tag + if _abi3_applies(python_version): + for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms): + yield tag for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms): yield tag - # PEP 384 was first implemented in Python 3.2.
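# Aside: the tag ordering documented above, pinned down with explicit arguments
# so the output is deterministic (a sketch; any real platform string works):
from packaging.tags import cpython_tags

for tag in cpython_tags(python_version=(3, 8), abis=["cp38"], platforms=["manylinux1_x86_64"]):
    print(tag)
# cp38-cp38-manylinux1_x86_64
# cp38-abi3-manylinux1_x86_64
# cp38-none-manylinux1_x86_64
# cp37-abi3-manylinux1_x86_64
# ... down to cp32-abi3-manylinux1_x86_64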
- for minor_version in range(py_version[1] - 1, 1, -1): - for platform_ in platforms: - interpreter = "cp{major}{minor}".format( - major=py_version[0], minor=minor_version - ) - yield Tag(interpreter, "abi3", platform_) - -def _pypy_interpreter(): - return "pp{py_major}{pypy_major}{pypy_minor}".format( - py_major=sys.version_info[0], - pypy_major=sys.pypy_version_info.major, - pypy_minor=sys.pypy_version_info.minor, - ) + if _abi3_applies(python_version): + for minor_version in range(python_version[1] - 1, 1, -1): + for platform_ in platforms: + interpreter = "cp{version}".format( + version=_version_nodot((python_version[0], minor_version)) + ) + yield Tag(interpreter, "abi3", platform_) def _generic_abi(): + # type: () -> Iterator[str] abi = sysconfig.get_config_var("SOABI") if abi: - return _normalize_string(abi) - else: - return "none" + yield _normalize_string(abi) -def _pypy_tags(py_version, interpreter, abi, platforms): - for tag in (Tag(interpreter, abi, platform) for platform in platforms): - yield tag - for tag in (Tag(interpreter, "none", platform) for platform in platforms): - yield tag +def generic_tags( + interpreter=None, # type: Optional[str] + abis=None, # type: Optional[Iterable[str]] + platforms=None, # type: Optional[Iterable[str]] + **kwargs # type: bool +): + # type: (...) -> Iterator[Tag] + """ + Yields the tags for a generic interpreter. + The tags consist of: + - <interpreter>-<abi>-<platform> -def _generic_tags(interpreter, py_version, abi, platforms): - for tag in (Tag(interpreter, abi, platform) for platform in platforms): - yield tag - if abi != "none": - tags = (Tag(interpreter, "none", platform_) for platform_ in platforms) - for tag in tags: - yield tag + The "none" ABI will be added if it was not explicitly provided. + """ + warn = _warn_keyword_parameter("generic_tags", kwargs) + if not interpreter: + interp_name = interpreter_name() + interp_version = interpreter_version(warn=warn) + interpreter = "".join([interp_name, interp_version]) + if abis is None: + abis = _generic_abi() + platforms = list(platforms or _platform_tags()) + abis = list(abis) + if "none" not in abis: + abis.append("none") + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) def _py_interpreter_range(py_version): + # type: (PythonVersion) -> Iterator[str] """ - Yield Python versions in descending order. + Yields Python versions in descending order. After the latest version, the major-only version will be yielded, and then - all following versions up to 'end'. + all previous versions of that major version. """ - yield "py{major}{minor}".format(major=py_version[0], minor=py_version[1]) + if len(py_version) > 1: + yield "py{version}".format(version=_version_nodot(py_version[:2])) yield "py{major}".format(major=py_version[0]) - for minor in range(py_version[1] - 1, -1, -1): - yield "py{major}{minor}".format(major=py_version[0], minor=minor) + if len(py_version) > 1: + for minor in range(py_version[1] - 1, -1, -1): + yield "py{version}".format(version=_version_nodot((py_version[0], minor))) -def _independent_tags(interpreter, py_version, platforms): +def compatible_tags( + python_version=None, # type: Optional[PythonVersion] + interpreter=None, # type: Optional[str] + platforms=None, # type: Optional[Iterable[str]] +): + # type: (...) -> Iterator[Tag] """ - Return the sequence of tags that are consistent across implementations. + Yields the sequence of tags that are compatible with a specific version of Python. 
The tags consist of: - py*-none-<platform> - - <interpreter>-none-any + - <interpreter>-none-any # ... if `interpreter` is provided. - py*-none-any """ - for version in _py_interpreter_range(py_version): + if not python_version: + python_version = sys.version_info[:2] + platforms = list(platforms or _platform_tags()) + for version in _py_interpreter_range(python_version): for platform_ in platforms: yield Tag(version, "none", platform_) - yield Tag(interpreter, "none", "any") - for version in _py_interpreter_range(py_version): + if interpreter: + yield Tag(interpreter, "none", "any") + for version in _py_interpreter_range(python_version): yield Tag(version, "none", "any") def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER): + # type: (str, bool) -> str if not is_32bit: return arch @@ -214,6 +347,7 @@ def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER): def _mac_binary_formats(version, cpu_arch): + # type: (MacVersion, str) -> List[str] formats = [cpu_arch] if cpu_arch == "x86_64": if version < (10, 4): @@ -240,32 +374,42 @@ def _mac_binary_formats(version, cpu_arch): return formats -def _mac_platforms(version=None, arch=None): - version_str, _, cpu_arch = platform.mac_ver() +def mac_platforms(version=None, arch=None): + # type: (Optional[MacVersion], Optional[str]) -> Iterator[str] + """ + Yields the platform tags for a macOS system. + + The `version` parameter is a two-item tuple specifying the macOS version to + generate platform tags for. The `arch` parameter is the CPU architecture to + generate platform tags for. Both parameters default to the appropriate value + for the current system. + """ + version_str, _, cpu_arch = platform.mac_ver() # type: ignore if version is None: - version = tuple(map(int, version_str.split(".")[:2])) + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + else: + version = version if arch is None: arch = _mac_arch(cpu_arch) - platforms = [] + else: + arch = arch for minor_version in range(version[1], -1, -1): compat_version = version[0], minor_version binary_formats = _mac_binary_formats(compat_version, arch) for binary_format in binary_formats: - platforms.append( - "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, ) - return platforms # From PEP 513. def _is_manylinux_compatible(name, glibc_version): + # type: (str, GlibcVersion) -> bool # Check for presence of _manylinux module. try: - import _manylinux + import _manylinux # noqa return bool(getattr(_manylinux, name + "_compatible")) except (ImportError, AttributeError): @@ -276,14 +420,50 @@ def _is_manylinux_compatible(name, glibc_version): def _glibc_version_string(): + # type: () -> Optional[str] # Returns glibc version string, or None if not using glibc. - import ctypes + return _glibc_version_string_confstr() or _glibc_version_string_ctypes() + + +def _glibc_version_string_confstr(): + # type: () -> Optional[str] + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module. + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183 + try: + # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". 
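# Aside: the confstr probe used below, reduced to a stand-alone sketch (the
# helper name glibc_version() is hypothetical; returns None off glibc/Unix):
import os

def glibc_version():
    try:
        # e.g. "glibc 2.17" on glibc-based Linux
        version_string = os.confstr("CS_GNU_LIBC_VERSION")
        return version_string.split()[1] if version_string else None
    except (AttributeError, OSError, ValueError):
        # confstr missing (Windows) or CS_GNU_LIBC_VERSION unsupported
        return None

print(glibc_version())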
+ version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821 + "CS_GNU_LIBC_VERSION" + ) + assert version_string is not None + _, version = version_string.split() # type: Tuple[str, str] + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes(): + # type: () -> Optional[str] + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen # manpage says, "If filename is NULL, then the returned handle is for the # main program". This way we can let the linker do the work to figure out # which libc our process is actually using. - process_namespace = ctypes.CDLL(None) + # + # Note: typeshed is wrong here so we are ignoring this line. + process_namespace = ctypes.CDLL(None) # type: ignore try: gnu_get_libc_version = process_namespace.gnu_get_libc_version except AttributeError: @@ -293,7 +473,7 @@ def _glibc_version_string(): # Call gnu_get_libc_version, which returns a string like "2.5" gnu_get_libc_version.restype = ctypes.c_char_p - version_str = gnu_get_libc_version() + version_str = gnu_get_libc_version() # type: str # py2 / py3 compatibility: if not isinstance(version_str, str): version_str = version_str.decode("ascii") @@ -303,6 +483,7 @@ def _glibc_version_string(): # Separated out from have_compatible_glibc for easier unit testing. def _check_glibc_version(version_str, required_major, minimum_minor): + # type: (str, int, int) -> bool # Parse string and check against requested version. # # We use a regexp instead of str.split because we want to discard any @@ -324,81 +505,235 @@ def _check_glibc_version(version_str, required_major, minimum_minor): def _have_compatible_glibc(required_major, minimum_minor): + # type: (int, int) -> bool version_str = _glibc_version_string() if version_str is None: return False return _check_glibc_version(version_str, required_major, minimum_minor) +# Python does not provide platform information at sufficient granularity to +# identify the architecture of the running executable in some cases, so we +# determine it dynamically by reading the information from the running +# process. This only applies on Linux, which uses the ELF format. +class _ELFFileHeader(object): + # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header + class _InvalidELFFileHeader(ValueError): + """ + An invalid ELF file header was found. 
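# Aside: checking the ELF magic number the header parser below starts with (a
# self-contained sketch; on non-ELF platforms this simply prints False):
import struct
import sys

with open(sys.executable, "rb") as f:
    data = f.read(4)
# First four bytes of every ELF file are 0x7F 'E' 'L' 'F'
is_elf = len(data) == 4 and struct.unpack(">I", data)[0] == 0x7F454C46
print(is_elf)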
+ """ + + ELF_MAGIC_NUMBER = 0x7F454C46 + ELFCLASS32 = 1 + ELFCLASS64 = 2 + ELFDATA2LSB = 1 + ELFDATA2MSB = 2 + EM_386 = 3 + EM_S390 = 22 + EM_ARM = 40 + EM_X86_64 = 62 + EF_ARM_ABIMASK = 0xFF000000 + EF_ARM_ABI_VER5 = 0x05000000 + EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + def __init__(self, file): + # type: (IO[bytes]) -> None + def unpack(fmt): + # type: (str) -> int + try: + result, = struct.unpack( + fmt, file.read(struct.calcsize(fmt)) + ) # type: (int, ) + except struct.error: + raise _ELFFileHeader._InvalidELFFileHeader() + return result + + self.e_ident_magic = unpack(">I") + if self.e_ident_magic != self.ELF_MAGIC_NUMBER: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_class = unpack("B") + if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_data = unpack("B") + if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_version = unpack("B") + self.e_ident_osabi = unpack("B") + self.e_ident_abiversion = unpack("B") + self.e_ident_pad = file.read(7) + format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H" + format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I" + format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q" + format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q + self.e_type = unpack(format_h) + self.e_machine = unpack(format_h) + self.e_version = unpack(format_i) + self.e_entry = unpack(format_p) + self.e_phoff = unpack(format_p) + self.e_shoff = unpack(format_p) + self.e_flags = unpack(format_i) + self.e_ehsize = unpack(format_h) + self.e_phentsize = unpack(format_h) + self.e_phnum = unpack(format_h) + self.e_shentsize = unpack(format_h) + self.e_shnum = unpack(format_h) + self.e_shstrndx = unpack(format_h) + + +def _get_elf_header(): + # type: () -> Optional[_ELFFileHeader] + try: + with open(sys.executable, "rb") as f: + elf_header = _ELFFileHeader(f) + except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): + return None + return elf_header + + +def _is_linux_armhf(): + # type: () -> bool + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_ARM + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABIMASK + ) == elf_header.EF_ARM_ABI_VER5 + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD + ) == elf_header.EF_ARM_ABI_FLOAT_HARD + return result + + +def _is_linux_i686(): + # type: () -> bool + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_386 + return result + + +def _have_compatible_manylinux_abi(arch): + # type: (str) -> bool + if arch == "armv7l": + return _is_linux_armhf() + if arch == "i686": + return _is_linux_i686() + return True + + def _linux_platforms(is_32bit=_32_BIT_INTERPRETER): + # type: (bool) -> Iterator[str] linux = _normalize_string(distutils.util.get_platform()) - if linux == "linux_x86_64" and is_32bit: - linux = "linux_i686" - manylinux_support = ( - ("manylinux2014", (2, 17)), # 
CentOS 7 w/ glibc 2.17 (PEP 599) - ("manylinux2010", (2, 12)), # CentOS 6 w/ glibc 2.12 (PEP 571) - ("manylinux1", (2, 5)), # CentOS 5 w/ glibc 2.5 (PEP 513) - ) + if is_32bit: + if linux == "linux_x86_64": + linux = "linux_i686" + elif linux == "linux_aarch64": + linux = "linux_armv7l" + manylinux_support = [] + _, arch = linux.split("_", 1) + if _have_compatible_manylinux_abi(arch): + if arch in {"x86_64", "i686", "aarch64", "armv7l", "ppc64", "ppc64le", "s390x"}: + manylinux_support.append( + ("manylinux2014", (2, 17)) + ) # CentOS 7 w/ glibc 2.17 (PEP 599) + if arch in {"x86_64", "i686"}: + manylinux_support.append( + ("manylinux2010", (2, 12)) + ) # CentOS 6 w/ glibc 2.12 (PEP 571) + manylinux_support.append( + ("manylinux1", (2, 5)) + ) # CentOS 5 w/ glibc 2.5 (PEP 513) manylinux_support_iter = iter(manylinux_support) for name, glibc_version in manylinux_support_iter: if _is_manylinux_compatible(name, glibc_version): - platforms = [linux.replace("linux", name)] + yield linux.replace("linux", name) break - else: - platforms = [] # Support for a later manylinux implies support for an earlier version. - platforms += [linux.replace("linux", name) for name, _ in manylinux_support_iter] - platforms.append(linux) - return platforms + for name, _ in manylinux_support_iter: + yield linux.replace("linux", name) + yield linux def _generic_platforms(): - platform = _normalize_string(distutils.util.get_platform()) - return [platform] + # type: () -> Iterator[str] + yield _normalize_string(distutils.util.get_platform()) + + +def _platform_tags(): + # type: () -> Iterator[str] + """ + Provides the platform tags for this installation. + """ + if platform.system() == "Darwin": + return mac_platforms() + elif platform.system() == "Linux": + return _linux_platforms() + else: + return _generic_platforms() -def _interpreter_name(): - name = platform.python_implementation().lower() +def interpreter_name(): + # type: () -> str + """ + Returns the name of the running interpreter. + """ + try: + name = sys.implementation.name # type: ignore + except AttributeError: # pragma: no cover + # Python 2.7 compatibility. + name = platform.python_implementation().lower() return INTERPRETER_SHORT_NAMES.get(name) or name -def _generic_interpreter(name, py_version): - version = sysconfig.get_config_var("py_version_nodot") - if not version: - version = "".join(map(str, py_version[:2])) - return "{name}{version}".format(name=name, version=version) +def interpreter_version(**kwargs): + # type: (bool) -> str + """ + Returns the version of the running interpreter. + """ + warn = _warn_keyword_parameter("interpreter_version", kwargs) + version = _get_config_var("py_version_nodot", warn=warn) + if version: + version = str(version) + else: + version = _version_nodot(sys.version_info[:2]) + return version + + +def _version_nodot(version): + # type: (PythonVersion) -> str + if any(v >= 10 for v in version): + sep = "_" + else: + sep = "" + return sep.join(map(str, version)) -def sys_tags(): +def sys_tags(**kwargs): + # type: (bool) -> Iterator[Tag] """ Returns the sequence of tag triples for the running interpreter. The order of the sequence corresponds to priority order for the interpreter, from most to least important. 
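# Aside: the priority ordering described above, observed directly (a sketch;
# the printed tags vary by interpreter and platform):
from packaging.tags import sys_tags

tags = list(sys_tags())
print(tags[0])   # most specific, e.g. cp38-cp38-manylinux2014_x86_64
print(tags[-1])  # most generic, e.g. py30-none-any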
""" - py_version = sys.version_info[:2] - interpreter_name = _interpreter_name() - if platform.system() == "Darwin": - platforms = _mac_platforms() - elif platform.system() == "Linux": - platforms = _linux_platforms() - else: - platforms = _generic_platforms() + warn = _warn_keyword_parameter("sys_tags", kwargs) - if interpreter_name == "cp": - interpreter = _cpython_interpreter(py_version) - abis = _cpython_abis(py_version) - for tag in _cpython_tags(py_version, interpreter, abis, platforms): - yield tag - elif interpreter_name == "pp": - interpreter = _pypy_interpreter() - abi = _generic_abi() - for tag in _pypy_tags(py_version, interpreter, abi, platforms): + interp_name = interpreter_name() + if interp_name == "cp": + for tag in cpython_tags(warn=warn): yield tag else: - interpreter = _generic_interpreter(interpreter_name, py_version) - abi = _generic_abi() - for tag in _generic_tags(interpreter, py_version, abi, platforms): + for tag in generic_tags(): yield tag - for tag in _independent_tags(interpreter, py_version, platforms): + + for tag in compatible_tags(): yield tag diff --git a/pipenv/vendor/packaging/utils.py b/pipenv/vendor/packaging/utils.py index 8841878693..44f1bf9873 100644 --- a/pipenv/vendor/packaging/utils.py +++ b/pipenv/vendor/packaging/utils.py @@ -5,28 +5,33 @@ import re +from ._typing import MYPY_CHECK_RUNNING from .version import InvalidVersion, Version +if MYPY_CHECK_RUNNING: # pragma: no cover + from typing import Union _canonicalize_regex = re.compile(r"[-_.]+") def canonicalize_name(name): + # type: (str) -> str # This is taken from PEP 503. return _canonicalize_regex.sub("-", name).lower() -def canonicalize_version(version): +def canonicalize_version(_version): + # type: (str) -> Union[Version, str] """ - This is very similar to Version.__str__, but has one subtle differences + This is very similar to Version.__str__, but has one subtle difference with the way it handles the release segment. 
""" try: - version = Version(version) + version = Version(_version) except InvalidVersion: # Legacy versions cannot be normalized - return version + return _version parts = [] diff --git a/pipenv/vendor/packaging/version.py b/pipenv/vendor/packaging/version.py index 95157a1f78..f39a2a12a1 100644 --- a/pipenv/vendor/packaging/version.py +++ b/pipenv/vendor/packaging/version.py @@ -7,8 +7,35 @@ import itertools import re -from ._structures import Infinity - +from ._structures import Infinity, NegativeInfinity +from ._typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: # pragma: no cover + from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union + + from ._structures import InfinityType, NegativeInfinityType + + InfiniteTypes = Union[InfinityType, NegativeInfinityType] + PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] + SubLocalType = Union[InfiniteTypes, int, str] + LocalType = Union[ + NegativeInfinityType, + Tuple[ + Union[ + SubLocalType, + Tuple[SubLocalType, str], + Tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], + ] + CmpKey = Tuple[ + int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType + ] + LegacyCmpKey = Tuple[int, Tuple[str, ...]] + VersionComparisonMethod = Callable[ + [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool + ] __all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] @@ -19,6 +46,7 @@ def parse(version): + # type: (str) -> Union[LegacyVersion, Version] """ Parse the given version string and return either a :class:`Version` object or a :class:`LegacyVersion` object depending on if the given version is @@ -37,28 +65,38 @@ class InvalidVersion(ValueError): class _BaseVersion(object): + _key = None # type: Union[CmpKey, LegacyCmpKey] + def __hash__(self): + # type: () -> int return hash(self._key) def __lt__(self, other): + # type: (_BaseVersion) -> bool return self._compare(other, lambda s, o: s < o) def __le__(self, other): + # type: (_BaseVersion) -> bool return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): + # type: (object) -> bool return self._compare(other, lambda s, o: s == o) def __ge__(self, other): + # type: (_BaseVersion) -> bool return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): + # type: (_BaseVersion) -> bool return self._compare(other, lambda s, o: s > o) def __ne__(self, other): + # type: (object) -> bool return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): + # type: (object, VersionComparisonMethod) -> Union[bool, NotImplemented] if not isinstance(other, _BaseVersion): return NotImplemented @@ -67,57 +105,71 @@ def _compare(self, other, method): class LegacyVersion(_BaseVersion): def __init__(self, version): + # type: (str) -> None self._version = str(version) self._key = _legacy_cmpkey(self._version) def __str__(self): + # type: () -> str return self._version def __repr__(self): + # type: () -> str return "<LegacyVersion({0})>".format(repr(str(self))) @property def public(self): + # type: () -> str return self._version @property def base_version(self): + # type: () -> str return self._version @property def epoch(self): + # type: () -> int return -1 @property def release(self): + # type: () -> None return None @property def pre(self): + # type: () -> None return None @property def post(self): + # type: () -> None return None @property def dev(self): + # type: () -> None return None @property def local(self): + # type: () -> None return None @property def 
is_prerelease(self): + # type: () -> bool return False @property def is_postrelease(self): + # type: () -> bool return False @property def is_devrelease(self): + # type: () -> bool return False @@ -133,6 +185,7 @@ def is_devrelease(self): def _parse_version_parts(s): + # type: (str) -> Iterator[str] for part in _legacy_version_component_re.split(s): part = _legacy_version_replacement_map.get(part, part) @@ -150,6 +203,8 @@ def _parse_version_parts(s): def _legacy_cmpkey(version): + # type: (str) -> LegacyCmpKey + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch # greater than or equal to 0. This will effectively put the LegacyVersion, # which uses the defacto standard originally implemented by setuptools, @@ -158,7 +213,7 @@ def _legacy_cmpkey(version): # This scheme is taken from pkg_resources.parse_version setuptools prior to # it's adoption of the packaging library. - parts = [] + parts = [] # type: List[str] for part in _parse_version_parts(version.lower()): if part.startswith("*"): # remove "-" before a prerelease tag @@ -171,9 +226,8 @@ def _legacy_cmpkey(version): parts.pop() parts.append(part) - parts = tuple(parts) - return epoch, parts + return epoch, tuple(parts) # Deliberately not anchored to the start and end of the string, to make it @@ -215,6 +269,8 @@ class Version(_BaseVersion): _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) def __init__(self, version): + # type: (str) -> None + # Validate the version and parse it into pieces match = self._regex.search(version) if not match: @@ -243,9 +299,11 @@ def __init__(self, version): ) def __repr__(self): + # type: () -> str return "<Version({0})>".format(repr(str(self))) def __str__(self): + # type: () -> str parts = [] # Epoch @@ -275,26 +333,35 @@ def __str__(self): @property def epoch(self): - return self._version.epoch + # type: () -> int + _epoch = self._version.epoch # type: int + return _epoch @property def release(self): - return self._version.release + # type: () -> Tuple[int, ...] + _release = self._version.release # type: Tuple[int, ...] 
+ return _release @property def pre(self): - return self._version.pre + # type: () -> Optional[Tuple[str, int]] + _pre = self._version.pre # type: Optional[Tuple[str, int]] + return _pre @property def post(self): + # type: () -> Optional[Tuple[str, int]] return self._version.post[1] if self._version.post else None @property def dev(self): + # type: () -> Optional[Tuple[str, int]] return self._version.dev[1] if self._version.dev else None @property def local(self): + # type: () -> Optional[str] if self._version.local: return ".".join(str(x) for x in self._version.local) else: @@ -302,10 +369,12 @@ def local(self): @property def public(self): + # type: () -> str return str(self).split("+", 1)[0] @property def base_version(self): + # type: () -> str parts = [] # Epoch @@ -319,18 +388,41 @@ def base_version(self): @property def is_prerelease(self): + # type: () -> bool return self.dev is not None or self.pre is not None @property def is_postrelease(self): + # type: () -> bool return self.post is not None @property def is_devrelease(self): + # type: () -> bool return self.dev is not None + @property + def major(self): + # type: () -> int + return self.release[0] if len(self.release) >= 1 else 0 + + @property + def minor(self): + # type: () -> int + return self.release[1] if len(self.release) >= 2 else 0 + + @property + def micro(self): + # type: () -> int + return self.release[2] if len(self.release) >= 3 else 0 + + +def _parse_letter_version( + letter, # type: str + number, # type: Union[str, bytes, SupportsInt] +): + # type: (...) -> Optional[Tuple[str, int]] -def _parse_letter_version(letter, number): if letter: # We consider there to be an implicit 0 in a pre-release if there is # not a numeral associated with it. @@ -360,11 +452,14 @@ def _parse_letter_version(letter, number): return letter, int(number) + return None + _local_version_separators = re.compile(r"[\._-]") def _parse_local_version(local): + # type: (str) -> Optional[LocalType] """ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). """ @@ -373,15 +468,25 @@ def _parse_local_version(local): part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local) ) + return None + +def _cmpkey( + epoch, # type: int + release, # type: Tuple[int, ...] + pre, # type: Optional[Tuple[str, int]] + post, # type: Optional[Tuple[str, int]] + dev, # type: Optional[Tuple[str, int]] + local, # type: Optional[Tuple[SubLocalType]] +): + # type: (...) -> CmpKey -def _cmpkey(epoch, release, pre, post, dev, local): # When we compare a release version, we want to compare it with all of the # trailing zeros removed. So we'll use a reverse the list, drop all the now # leading zeros until we come to something non zero, then take the rest # re-reverse it back into the correct order and make it a tuple and use # that for our sorting key. - release = tuple( + _release = tuple( reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) ) @@ -390,23 +495,31 @@ def _cmpkey(epoch, release, pre, post, dev, local): # if there is not a pre or a post segment. If we have one of those then # the normal sorting rules will handle this case correctly. if pre is None and post is None and dev is not None: - pre = -Infinity + _pre = NegativeInfinity # type: PrePostDevType # Versions without a pre-release (except as noted above) should sort after # those with one. elif pre is None: - pre = Infinity + _pre = Infinity + else: + _pre = pre # Versions without a post segment should sort before those with one. 
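    # Illustrative sketch, doctest-style, of the ordering these
    # Infinity / NegativeInfinity sentinels implement, using only the
    # public packaging.version API defined in this file: a dev release
    # sorts before a pre-release, a pre-release before the final
    # release, and a post release after it.
    #
    #   >>> from packaging.version import Version
    #   >>> sorted(map(Version, ["1.0.post1", "1.0", "1.0a1", "1.0.dev1"]))
    #   [<Version('1.0.dev1')>, <Version('1.0a1')>, <Version('1.0')>, <Version('1.0.post1')>]
    #   >>> v = Version("3.7.1")
    #   >>> (v.major, v.minor, v.micro)
    #   (3, 7, 1)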
if post is None: - post = -Infinity + _post = NegativeInfinity # type: PrePostDevType + + else: + _post = post # Versions without a development segment should sort after those with one. if dev is None: - dev = Infinity + _dev = Infinity # type: PrePostDevType + + else: + _dev = dev if local is None: # Versions without a local segment should sort before those with one. - local = -Infinity + _local = NegativeInfinity # type: LocalType else: # Versions with a local segment need that segment parsed to implement # the sorting rules in PEP440. @@ -415,6 +528,8 @@ def _cmpkey(epoch, release, pre, post, dev, local): # - Numeric segments sort numerically # - Shorter versions sort before longer versions when the prefixes # match exactly - local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local) + _local = tuple( + (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local + ) - return epoch, release, pre, post, dev, local + return epoch, _release, _pre, _post, _dev, _local diff --git a/pipenv/vendor/parse.py b/pipenv/vendor/parse.py index 1a5f9e634f..9c8cae7047 100644 --- a/pipenv/vendor/parse.py +++ b/pipenv/vendor/parse.py @@ -346,6 +346,9 @@ **Version history (in brief)**: +- 1.15.0 Several fixes for parsing non-base 10 numbers (thanks @vladikcomper) +- 1.14.0 More broad acceptance of Fortran number format (thanks @purpleskyfall) +- 1.13.1 Project metadata correction. - 1.13.0 Handle Fortran formatted numbers with no leading 0 before decimal point (thanks @purpleskyfall). Handle comparison of FixedTzOffset with other types of object. @@ -421,7 +424,7 @@ ''' from __future__ import absolute_import -__version__ = '1.13.0' +__version__ = '1.15.0' # yes, I now have two problems import re @@ -465,15 +468,16 @@ def decorator(func): return decorator -def int_convert(base): +def int_convert(base=None): '''Convert a string to an integer. The string may start with a sign. - It may be of a base other than 10. + It may be of a base other than 2, 8, 10 or 16. - If may start with a base indicator, 0#nnnn, which we assume should - override the specified base. + If base isn't specified, it will be detected automatically based + on a string format. When string starts with a base indicator, 0#nnnn, + it overrides the default base of 10. It may also have other non-numeric characters that we can ignore. ''' @@ -482,19 +486,28 @@ def int_convert(base): def f(string, match, base=base): if string[0] == '-': sign = -1 + number_start = 1 + elif string[0] == '+': + sign = 1 + number_start = 1 else: sign = 1 + number_start = 0 - if string[0] == '0' and len(string) > 2: - if string[1] in 'bB': - base = 2 - elif string[1] in 'oO': - base = 8 - elif string[1] in 'xX': - base = 16 - else: - # just go with the base specifed - pass + # If base wasn't specified, detect it automatically + if base is None: + + # Assume decimal number, unless different base is detected + base = 10 + + # For number formats starting with 0b, 0o, 0x, use corresponding base ... 
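            # Illustrative sketch, doctest-style, of the base
            # auto-detection below as seen through the public parse.parse
            # API: 'd' fields accept prefixed literals and pick the base
            # from the prefix.
            #
            #   >>> import parse
            #   >>> parse.parse('{:d}', '0x2f')[0]
            #   47
            #   >>> parse.parse('{:d}', '0b1011')[0]
            #   11
            #   >>> parse.parse('{:d}', '-0o17')[0]
            #   -15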
+ if string[number_start] == '0' and len(string) - number_start > 2: + if string[number_start+1] in 'bB': + base = 2 + elif string[number_start+1] in 'oO': + base = 8 + elif string[number_start+1] in 'xX': + base = 16 chars = CHARS[:base] string = re.sub('[^%s]' % chars, '', string.lower()) @@ -965,7 +978,7 @@ def _handle_field(self, field): # figure type conversions, if any type = format['type'] - is_numeric = type and type in 'n%fegdobh' + is_numeric = type and type in 'n%fegdobx' if type in self._extra_types: type_converter = self._extra_types[type] s = getattr(type_converter, 'pattern', r'.+?') @@ -998,10 +1011,10 @@ def f(string, m): self._group_index += 1 self._type_conversions[group] = percentage elif type == 'f': - s = r'\d+\.\d+' + s = r'\d*\.\d+' self._type_conversions[group] = lambda s, m: float(s) elif type == 'F': - s = r'\d+\.\d+' + s = r'\d*\.\d+' self._type_conversions[group] = lambda s, m: Decimal(s) elif type == 'e': s = r'\d*\.\d+[eE][-+]?\d+|nan|NAN|[-+]?inf|[-+]?INF' @@ -1015,8 +1028,8 @@ def f(string, m): width = r'{1,%s}' % int(format['width']) else: width = '+' - s = r'\d{w}|0[xX][0-9a-fA-F]{w}|0[bB][01]{w}|0[oO][0-7]{w}'.format(w=width) - self._type_conversions[group] = int_convert(10) + s = r'\d{w}|[-+ ]?0[xX][0-9a-fA-F]{w}|[-+ ]?0[bB][01]{w}|[-+ ]?0[oO][0-7]{w}'.format(w=width) + self._type_conversions[group] = int_convert() # do not specify numeber base, determine it automatically elif type == 'ti': s = r'(\d{4}-\d\d-\d\d)((\s+|T)%s)?(Z|\s*[-+]\d\d:?\d\d)?' % \ TIME_PAT @@ -1320,7 +1333,7 @@ def compile(format, extra_types=None, case_sensitive=False): return Parser(format, extra_types=extra_types, case_sensitive=case_sensitive) -# Copyright (c) 2012-2019 Richard Jones <richard@python.org> +# Copyright (c) 2012-2020 Richard Jones <richard@python.org> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal diff --git a/pipenv/vendor/pexpect/__init__.py b/pipenv/vendor/pexpect/__init__.py index cf7a70d0a4..7e30453787 100644 --- a/pipenv/vendor/pexpect/__init__.py +++ b/pipenv/vendor/pexpect/__init__.py @@ -75,7 +75,7 @@ from .pty_spawn import spawn, spawnu from .run import run, runu -__version__ = '4.7.0' +__version__ = '4.8.0' __revision__ = '' __all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu', 'which', 'split_command_line', '__version__', '__revision__'] diff --git a/pipenv/vendor/pexpect/_async.py b/pipenv/vendor/pexpect/_async.py index ca2044e1cc..dfbfeef5fb 100644 --- a/pipenv/vendor/pexpect/_async.py +++ b/pipenv/vendor/pexpect/_async.py @@ -8,10 +8,7 @@ def expect_async(expecter, timeout=None): # First process data that was previously read - if it maches, we don't need # async stuff. 
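    # (The previously read data is handed to Expecter.existing_data()
    # below, which also honours searchwindowsize; see the reworked
    # expect.py later in this same patch.)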
- previously_read = expecter.spawn.buffer - expecter.spawn._buffer = expecter.spawn.buffer_type() - expecter.spawn._before = expecter.spawn.buffer_type() - idx = expecter.new_data(previously_read) + idx = expecter.existing_data() if idx is not None: return idx if not expecter.spawn.async_pw_transport: @@ -74,6 +71,7 @@ def data_received(self, data): spawn._log(s, 'read') if self.fut.done(): + spawn._before.write(s) spawn._buffer.write(s) return diff --git a/pipenv/vendor/pexpect/expect.py b/pipenv/vendor/pexpect/expect.py index db376d593c..d3409db9d7 100644 --- a/pipenv/vendor/pexpect/expect.py +++ b/pipenv/vendor/pexpect/expect.py @@ -6,45 +6,101 @@ class Expecter(object): def __init__(self, spawn, searcher, searchwindowsize=-1): self.spawn = spawn self.searcher = searcher + # A value of -1 means to use the figure from spawn, which should + # be None or a positive number. if searchwindowsize == -1: searchwindowsize = spawn.searchwindowsize self.searchwindowsize = searchwindowsize + self.lookback = None + if hasattr(searcher, 'longest_string'): + self.lookback = searcher.longest_string - def new_data(self, data): + def do_search(self, window, freshlen): spawn = self.spawn searcher = self.searcher - - pos = spawn._buffer.tell() - spawn._buffer.write(data) - spawn._before.write(data) - - # determine which chunk of data to search; if a windowsize is - # specified, this is the *new* data + the preceding <windowsize> bytes - if self.searchwindowsize: - spawn._buffer.seek(max(0, pos - self.searchwindowsize)) - window = spawn._buffer.read(self.searchwindowsize + len(data)) - else: - # otherwise, search the whole buffer (really slow for large datasets) - window = spawn.buffer - index = searcher.search(window, len(data)) + if freshlen > len(window): + freshlen = len(window) + index = searcher.search(window, freshlen, self.searchwindowsize) if index >= 0: spawn._buffer = spawn.buffer_type() spawn._buffer.write(window[searcher.end:]) - spawn.before = spawn._before.getvalue()[0:-(len(window) - searcher.start)] + spawn.before = spawn._before.getvalue()[ + 0:-(len(window) - searcher.start)] spawn._before = spawn.buffer_type() - spawn.after = window[searcher.start: searcher.end] + spawn._before.write(window[searcher.end:]) + spawn.after = window[searcher.start:searcher.end] spawn.match = searcher.match spawn.match_index = index # Found a match return index - elif self.searchwindowsize: - spawn._buffer = spawn.buffer_type() - spawn._buffer.write(window) + elif self.searchwindowsize or self.lookback: + maintain = self.searchwindowsize or self.lookback + if spawn._buffer.tell() > maintain: + spawn._buffer = spawn.buffer_type() + spawn._buffer.write(window[-maintain:]) + + def existing_data(self): + # First call from a new call to expect_loop or expect_async. + # self.searchwindowsize may have changed. + # Treat all data as fresh. 
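        # In short: spawn._before accumulates all unmatched output and is
        # what the .before attribute is built from, while spawn._buffer is
        # only the search window; do_search() may trim it to
        # searchwindowsize, or to the searcher's longest_string lookback,
        # so each search scans a bounded window instead of the whole
        # session transcript.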
+ spawn = self.spawn + before_len = spawn._before.tell() + buf_len = spawn._buffer.tell() + freshlen = before_len + if before_len > buf_len: + if not self.searchwindowsize: + spawn._buffer = spawn.buffer_type() + window = spawn._before.getvalue() + spawn._buffer.write(window) + elif buf_len < self.searchwindowsize: + spawn._buffer = spawn.buffer_type() + spawn._before.seek( + max(0, before_len - self.searchwindowsize)) + window = spawn._before.read() + spawn._buffer.write(window) + else: + spawn._buffer.seek(max(0, buf_len - self.searchwindowsize)) + window = spawn._buffer.read() + else: + if self.searchwindowsize: + spawn._buffer.seek(max(0, buf_len - self.searchwindowsize)) + window = spawn._buffer.read() + else: + window = spawn._buffer.getvalue() + return self.do_search(window, freshlen) + + def new_data(self, data): + # A subsequent call, after a call to existing_data. + spawn = self.spawn + freshlen = len(data) + spawn._before.write(data) + if not self.searchwindowsize: + if self.lookback: + # search lookback + new data. + old_len = spawn._buffer.tell() + spawn._buffer.write(data) + spawn._buffer.seek(max(0, old_len - self.lookback)) + window = spawn._buffer.read() + else: + # copy the whole buffer (really slow for large datasets). + spawn._buffer.write(data) + window = spawn.buffer + else: + if len(data) >= self.searchwindowsize or not spawn._buffer.tell(): + window = data[-self.searchwindowsize:] + spawn._buffer = spawn.buffer_type() + spawn._buffer.write(window[-self.searchwindowsize:]) + else: + spawn._buffer.write(data) + new_len = spawn._buffer.tell() + spawn._buffer.seek(max(0, new_len - self.searchwindowsize)) + window = spawn._buffer.read() + return self.do_search(window, freshlen) def eof(self, err=None): spawn = self.spawn - spawn.before = spawn.buffer + spawn.before = spawn._before.getvalue() spawn._buffer = spawn.buffer_type() spawn._before = spawn.buffer_type() spawn.after = EOF @@ -60,12 +116,15 @@ def eof(self, err=None): msg += '\nsearcher: %s' % self.searcher if err is not None: msg = str(err) + '\n' + msg - raise EOF(msg) - + + exc = EOF(msg) + exc.__cause__ = None # in Python 3.x we can use "raise exc from None" + raise exc + def timeout(self, err=None): spawn = self.spawn - spawn.before = spawn.buffer + spawn.before = spawn._before.getvalue() spawn.after = TIMEOUT index = self.searcher.timeout_index if index >= 0: @@ -79,15 +138,18 @@ def timeout(self, err=None): msg += '\nsearcher: %s' % self.searcher if err is not None: msg = str(err) + '\n' + msg - raise TIMEOUT(msg) + + exc = TIMEOUT(msg) + exc.__cause__ = None # in Python 3.x we can use "raise exc from None" + raise exc def errored(self): spawn = self.spawn - spawn.before = spawn.buffer + spawn.before = spawn._before.getvalue() spawn.after = None spawn.match = None spawn.match_index = None - + def expect_loop(self, timeout=-1): """Blocking expect""" spawn = self.spawn @@ -96,14 +158,10 @@ def expect_loop(self, timeout=-1): end_time = time.time() + timeout try: - incoming = spawn.buffer - spawn._buffer = spawn.buffer_type() - spawn._before = spawn.buffer_type() + idx = self.existing_data() + if idx is not None: + return idx while True: - idx = self.new_data(incoming) - # Keep reading until exception or return. 
- if idx is not None: - return idx # No match at this point if (timeout is not None) and (timeout < 0): return self.timeout() @@ -111,6 +169,10 @@ def expect_loop(self, timeout=-1): incoming = spawn.read_nonblocking(spawn.maxread, timeout) if self.spawn.delayafterread is not None: time.sleep(self.spawn.delayafterread) + idx = self.new_data(incoming) + # Keep reading until exception or return. + if idx is not None: + return idx if timeout is not None: timeout = end_time - time.time() except EOF as e: @@ -148,6 +210,7 @@ def __init__(self, strings): self.eof_index = -1 self.timeout_index = -1 self._strings = [] + self.longest_string = 0 for n, s in enumerate(strings): if s is EOF: self.eof_index = n @@ -156,6 +219,8 @@ def __init__(self, strings): self.timeout_index = n continue self._strings.append((n, s)) + if len(s) > self.longest_string: + self.longest_string = len(s) def __str__(self): '''This returns a human-readable string that represents the state of diff --git a/pipenv/vendor/pexpect/pty_spawn.py b/pipenv/vendor/pexpect/pty_spawn.py index 691c2c63f0..8e28ca7cd7 100644 --- a/pipenv/vendor/pexpect/pty_spawn.py +++ b/pipenv/vendor/pexpect/pty_spawn.py @@ -191,6 +191,7 @@ def __init__(self, command, args=[], timeout=30, maxread=2000, self.STDIN_FILENO = pty.STDIN_FILENO self.STDOUT_FILENO = pty.STDOUT_FILENO self.STDERR_FILENO = pty.STDERR_FILENO + self.str_last_chars = 100 self.cwd = cwd self.env = env self.echo = echo @@ -212,8 +213,8 @@ def __str__(self): s.append(repr(self)) s.append('command: ' + str(self.command)) s.append('args: %r' % (self.args,)) - s.append('buffer (last 100 chars): %r' % self.buffer[-100:]) - s.append('before (last 100 chars): %r' % self.before[-100:] if self.before else '') + s.append('buffer (last %s chars): %r' % (self.str_last_chars,self.buffer[-self.str_last_chars:])) + s.append('before (last %s chars): %r' % (self.str_last_chars,self.before[-self.str_last_chars:] if self.before else '')) s.append('after: %r' % (self.after,)) s.append('match: %r' % (self.match,)) s.append('match_index: ' + str(self.match_index)) @@ -752,10 +753,14 @@ def interact(self, escape_character=chr(29), child process in interact mode is duplicated to the given log. You may pass in optional input and output filter functions. These - functions should take a string and return a string. The output_filter - will be passed all the output from the child process. The input_filter - will be passed all the keyboard input from the user. The input_filter - is run BEFORE the check for the escape_character. + functions should take bytes array and return bytes array too. Even + with ``encoding='utf-8'`` support, meth:`interact` will always pass + input_filter and output_filter bytes. You may need to wrap your + function to decode and encode back to UTF-8. + + The output_filter will be passed all the output from the child process. + The input_filter will be passed all the keyboard input from the user. + The input_filter is run BEFORE the check for the escape_character. Note that if you change the window size of the parent the SIGWINCH signal will not be passed through to the child. If you want the child diff --git a/pipenv/vendor/pexpect/run.py b/pipenv/vendor/pexpect/run.py index d9dfe76ba5..ff288a1246 100644 --- a/pipenv/vendor/pexpect/run.py +++ b/pipenv/vendor/pexpect/run.py @@ -67,7 +67,7 @@ def print_ticks(d): contains patterns and responses. Whenever one of the patterns is seen in the command output, run() will send the associated response string. 
So, run() in the above example can be also written as: - + run("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events=[(TIMEOUT,print_ticks)], timeout=5) diff --git a/pipenv/vendor/pexpect/screen.py b/pipenv/vendor/pexpect/screen.py index 5ab45b9467..79f95c4e54 100644 --- a/pipenv/vendor/pexpect/screen.py +++ b/pipenv/vendor/pexpect/screen.py @@ -90,7 +90,7 @@ def __init__(self, r=24, c=80, encoding='latin-1', encoding_errors='replace'): self.encoding = encoding self.encoding_errors = encoding_errors if encoding is not None: - self.decoder = codecs.getincrementaldecoder(encoding)(encoding_errors) + self.decoder = codecs.getincrementaldecoder(encoding)(encoding_errors) else: self.decoder = None self.cur_r = 1 diff --git a/pipenv/vendor/pexpect/spawnbase.py b/pipenv/vendor/pexpect/spawnbase.py index 63c0b4204e..59e905764c 100644 --- a/pipenv/vendor/pexpect/spawnbase.py +++ b/pipenv/vendor/pexpect/spawnbase.py @@ -120,6 +120,9 @@ def write_to_stdout(b): self.async_pw_transport = None # This is the read buffer. See maxread. self._buffer = self.buffer_type() + # The buffer may be trimmed for efficiency reasons. This is the + # untrimmed buffer, used to create the before attribute. + self._before = self.buffer_type() def _log(self, s, direction): if self.logfile is not None: diff --git a/pipenv/vendor/pip_shims/backports.py b/pipenv/vendor/pip_shims/backports.py deleted file mode 100644 index 9206cbe061..0000000000 --- a/pipenv/vendor/pip_shims/backports.py +++ /dev/null @@ -1,1183 +0,0 @@ -# -*- coding=utf-8 -*- -from __future__ import absolute_import - -import atexit -import contextlib -import functools -import inspect -import os -import sys -import types - -import six -from packaging import specifiers -from vistir.compat import TemporaryDirectory - -from .environment import MYPY_RUNNING -from .utils import ( - call_function_with_correct_args, - get_method_args, - nullcontext, - suppress_setattr, -) - -if six.PY3: - from contextlib import ExitStack -else: - from contextlib2 import ExitStack - - -if MYPY_RUNNING: - from optparse import Values - from requests import Session - from typing import ( - Any, - Callable, - Dict, - Generator, - Generic, - Iterator, - List, - Optional, - Tuple, - Type, - TypeVar, - Union, - ) - from .utils import TShimmedPath, TShim, TShimmedFunc - - TFinder = TypeVar("TFinder") - TResolver = TypeVar("TResolver") - TReqTracker = TypeVar("TReqTracker") - TLink = TypeVar("TLink") - TSession = TypeVar("TSession", bound=Session) - TCommand = TypeVar("TCommand", covariant=True) - TCommandInstance = TypeVar("TCommandInstance") - TCmdDict = Dict[str, Union[Tuple[str, str, str], TCommandInstance]] - TInstallRequirement = TypeVar("TInstallRequirement") - TShimmedCmdDict = Union[TShim, TCmdDict] - TWheelCache = TypeVar("TWheelCache") - TPreparer = TypeVar("TPreparer") - - -class SearchScope(object): - def __init__(self, find_links=None, index_urls=None): - self.index_urls = index_urls if index_urls else [] - self.find_links = find_links - - @classmethod - def create(cls, find_links=None, index_urls=None): - if not index_urls: - index_urls = ["https://pypi.org/simple"] - return cls(find_links=find_links, index_urls=index_urls) - - -class SelectionPreferences(object): - def __init__( - self, - allow_yanked=True, - allow_all_prereleases=False, - format_control=None, - prefer_binary=False, - ignore_requires_python=False, - ): - self.allow_yanked = allow_yanked - self.allow_all_prereleases = allow_all_prereleases - self.format_control = format_control - self.prefer_binary = 
prefer_binary - self.ignore_requires_python = ignore_requires_python - - -class TargetPython(object): - fallback_get_tags = None # type: Optional[TShimmedFunc] - - def __init__( - self, - platform=None, # type: Optional[str] - py_version_info=None, # type: Optional[Tuple[int, ...]] - abi=None, # type: Optional[str] - implementation=None, # type: Optional[str] - ): - # type: (...) -> None - self._given_py_version_info = py_version_info - if py_version_info is None: - py_version_info = sys.version_info[:3] - elif len(py_version_info) < 3: - py_version_info += (3 - len(py_version_info)) * (0,) - else: - py_version_info = py_version_info[:3] - py_version = ".".join(map(str, py_version_info[:2])) - self.abi = abi - self.implementation = implementation - self.platform = platform - self.py_version = py_version - self.py_version_info = py_version_info - self._valid_tags = None - - def get_tags(self): - if self._valid_tags is None and self.fallback_get_tags: - fallback_func = resolve_possible_shim(self.fallback_get_tags) - versions = None - if self._given_py_version_info: - versions = ["".join(map(str, self._given_py_version_info[:2]))] - self._valid_tags = fallback_func( - versions=versions, - platform=self.platform, - abi=self.abi, - impl=self.implementation, - ) - return self._valid_tags - - -class CandidatePreferences(object): - def __init__(self, prefer_binary=False, allow_all_prereleases=False): - self.prefer_binary = prefer_binary - self.allow_all_prereleases = allow_all_prereleases - - -class LinkCollector(object): - def __init__(self, session=None, search_scope=None): - self.session = session - self.search_scope = search_scope - - -class CandidateEvaluator(object): - @classmethod - def create( - cls, - project_name, # type: str - target_python=None, # type: Optional[TargetPython] - prefer_binary=False, # type: bool - allow_all_prereleases=False, # type: bool - specifier=None, # type: Optional[specifiers.BaseSpecifier] - hashes=None, # type: Optional[Any] - ): - if target_python is None: - target_python = TargetPython() - if specifier is None: - specifier = specifiers.SpecifierSet() - - supported_tags = target_python.get_tags() - - return cls( - project_name=project_name, - supported_tags=supported_tags, - specifier=specifier, - prefer_binary=prefer_binary, - allow_all_prereleases=allow_all_prereleases, - hashes=hashes, - ) - - def __init__( - self, - project_name, # type: str - supported_tags, # type: List[Any] - specifier, # type: specifiers.BaseSpecifier - prefer_binary=False, # type: bool - allow_all_prereleases=False, # type: bool - hashes=None, # type: Optional[Any] - ): - self._allow_all_prereleases = allow_all_prereleases - self._hashes = hashes - self._prefer_binary = prefer_binary - self._project_name = project_name - self._specifier = specifier - self._supported_tags = supported_tags - - -class LinkEvaluator(object): - def __init__( - self, - allow_yanked, - project_name, - canonical_name, - formats, - target_python, - ignore_requires_python=False, - ignore_compatibility=True, - ): - self._allow_yanked = allow_yanked - self._canonical_name = canonical_name - self._ignore_requires_python = ignore_requires_python - self._formats = formats - self._target_python = target_python - self._ignore_compatibility = ignore_compatibility - - self.project_name = project_name - - -def resolve_possible_shim(target): - # type: (TShimmedFunc) -> Optional[Union[Type, Callable]] - if target is None: - return target - if getattr(target, "shim", None) and isinstance( - target.shim, (types.MethodType, 
types.FunctionType) - ): - return target.shim() - return target - - -@contextlib.contextmanager -def temp_environ(): - """Allow the ability to set os.environ temporarily""" - environ = dict(os.environ) - try: - yield - finally: - os.environ.clear() - os.environ.update(environ) - - -@contextlib.contextmanager -def get_requirement_tracker(req_tracker_creator=None): - # type: (Optional[Callable]) -> Generator[Optional[TReqTracker], None, None] - root = os.environ.get("PIP_REQ_TRACKER") - if not req_tracker_creator: - yield None - else: - req_tracker_args = [] - _, required_args = get_method_args(req_tracker_creator.__init__) # type: ignore - with ExitStack() as ctx: - if root is None: - root = ctx.enter_context(TemporaryDirectory(prefix="req-tracker")).name - if root: - root = str(root) - ctx.enter_context(temp_environ()) - os.environ["PIP_REQ_TRACKER"] = root - if required_args is not None and "root" in required_args: - req_tracker_args.append(root) - with req_tracker_creator(*req_tracker_args) as tracker: - yield tracker - - -@contextlib.contextmanager -def ensure_resolution_dirs(**kwargs): - # type: (Any) -> Iterator[Dict[str, Any]] - """ - Ensures that the proper directories are scaffolded and present in the provided kwargs - for performing dependency resolution via pip. - - :return: A new kwargs dictionary with scaffolded directories for **build_dir**, **src_dir**, - **download_dir**, and **wheel_download_dir** added to the key value pairs. - :rtype: Dict[str, Any] - """ - keys = ("build_dir", "src_dir", "download_dir", "wheel_download_dir") - if not any(kwargs.get(key) is None for key in keys): - yield kwargs - else: - with TemporaryDirectory(prefix="pip-shims-") as base_dir: - for key in keys: - if kwargs.get(key) is not None: - continue - target = os.path.join(base_dir.name, key) - os.makedirs(target) - kwargs[key] = target - yield kwargs - - -def partial_command(shimmed_path, cmd_mapping=None): - # type: (Type, Optional[TShimmedCmdDict]) -> Union[Type[TCommandInstance], functools.partial] - """ - Maps a default set of arguments across all members of a - :class:`~pip_shims.models.ShimmedPath` instance, specifically for - :class:`~pip._internal.command.Command` instances which need - `summary` and `name` arguments. - - :param :class:`~pip_shims.models.ShimmedPath` shimmed_path: A - :class:`~pip_shims.models.ShimmedCollection` instance - :param Any cmd_mapping: A reference to use for mapping against, e.g. 
an - import that depends on pip also - :return: A dictionary mapping new arguments to their default values - :rtype: Dict[str, str] - """ - basecls = shimmed_path.shim() - resolved_cmd_mapping = None # type: Optional[Dict[str, Any]] - cmd_mapping = resolve_possible_shim(cmd_mapping) - if cmd_mapping is not None and isinstance(cmd_mapping, dict): - resolved_cmd_mapping = cmd_mapping.copy() - base_args = [] # type: List[str] - for root_cls in basecls.mro(): - if root_cls.__name__ == "Command": - _, root_init_args = get_method_args(root_cls.__init__) - if root_init_args is not None: - base_args = root_init_args.args - needs_name_and_summary = any(arg in base_args for arg in ("name", "summary")) - if not needs_name_and_summary: - basecls.name = shimmed_path.name - return basecls - elif ( - not resolved_cmd_mapping - and needs_name_and_summary - and getattr(functools, "partialmethod", None) - ): - new_init = functools.partial( - basecls.__init__, name=shimmed_path.name, summary="Summary" - ) - basecls.__init__ = new_init - result = basecls - assert resolved_cmd_mapping is not None - for command_name, command_info in resolved_cmd_mapping.items(): - if getattr(command_info, "class_name", None) == shimmed_path.name: - summary = getattr(command_info, "summary", "Command summary") - result = functools.partial(basecls, command_name, summary) - break - return result - - -def get_session( - install_cmd_provider=None, # type: Optional[TShimmedFunc] - install_cmd=None, # type: TCommandInstance - options=None, # type: Optional[Values] -): - # type: (...) -> TSession - session = None # type: Optional[TSession] - if install_cmd is None: - assert install_cmd_provider is not None - install_cmd_provider = resolve_possible_shim(install_cmd_provider) - assert isinstance(install_cmd_provider, (type, functools.partial)) - install_cmd = install_cmd_provider() - if options is None: - options = install_cmd.parser.parse_args([]) # type: ignore - session = install_cmd._build_session(options) # type: ignore - assert session is not None - atexit.register(session.close) - return session - - -def populate_options( - install_command=None, # type: TCommandInstance - options=None, # type: Optional[Values] - **kwargs # type: Any -): - # (...) 
-> Tuple[Dict[str, Any], Values] - results = {} - if install_command is None and options is None: - raise TypeError("Must pass either options or InstallCommand to populate options") - if options is None and install_command is not None: - options, _ = install_command.parser.parse_args([]) # type: ignore - options_dict = options.__dict__ - for provided_key, provided_value in kwargs.items(): - if provided_key == "isolated": - options_key = "isolated_mode" - elif provided_key == "source_dir": - options_key = "src_dir" - else: - options_key = provided_key - if provided_key in options_dict and provided_value is not None: - setattr(options, options_key, provided_value) - results[provided_key] = provided_value - elif getattr(options, options_key, None) is not None: - results[provided_key] = getattr(options, options_key) - else: - results[provided_key] = provided_value - return results, options - - -def get_requirement_set( - install_command=None, # type: Optional[TCommandInstance] - req_set_provider=None, # type: Optional[TShimmedFunc] - build_dir=None, # type: Optional[str] - src_dir=None, # type: Optional[str] - download_dir=None, # type: Optional[str] - wheel_download_dir=None, # type: Optional[str] - session=None, # type: Optional[TSession] - wheel_cache=None, # type: Optional[TWheelCache] - upgrade=False, # type: bool - upgrade_strategy=None, # type: Optional[str] - ignore_installed=False, # type: bool - ignore_dependencies=False, # type: bool - force_reinstall=False, # type: bool - use_user_site=False, # type: bool - isolated=False, # type: bool - ignore_requires_python=False, # type: bool - require_hashes=None, # type: bool - cache_dir=None, # type: Optional[str] - options=None, # type: Optional[Values] - install_cmd_provider=None, # type: Optional[TShimmedFunc] -): - # (...) -> TRequirementSet - """ - Creates a requirement set from the supplied parameters. - - Not all parameters are passed through for all pip versions, but any - invalid parameters will be ignored if they are not needed to generate a - requirement set on the current pip version. - - :param install_command: A :class:`~pip._internal.commands.install.InstallCommand` - instance which is used to generate the finder. - :param :class:`~pip_shims.models.ShimmedPathCollection` req_set_provider: A provider - to build requirement set instances. - :param str build_dir: The directory to build requirements in. Removed in pip 10, - defeaults to None - :param str source_dir: The directory to use for source requirements. Removed in - pip 10, defaults to None - :param str download_dir: The directory to download requirement artifacts to. Removed - in pip 10, defaults to None - :param str wheel_download_dir: The directory to download wheels to. Removed in pip - 10, defaults ot None - :param :class:`~requests.Session` session: The pip session to use. Removed in pip 10, - defaults to None - :param WheelCache wheel_cache: The pip WheelCache instance to use for caching wheels. - Removed in pip 10, defaults to None - :param bool upgrade: Whether to try to upgrade existing requirements. Removed in pip - 10, defaults to False. - :param str upgrade_strategy: The upgrade strategy to use, e.g. "only-if-needed". - Removed in pip 10, defaults to None. - :param bool ignore_installed: Whether to ignore installed packages when resolving. - Removed in pip 10, defaults to False. - :param bool ignore_dependencies: Whether to ignore dependencies of requirements - when resolving. Removed in pip 10, defaults to False. 
- :param bool force_reinstall: Whether to force reinstall of packages when resolving. - Removed in pip 10, defaults to False. - :param bool use_user_site: Whether to use user site packages when resolving. Removed - in pip 10, defaults to False. - :param bool isolated: Whether to resolve in isolation. Removed in pip 10, defaults - to False. - :param bool ignore_requires_python: Removed in pip 10, defaults to False. - :param bool require_hashes: Whether to require hashes when resolving. Defaults to - False. - :param Values options: An :class:`~optparse.Values` instance from an install cmd - :param install_cmd_provider: A shim for providing new install command instances. - :type install_cmd_provider: :class:`~pip_shims.models.ShimmedPathCollection` - :return: A new requirement set instance - :rtype: :class:`~pip._internal.req.req_set.RequirementSet` - """ - req_set_provider = resolve_possible_shim(req_set_provider) - if install_command is None: - install_cmd_provider = resolve_possible_shim(install_cmd_provider) - assert isinstance(install_cmd_provider, (type, functools.partial)) - install_command = install_cmd_provider() - required_args = inspect.getargs( - req_set_provider.__init__.__code__ - ).args # type: ignore - results, options = populate_options( - install_command, - options, - build_dir=build_dir, - src_dir=src_dir, - download_dir=download_dir, - upgrade=upgrade, - upgrade_strategy=upgrade_strategy, - ignore_installed=ignore_installed, - ignore_dependencies=ignore_dependencies, - force_reinstall=force_reinstall, - use_user_site=use_user_site, - isolated=isolated, - ignore_requires_python=ignore_requires_python, - require_hashes=require_hashes, - cache_dir=cache_dir, - ) - if session is None and "session" in required_args: - session = get_session(install_cmd=install_command, options=options) - results["wheel_cache"] = wheel_cache - results["session"] = session - results["wheel_download_dir"] = wheel_download_dir - return call_function_with_correct_args(req_set_provider, **results) - - -def get_package_finder( - install_cmd=None, # type: Optional[TCommand] - options=None, # type: Optional[Values] - session=None, # type: Optional[TSession] - platform=None, # type: Optional[str] - python_versions=None, # type: Optional[Tuple[str, ...]] - abi=None, # type: Optional[str] - implementation=None, # type: Optional[str] - target_python=None, # type: Optional[Any] - ignore_requires_python=None, # type: Optional[bool] - target_python_builder=None, # type: Optional[TShimmedFunc] - install_cmd_provider=None, # type: Optional[TShimmedFunc] -): - # type: (...) -> TFinder - """Shim for compatibility to generate package finders. - - Build and return a :class:`~pip._internal.index.package_finder.PackageFinder` - instance using the :class:`~pip._internal.commands.install.InstallCommand` helper - method to construct the finder, shimmed with backports as needed for compatibility. - - :param install_cmd_provider: A shim for providing new install command instances. - :type install_cmd_provider: :class:`~pip_shims.models.ShimmedPathCollection` - :param install_cmd: A :class:`~pip._internal.commands.install.InstallCommand` - instance which is used to generate the finder. - :param optparse.Values options: An optional :class:`optparse.Values` instance - generated by calling `install_cmd.parser.parse_args()` typically. - :param session: An optional session instance, can be created by the `install_cmd`. - :param Optional[str] platform: An optional platform string, e.g. 
linux_x86_64 - :param Optional[Tuple[str, ...]] python_versions: A tuple of 2-digit strings - representing python versions, e.g. ("27", "35", "36", "37"...) - :param Optional[str] abi: The target abi to support, e.g. "cp38" - :param Optional[str] implementation: An optional implementation string for limiting - searches to a specific implementation, e.g. "cp" or "py" - :param target_python: A :class:`~pip._internal.models.target_python.TargetPython` - instance (will be translated to alternate arguments if necessary on incompatible - pip versions). - :param Optional[bool] ignore_requires_python: Whether to ignore `requires_python` - on resulting candidates, only valid after pip version 19.3.1 - :param target_python_builder: A 'TargetPython' builder (e.g. the class itself, - uninstantiated) - :return: A :class:`pip._internal.index.package_finder.PackageFinder` instance - :rtype: :class:`pip._internal.index.package_finder.PackageFinder` - - :Example: - - >>> from pip_shims.shims import InstallCommand, get_package_finder - >>> install_cmd = InstallCommand() - >>> finder = get_package_finder( - ... install_cmd, python_versions=("27", "35", "36", "37", "38"), implementation=" - cp" - ... ) - >>> candidates = finder.find_all_candidates("requests") - >>> requests_222 = next(iter(c for c in candidates if c.version.public == "2.22.0")) - >>> requests_222 - <InstallationCandidate('requests', <Version('2.22.0')>, <Link https://files.pythonhos - ted.org/packages/51/bd/23c926cd341ea6b7dd0b2a00aba99ae0f828be89d72b2190f27c11d4b7fb/r - equests-2.22.0-py2.py3-none-any.whl#sha256=9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9 - a590f48c010551dc6c4b31 (from https://pypi.org/simple/requests/) (requires-python:>=2. - 7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*)>)> - """ - if install_cmd is None: - install_cmd_provider = resolve_possible_shim(install_cmd_provider) - assert isinstance(install_cmd_provider, (type, functools.partial)) - install_cmd = install_cmd_provider() - if options is None: - options, _ = install_cmd.parser.parse_args([]) # type: ignore - if session is None: - session = get_session(install_cmd=install_cmd, options=options) # type: ignore - builder_args = inspect.getargs( - install_cmd._build_package_finder.__code__ - ) # type: ignore - build_kwargs = {"options": options, "session": session} - expects_targetpython = "target_python" in builder_args.args - received_python = any(arg for arg in [platform, python_versions, abi, implementation]) - if expects_targetpython and received_python and not target_python: - if target_python_builder is None: - target_python_builder = TargetPython - py_version_info = None - if python_versions: - py_version_info_python = max(python_versions) - py_version_info = tuple([int(part) for part in py_version_info_python]) - target_python = target_python_builder( - platform=platform, - abi=abi, - implementation=implementation, - py_version_info=py_version_info, - ) - build_kwargs["target_python"] = target_python - elif any( - arg in builder_args.args - for arg in ["platform", "python_versions", "abi", "implementation"] - ): - if target_python and not received_python: - tags = target_python.get_tags() - version_impl = set([t[0] for t in tags]) - # impls = set([v[:2] for v in version_impl]) - # impls.remove("py") - # impl = next(iter(impls), "py") if not target_python - versions = set([v[2:] for v in version_impl]) - build_kwargs.update( - { - "platform": target_python.platform, - "python_versions": versions, - "abi": target_python.abi, - "implementation": 
target_python.implementation, - } - ) - if ( - ignore_requires_python is not None - and "ignore_requires_python" in builder_args.args - ): - build_kwargs["ignore_requires_python"] = ignore_requires_python - return install_cmd._build_package_finder(**build_kwargs) # type: ignore - - -def shim_unpack( - unpack_fn, # type: TShimmedFunc - download_dir, # type str - ireq=None, # type: Optional[Any] - link=None, # type: Optional[Any] - location=None, # type Optional[str], - hashes=None, # type: Optional[Any] - progress_bar="off", # type: str - only_download=None, # type: Optional[bool] - session=None, # type: Optional[Any] -): - # (...) -> None - """ - Accepts all parameters that have been valid to pass - to :func:`pip._internal.download.unpack_url` and selects or - drops parameters as needed before invoking the provided - callable. - - :param unpack_fn: A callable or shim referring to the pip implementation - :type unpack_fn: Callable - :param str download_dir: The directory to download the file to - :param Optional[:class:`~pip._internal.req.req_install.InstallRequirement`] ireq: - an Install Requirement instance, defaults to None - :param Optional[:class:`~pip._internal.models.link.Link`] link: A Link instance, - defaults to None. - :param Optional[str] location: A location or source directory if the target is - a VCS url, defaults to None. - :param Optional[Any] hashes: A Hashes instance, defaults to None - :param str progress_bar: Indicates progress par usage during download, defatuls to - off. - :param Optional[bool] only_download: Whether to skip install, defaults to None. - :param Optional[`~requests.Session`] session: A PipSession instance, defaults to - None. - :return: The result of unpacking the url. - :rtype: None - """ - unpack_fn = resolve_possible_shim(unpack_fn) - required_args = inspect.getargs(unpack_fn.__code__).args # type: ignore - unpack_kwargs = {"download_dir": download_dir} - if ireq: - if not link and ireq.link: - link = ireq.link - if only_download is None: - only_download = ireq.is_wheel - if hashes is None: - hashes = ireq.hashes(True) - if location is None and getattr(ireq, "source_dir", None): - location = ireq.source_dir - unpack_kwargs.update({"link": link, "location": location}) - if hashes is not None and "hashes" in required_args: - unpack_kwargs["hashes"] = hashes - if "progress_bar" in required_args: - unpack_kwargs["progress_bar"] = progress_bar - if only_download is not None and "only_download" in required_args: - unpack_kwargs["only_download"] = only_download - if session is not None and "session" in required_args: - unpack_kwargs["session"] = session - return unpack_fn(**unpack_kwargs) # type: ignore - - -@contextlib.contextmanager -def make_preparer( - preparer_fn, # type: TShimmedFunc - req_tracker_fn=None, # type: Optional[TShimmedFunc] - build_dir=None, # type: Optional[str] - src_dir=None, # type: Optional[str] - download_dir=None, # type: Optional[str] - wheel_download_dir=None, # type: Optional[str] - progress_bar="off", # type: str - build_isolation=False, # type: bool - session=None, # type: Optional[TSession] - finder=None, # type: Optional[TFinder] - options=None, # type: Optional[Values] - require_hashes=None, # type: Optional[bool] - use_user_site=None, # type: Optional[bool] - req_tracker=None, # type: Optional[Union[TReqTracker, TShimmedFunc]] - install_cmd_provider=None, # type: Optional[TShimmedFunc] - install_cmd=None, # type: Optional[TCommandInstance] -): - # (...) 
-> ContextManager - """ - Creates a requirement preparer for preparing pip requirements. - - Provides a compatibilty shim that accepts all previously valid arguments and - discards any that are no longer used. - - :raises TypeError: No requirement tracker provided and one cannot be generated - :raises TypeError: No valid sessions provided and one cannot be generated - :raises TypeError: No valid finders provided and one cannot be generated - :param TShimmedFunc preparer_fn: Callable or shim for generating preparers. - :param Optional[TShimmedFunc] req_tracker_fn: Callable or shim for generating - requirement trackers, defualts to None - :param Optional[str] build_dir: Directory for building packages and wheels, - defaults to None - :param Optional[str] src_dir: Directory to find or extract source files, defaults - to None - :param Optional[str] download_dir: Target directory to download files, defaults to - None - :param Optional[str] wheel_download_dir: Target directoryto download wheels, defaults - to None - :param str progress_bar: Whether to display a progress bar, defaults to off - :param bool build_isolation: Whether to build requirements in isolation, defaults - to False - :param Optional[TSession] session: Existing session to use for getting requirements, - defaults to None - :param Optional[TFinder] finder: The package finder to use during resolution, - defaults to None - :param Optional[Values] options: Pip options to use if needed, defaults to None - :param Optional[bool] require_hashes: Whether to require hashes for preparation - :param Optional[bool] use_user_site: Whether to use the user site directory for - preparing requirements - :param Optional[Union[TReqTracker, TShimmedFunc]] req_tracker: The requirement - tracker to use for building packages, defaults to None - :param Optional[TCommandInstance] install_cmd: The install command used to create - the finder, session, and options if needed, defaults to None - :yield: A new requirement preparer instance - :rtype: ContextManager[:class:`~pip._internal.operations.prepare.RequirementPreparer`] - - :Example: - - >>> from pip_shims.shims import ( - ... InstallCommand, get_package_finder, make_preparer, get_requirement_tracker - ... ) - >>> install_cmd = InstallCommand() - >>> pip_options, _ = install_cmd.parser.parse_args([]) - >>> session = install_cmd._build_session(pip_options) - >>> finder = get_package_finder( - ... install_cmd, session=session, options=pip_options - ... ) - >>> with make_preparer( - ... options=pip_options, finder=finder, session=session, install_cmd=ic - ... ) as preparer: - ... 
print(preparer) - <pip._internal.operations.prepare.RequirementPreparer object at 0x7f8a2734be80> - """ - preparer_fn = resolve_possible_shim(preparer_fn) - required_args = inspect.getargs(preparer_fn.__init__.__code__).args # type: ignore - if not req_tracker and not req_tracker_fn and "req_tracker" in required_args: - raise TypeError("No requirement tracker and no req tracker generator found!") - req_tracker_fn = resolve_possible_shim(req_tracker_fn) - pip_options_created = options is None - session_is_required = "session" in required_args - finder_is_required = "finder" in required_args - options_map = { - "src_dir": src_dir, - "download_dir": download_dir, - "wheel_download_dir": wheel_download_dir, - "build_dir": build_dir, - "progress_bar": progress_bar, - "build_isolation": build_isolation, - "require_hashes": require_hashes, - "use_user_site": use_user_site, - } - if install_cmd is None: - assert install_cmd_provider is not None - install_cmd_provider = resolve_possible_shim(install_cmd_provider) - assert isinstance(install_cmd_provider, (type, functools.partial)) - install_cmd = install_cmd_provider() - preparer_args, options = populate_options(install_cmd, options, **options_map) - if options is not None and pip_options_created: - for k, v in options_map.items(): - suppress_setattr(options, k, v, filter_none=True) - if all([session is None, install_cmd is None, session_is_required]): - raise TypeError( - "Preparer requires a session instance which was not supplied and cannot be " - "created without an InstallCommand." - ) - elif all([session is None, session_is_required]): - session = get_session(install_cmd=install_cmd, options=options) - if all([finder is None, install_cmd is None, finder_is_required]): - raise TypeError( - "RequirementPreparer requires a packagefinder but no InstallCommand" - " was provided to build one and none was passed in." - ) - elif all([finder is None, finder_is_required]): - finder = get_package_finder(install_cmd, options=options, session=session) - preparer_args.update({"finder": finder, "session": session}) - req_tracker_fn = nullcontext if not req_tracker_fn else req_tracker_fn - with req_tracker_fn() as tracker_ctx: - if "req_tracker" in required_args: - req_tracker = tracker_ctx if req_tracker is None else req_tracker - preparer_args["req_tracker"] = req_tracker - - result = call_function_with_correct_args(preparer_fn, **preparer_args) - yield result - - -def get_resolver( - resolver_fn, # type: TShimmedFunc - install_req_provider=None, # type: Optional[TShimmedFunc] - format_control_provider=None, # type: Optional[TShimmedFunc] - wheel_cache_provider=None, # type: Optional[TShimmedFunc] - finder=None, # type: Optional[TFinder] - upgrade_strategy="to-satisfy-only", # type: str - force_reinstall=None, # type: Optional[bool] - ignore_dependencies=None, # type: Optional[bool] - ignore_requires_python=None, # type: Optional[bool] - ignore_installed=True, # type: bool - use_user_site=False, # type: bool - isolated=None, # type: Optional[bool] - wheel_cache=None, # type: Optional[TWheelCache] - preparer=None, # type: Optional[TPreparer] - session=None, # type: Optional[TSession] - options=None, # type: Optional[Values] - make_install_req=None, # type: Optional[Callable] - install_cmd_provider=None, # type: Optional[TShimmedFunc] - install_cmd=None, # type: Optional[TCommandInstance] -): - # (...) -> TResolver - """ - A resolver creation compatibility shim for generating a resolver. 
-
-    Consumes any argument that was previously used to instantiate a
-    resolver and discards anything that is no longer valid.
-
-    .. note:: This is only valid for **pip >= 10.0.0**
-
-    :raises TypeError: A session is required but not provided and one cannot be created
-    :raises TypeError: A finder is required but not provided and one cannot be created
-    :raises AssertionError: An install requirement provider is required and has not been
-        provided
-    :param TShimmedFunc resolver_fn: The resolver function used to create new resolver
-        instances.
-    :param TShimmedFunc install_req_provider: The provider function to use to generate
-        install requirements if needed.
-    :param TShimmedFunc format_control_provider: The provider function to use to generate
-        a format_control instance if needed.
-    :param TShimmedFunc wheel_cache_provider: The provider function to use to generate
-        a wheel cache if needed.
-    :param TShimmedFunc install_cmd_provider: A shim for providing new install command
-        instances, used to create one if needed.
-    :param Optional[TFinder] finder: The package finder to use during resolution,
-        defaults to None.
-    :param str upgrade_strategy: Upgrade strategy to use, defaults to ``only-if-needed``.
-    :param Optional[bool] force_reinstall: Whether to simulate or assume package
-        reinstallation during resolution, defaults to None
-    :param Optional[bool] ignore_dependencies: Whether to ignore package dependencies,
-        defaults to None
-    :param Optional[bool] ignore_requires_python: Whether to ignore declared
-        ``requires_python`` versions on packages, defaults to None
-    :param bool ignore_installed: Whether to ignore installed packages during resolution,
-        defaults to True
-    :param bool use_user_site: Whether to use the user site location during resolution,
-        defaults to False
-    :param Optional[bool] isolated: Whether to isolate the resolution process, defaults
-        to None
-    :param Optional[TWheelCache] wheel_cache: The wheel cache to use, defaults to None
-    :param Optional[TPreparer] preparer: The requirement preparer to use, defaults to
-        None
-    :param Optional[TSession] session: Existing session to use for getting requirements,
-        defaults to None
-    :param Optional[Values] options: Pip options to use if needed, defaults to None
-    :param Optional[functools.partial] make_install_req: The partial function to pass in
-        to the resolver for actually generating install requirements, if necessary
-    :param Optional[TCommandInstance] install_cmd: The install command used to create
-        the finder, session, and options if needed, defaults to None.
-    :return: A new resolver instance.
-    :rtype: :class:`~pip._internal.legacy_resolve.Resolver`
-
-    :Example:
-
-    >>> import os
-    >>> from tempfile import TemporaryDirectory
-    >>> from pip_shims.shims import (
-    ...     InstallCommand, get_package_finder, make_preparer, get_requirement_tracker,
-    ...     get_resolver, InstallRequirement, RequirementSet, WheelCache, FormatControl,
-    ...     USER_CACHE_DIR
-    ... )
-    >>> install_cmd = InstallCommand()
-    >>> pip_options, _ = install_cmd.parser.parse_args([])
-    >>> session = install_cmd._build_session(pip_options)
-    >>> finder = get_package_finder(
-    ...     install_cmd, session=session, options=pip_options
-    ... )
-    >>> wheel_cache = WheelCache(USER_CACHE_DIR, FormatControl(None, None))
-    >>> with TemporaryDirectory() as temp_base:
-    ...     reqset = RequirementSet()
-    ...     ireq = InstallRequirement.from_line("requests")
-    ...     ireq.is_direct = True
-    ...     build_dir = os.path.join(temp_base, "build")
-    ...     src_dir = os.path.join(temp_base, "src")
-    ...     ireq.build_location(build_dir)
-    ...     with make_preparer(
-    ...         options=pip_options, finder=finder, session=session,
-    ...         build_dir=build_dir, install_cmd=install_cmd,
-    ...     ) as preparer:
-    ...         resolver = get_resolver(
-    ...             finder=finder, ignore_dependencies=False, ignore_requires_python=True,
-    ...             preparer=preparer, session=session, options=pip_options,
-    ...             install_cmd=install_cmd, wheel_cache=wheel_cache,
-    ...         )
-    ...         resolver.require_hashes = False
-    ...         reqset.add_requirement(ireq)
-    ...         results = resolver.resolve(reqset)
-    ...         #reqset.cleanup_files()
-    ...         for result_req in reqset.requirements:
-    ...             print(result_req)
-    requests
-    chardet
-    certifi
-    urllib3
-    idna
-    """
-    resolver_fn = resolve_possible_shim(resolver_fn)
-    install_req_provider = resolve_possible_shim(install_req_provider)
-    format_control_provider = resolve_possible_shim(format_control_provider)
-    wheel_cache_provider = resolve_possible_shim(wheel_cache_provider)
-    install_cmd_provider = resolve_possible_shim(install_cmd_provider)
-    required_args = inspect.getargs(resolver_fn.__init__.__code__).args  # type: ignore
-    install_cmd_dependency_map = {"session": session, "finder": finder}
-    resolver_kwargs = {}  # type: Dict[str, Any]
-    if install_cmd is None:
-        assert isinstance(install_cmd_provider, (type, functools.partial))
-        install_cmd = install_cmd_provider()
-    if options is None and install_cmd is not None:
-        options = install_cmd.parser.parse_args([])  # type: ignore
-    for arg, val in install_cmd_dependency_map.items():
-        if arg not in required_args:
-            continue
-        elif val is None and install_cmd is None:
-            raise TypeError(
-                "Resolver requires a {0} but did not receive one "
-                "and cannot generate one".format(arg)
-            )
-        elif arg == "session" and val is None:
-            val = get_session(install_cmd=install_cmd, options=options)
-        elif arg == "finder" and val is None:
-            val = get_package_finder(install_cmd, options=options, session=session)
-        resolver_kwargs[arg] = val
-    if "make_install_req" in required_args:
-        if make_install_req is None and install_req_provider is not None:
-            make_install_req = functools.partial(
-                install_req_provider,
-                isolated=isolated,
-                wheel_cache=wheel_cache,
-                # use_pep517=use_pep517,
-            )
-        assert make_install_req is not None
-        resolver_kwargs["make_install_req"] = make_install_req
-    if "isolated" in required_args:
-        resolver_kwargs["isolated"] = isolated
-    if "wheel_cache" in required_args:
-        if wheel_cache is None and wheel_cache_provider is not None:
-            cache_dir = getattr(options, "cache_dir", None)
-            format_control = getattr(
-                options,
-                "format_control",
-                format_control_provider(None, None),  # type: ignore
-            )
-            wheel_cache = wheel_cache_provider(cache_dir, format_control)
-        resolver_kwargs["wheel_cache"] = wheel_cache
-    resolver_kwargs.update(
-        {
-            "upgrade_strategy": upgrade_strategy,
-            "force_reinstall": force_reinstall,
-            "ignore_dependencies": ignore_dependencies,
-            "ignore_requires_python": ignore_requires_python,
-            "ignore_installed": ignore_installed,
-            "use_user_site": use_user_site,
-            "preparer": preparer,
-        }
-    )
-    return resolver_fn(**resolver_kwargs)  # type: ignore
-
-
-def resolve(
-    ireq,  # type: TInstallRequirement
-    reqset_provider=None,  # type: Optional[TShimmedFunc]
-    req_tracker_provider=None,  # type: Optional[TShimmedFunc]
-    install_cmd_provider=None,  # type: Optional[TShimmedFunc]
-    install_command=None,  # type: Optional[TCommand]
-    finder_provider=None,  # type: Optional[TShimmedFunc]
-    resolver_provider=None,  # type: Optional[TShimmedFunc]
-    wheel_cache_provider=None,  # type: Optional[TShimmedFunc]
-    format_control_provider=None,  # type: Optional[TShimmedFunc]
-    make_preparer_provider=None,  # type: Optional[TShimmedFunc]
-    options=None,  # type: Optional[Values]
-    session=None,  # type: Optional[TSession]
-    resolver=None,  # type: Optional[TResolver]
-    finder=None,  # type: Optional[TFinder]
-    upgrade_strategy="to-satisfy-only",  # type: str
-    force_reinstall=None,  # type: Optional[bool]
-    ignore_dependencies=None,  # type: Optional[bool]
-    ignore_requires_python=None,  # type: Optional[bool]
-    ignore_installed=True,  # type: bool
-    use_user_site=False,  # type: bool
-    isolated=None,  # type: Optional[bool]
-    build_dir=None,  # type: Optional[str]
-    source_dir=None,  # type: Optional[str]
-    download_dir=None,  # type: Optional[str]
-    cache_dir=None,  # type: Optional[str]
-    wheel_download_dir=None,  # type: Optional[str]
-    wheel_cache=None,  # type: Optional[TWheelCache]
-    require_hashes=None,  # type: Optional[bool]
-):
-    # (...) -> Dict[str, TInstallRequirement]
-    """
-    Resolves the provided **InstallRequirement**, returning a dictionary that maps
-    package names to their corresponding ``InstallRequirement`` instances.
-
-    :param :class:`~pip._internal.req.req_install.InstallRequirement` ireq: An
-        InstallRequirement to initiate the resolution process
-    :param :class:`~pip_shims.models.ShimmedPathCollection` reqset_provider: A provider
-        to build requirement set instances.
-    :param :class:`~pip_shims.models.ShimmedPathCollection` req_tracker_provider: A
-        provider to build requirement tracker instances
-    :param install_cmd_provider: A shim for providing new install command instances.
-    :type install_cmd_provider: :class:`~pip_shims.models.ShimmedPathCollection`
-    :param Optional[TCommandInstance] install_command: The install command used to
-        create the finder, session, and options if needed, defaults to None.
-    :param :class:`~pip_shims.models.ShimmedPathCollection` finder_provider: A provider
-        to build package finder instances.
-    :param :class:`~pip_shims.models.ShimmedPathCollection` resolver_provider: A provider
-        to build resolver instances
-    :param TShimmedFunc wheel_cache_provider: The provider function to use to generate a
-        wheel cache if needed.
-    :param TShimmedFunc format_control_provider: The provider function to use to generate
-        a format_control instance if needed.
-    :param TShimmedFunc make_preparer_provider: Callable or shim for generating preparers.
-    :param Optional[Values] options: Pip options to use if needed, defaults to None
-    :param Optional[TSession] session: Existing session to use for getting requirements,
-        defaults to None
-    :param :class:`~pip._internal.legacy_resolve.Resolver` resolver: A pre-existing
-        resolver instance to use for resolution
-    :param Optional[TFinder] finder: The package finder to use during resolution,
-        defaults to None.
-    :param str upgrade_strategy: Upgrade strategy to use, defaults to ``to-satisfy-only``.
-    :param Optional[bool] force_reinstall: Whether to simulate or assume package
-        reinstallation during resolution, defaults to None
-    :param Optional[bool] ignore_dependencies: Whether to ignore package dependencies,
-        defaults to None
-    :param Optional[bool] ignore_requires_python: Whether to ignore declared
-        ``requires_python`` versions on packages, defaults to None
-    :param bool ignore_installed: Whether to ignore installed packages during
-        resolution, defaults to True
-    :param bool use_user_site: Whether to use the user site location during resolution,
-        defaults to False
-    :param Optional[bool] isolated: Whether to isolate the resolution process, defaults
-        to None
-    :param Optional[str] build_dir: Directory for building packages and wheels, defaults
-        to None
-    :param str source_dir: The directory to use for source requirements. Removed in pip
-        10, defaults to None
-    :param Optional[str] download_dir: Target directory to download files, defaults to
-        None
-    :param str cache_dir: The cache directory to use for caching artifacts during
-        resolution
-    :param Optional[str] wheel_download_dir: Target directory to download wheels, defaults
-        to None
-    :param Optional[TWheelCache] wheel_cache: The wheel cache to use, defaults to None
-    :param Optional[bool] require_hashes: Whether to require hashes when resolving,
-        treated as False when not provided.
-    :return: A dictionary mapping package names to their corresponding
-        :class:`~pip._internal.req.req_install.InstallRequirement` instances
-    :rtype: Dict[str, :class:`~pip._internal.req.req_install.InstallRequirement`]
-
-    :Example:
-
-    >>> from pip_shims.shims import resolve, InstallRequirement
-    >>> ireq = InstallRequirement.from_line("requests>=2.20")
-    >>> results = resolve(ireq)
-    >>> for k, v in results.items():
-    ...     print("{0}: {1!r}".format(k, v))
-    requests: <InstallRequirement object: requests>=2.20 from https://files.pythonhosted.
- org/packages/51/bd/23c926cd341ea6b7dd0b2a00aba99ae0f828be89d72b2190f27c11d4b7fb/reque - sts-2.22.0-py2.py3-none-any.whl#sha256=9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590 - f48c010551dc6c4b31 editable=False> - idna: <InstallRequirement object: idna<2.9,>=2.5 from https://files.pythonhosted.org/ - packages/14/2c/cd551d81dbe15200be1cf41cd03869a46fe7226e7450af7a6545bfc474c9/idna-2.8- - py2.py3-none-any.whl#sha256=ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432 - f7e4a3c (from requests>=2.20) editable=False> - urllib3: <InstallRequirement object: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 from htt - ps://files.pythonhosted.org/packages/b4/40/a9837291310ee1ccc242ceb6ebfd9eb21539649f19 - 3a7c8c86ba15b98539/urllib3-1.25.7-py2.py3-none-any.whl#sha256=a8a318824cc77d1fd4b2bec - 2ded92646630d7fe8619497b142c84a9e6f5a7293 (from requests>=2.20) editable=False> - chardet: <InstallRequirement object: chardet<3.1.0,>=3.0.2 from https://files.pythonh - osted.org/packages/bc/a9/01ffebfb562e4274b6487b4bb1ddec7ca55ec7510b22e4c51f14098443b8 - /chardet-3.0.4-py2.py3-none-any.whl#sha256=fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed - 4531e3e15460124c106691 (from requests>=2.20) editable=False> - certifi: <InstallRequirement object: certifi>=2017.4.17 from https://files.pythonhost - ed.org/packages/18/b0/8146a4f8dd402f60744fa380bc73ca47303cccf8b9190fd16a827281eac2/ce - rtifi-2019.9.11-py2.py3-none-any.whl#sha256=fd7c7c74727ddcf00e9acd26bba8da604ffec95bf - 1c2144e67aff7a8b50e6cef (from requests>=2.20) editable=False> - """ - reqset_provider = resolve_possible_shim(reqset_provider) - finder_provider = resolve_possible_shim(finder_provider) - resolver_provider = resolve_possible_shim(resolver_provider) - wheel_cache_provider = resolve_possible_shim(wheel_cache_provider) - format_control_provider = resolve_possible_shim(format_control_provider) - make_preparer_provider = resolve_possible_shim(make_preparer_provider) - req_tracker_provider = resolve_possible_shim(req_tracker_provider) - install_cmd_provider = resolve_possible_shim(install_cmd_provider) - if install_command is None: - assert isinstance(install_cmd_provider, (type, functools.partial)) - install_command = install_cmd_provider() - kwarg_map = { - "upgrade_strategy": upgrade_strategy, - "force_reinstall": force_reinstall, - "ignore_dependencies": ignore_dependencies, - "ignore_requires_python": ignore_requires_python, - "ignore_installed": ignore_installed, - "use_user_site": use_user_site, - "isolated": isolated, - "build_dir": build_dir, - "src_dir": source_dir, - "download_dir": download_dir, - "require_hashes": require_hashes, - "cache_dir": cache_dir, - } - kwargs, options = populate_options(install_command, options, **kwarg_map) - with ExitStack() as ctx: - kwargs = ctx.enter_context( - ensure_resolution_dirs(wheel_download_dir=wheel_download_dir, **kwargs) - ) - wheel_download_dir = kwargs.pop("wheel_download_dir") - if session is None: - session = get_session(install_cmd=install_command, options=options) - if finder is None: - finder = finder_provider( - install_command, options=options, session=session - ) # type: ignore - format_control = getattr(options, "format_control", None) - if not format_control: - format_control = format_control_provider(None, None) # type: ignore - wheel_cache = wheel_cache_provider( - kwargs["cache_dir"], format_control - ) # type: ignore - ireq.is_direct = True # type: ignore - ireq.build_location(kwargs["build_dir"]) # type: ignore - if reqset_provider is None: - raise TypeError( - "cannot resolve without a 
requirement set provider... failed!" - ) - reqset = reqset_provider( - install_command, - options=options, - session=session, - wheel_download_dir=wheel_download_dir, - **kwargs - ) # type: ignore - if getattr(reqset, "prepare_files", None): - reqset.add_requirement(ireq) - results = reqset.prepare_files(finder) - result = reqset.requirements - reqset.cleanup_files() - return result - if make_preparer_provider is None: - raise TypeError("Cannot create requirement preparer, cannot resolve!") - - preparer_args = { - "build_dir": kwargs["build_dir"], - "src_dir": kwargs["src_dir"], - "download_dir": kwargs["download_dir"], - "wheel_download_dir": wheel_download_dir, - "build_isolation": kwargs["isolated"], - "install_cmd": install_command, - "options": options, - "finder": finder, - "session": session, - "use_user_site": use_user_site, - "require_hashes": require_hashes, - } - # with req_tracker_provider() as req_tracker: - if isinstance(req_tracker_provider, (types.FunctionType, functools.partial)): - preparer_args["req_tracker"] = ctx.enter_context(req_tracker_provider()) - resolver_keys = [ - "upgrade_strategy", - "force_reinstall", - "ignore_dependencies", - "ignore_installed", - "use_user_site", - "isolated", - "use_user_site", - ] - resolver_args = {key: kwargs[key] for key in resolver_keys if key in kwargs} - if resolver_provider is None: - raise TypeError("Cannot resolve without a resolver provider... failed!") - preparer = ctx.enter_context(make_preparer_provider(**preparer_args)) - resolver = resolver_provider( - finder=finder, - preparer=preparer, - session=session, - options=options, - install_cmd=install_command, - wheel_cache=wheel_cache, - **resolver_args - ) # type: ignore - reqset.add_requirement(ireq) - resolver.require_hashes = kwargs.get("require_hashes", False) # type: ignore - resolver.resolve(reqset) # type: ignore - results = reqset.requirements - reqset.cleanup_files() - return results diff --git a/pipenv/vendor/pyparsing.py b/pipenv/vendor/pyparsing.py index 9a2dd7bf36..4d2f98e46e 100644 --- a/pipenv/vendor/pyparsing.py +++ b/pipenv/vendor/pyparsing.py @@ -95,8 +95,8 @@ namespace class """ -__version__ = "2.4.5" -__versionTime__ = "09 Nov 2019 23:03 UTC" +__version__ = "2.4.6" +__versionTime__ = "24 Dec 2019 04:27 UTC" __author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" import string @@ -114,6 +114,7 @@ from operator import itemgetter import itertools from functools import wraps +from contextlib import contextmanager try: # Python 3 @@ -184,6 +185,7 @@ class SimpleNamespace: pass __diag__.warn_name_set_on_empty_Forward = False __diag__.warn_on_multiple_string_args_to_oneof = False __diag__.enable_debug_on_named_expressions = False +__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")] def _enable_all_warnings(): __diag__.warn_multiple_tokens_in_named_alternation = True @@ -3630,24 +3632,24 @@ class White(Token): '\n': '<LF>', '\r': '<CR>', '\f': '<FF>', - 'u\00A0': '<NBSP>', - 'u\1680': '<OGHAM_SPACE_MARK>', - 'u\180E': '<MONGOLIAN_VOWEL_SEPARATOR>', - 'u\2000': '<EN_QUAD>', - 'u\2001': '<EM_QUAD>', - 'u\2002': '<EN_SPACE>', - 'u\2003': '<EM_SPACE>', - 'u\2004': '<THREE-PER-EM_SPACE>', - 'u\2005': '<FOUR-PER-EM_SPACE>', - 'u\2006': '<SIX-PER-EM_SPACE>', - 'u\2007': '<FIGURE_SPACE>', - 'u\2008': '<PUNCTUATION_SPACE>', - 'u\2009': '<THIN_SPACE>', - 'u\200A': '<HAIR_SPACE>', - 'u\200B': '<ZERO_WIDTH_SPACE>', - 'u\202F': '<NNBSP>', - 'u\205F': '<MMSP>', - 'u\3000': '<IDEOGRAPHIC_SPACE>', + u'\u00A0': '<NBSP>', + 
u'\u1680': '<OGHAM_SPACE_MARK>', + u'\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>', + u'\u2000': '<EN_QUAD>', + u'\u2001': '<EM_QUAD>', + u'\u2002': '<EN_SPACE>', + u'\u2003': '<EM_SPACE>', + u'\u2004': '<THREE-PER-EM_SPACE>', + u'\u2005': '<FOUR-PER-EM_SPACE>', + u'\u2006': '<SIX-PER-EM_SPACE>', + u'\u2007': '<FIGURE_SPACE>', + u'\u2008': '<PUNCTUATION_SPACE>', + u'\u2009': '<THIN_SPACE>', + u'\u200A': '<HAIR_SPACE>', + u'\u200B': '<ZERO_WIDTH_SPACE>', + u'\u202F': '<NNBSP>', + u'\u205F': '<MMSP>', + u'\u3000': '<IDEOGRAPHIC_SPACE>', } def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): super(White, self).__init__() @@ -6064,7 +6066,7 @@ def parseImpl(self, instring, loc, doActions=True): matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr)) elif arity == 3: matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) - + Group(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)) + + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: @@ -6835,6 +6837,187 @@ class Devanagari(unicode_set): setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari) +class pyparsing_test: + """ + namespace class for classes useful in writing unit tests + """ + + class reset_pyparsing_context: + """ + Context manager to be used when writing unit tests that modify pyparsing config values: + - packrat parsing + - default whitespace characters. + - default keyword characters + - literal string auto-conversion class + - __diag__ settings + + Example: + with reset_pyparsing_context(): + # test that literals used to construct a grammar are automatically suppressed + ParserElement.inlineLiteralsUsing(Suppress) + + term = Word(alphas) | Word(nums) + group = Group('(' + term[...] 
+ ')')
+
+                # assert that the '()' characters are not included in the parsed tokens
+                self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
+
+            # after exiting context manager, literals are converted to Literal expressions again
+        """
+
+        def __init__(self):
+            self._save_context = {}
+
+        def save(self):
+            self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
+            self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
+            self._save_context[
+                "literal_string_class"
+            ] = ParserElement._literalStringClass
+            self._save_context["packrat_enabled"] = ParserElement._packratEnabled
+            self._save_context["packrat_parse"] = ParserElement._parse
+            self._save_context["__diag__"] = {
+                name: getattr(__diag__, name) for name in __diag__._all_names
+            }
+            self._save_context["__compat__"] = {
+                "collect_all_And_tokens": __compat__.collect_all_And_tokens
+            }
+            return self
+
+        def restore(self):
+            # reset pyparsing global state
+            if (
+                ParserElement.DEFAULT_WHITE_CHARS
+                != self._save_context["default_whitespace"]
+            ):
+                ParserElement.setDefaultWhitespaceChars(
+                    self._save_context["default_whitespace"]
+                )
+            Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
+            ParserElement.inlineLiteralsUsing(
+                self._save_context["literal_string_class"]
+            )
+            for name, value in self._save_context["__diag__"].items():
+                setattr(__diag__, name, value)
+            ParserElement._packratEnabled = self._save_context["packrat_enabled"]
+            ParserElement._parse = self._save_context["packrat_parse"]
+            __compat__.collect_all_And_tokens = self._save_context["__compat__"][
+                "collect_all_And_tokens"
+            ]
+
+        def __enter__(self):
+            return self.save()
+
+        def __exit__(self, *args):
+            return self.restore()
+
+    class TestParseResultsAsserts:
+        """
+        A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
+        """
+        def assertParseResultsEquals(
+            self, result, expected_list=None, expected_dict=None, msg=None
+        ):
+            """
+            Unit test assertion to compare a ParseResults object with an optional expected_list,
+            and compare any defined results names with an optional expected_dict.
+            """
+            if expected_list is not None:
+                self.assertEqual(expected_list, result.asList(), msg=msg)
+            if expected_dict is not None:
+                self.assertEqual(expected_dict, result.asDict(), msg=msg)
+
+        def assertParseAndCheckList(
+            self, expr, test_string, expected_list, msg=None, verbose=True
+        ):
+            """
+            Convenience wrapper assert to test a parser element and input string, and assert that
+            the resulting ParseResults.asList() is equal to the expected_list.
+            """
+            result = expr.parseString(test_string, parseAll=True)
+            if verbose:
+                print(result.dump())
+            self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
+
+        def assertParseAndCheckDict(
+            self, expr, test_string, expected_dict, msg=None, verbose=True
+        ):
+            """
+            Convenience wrapper assert to test a parser element and input string, and assert that
+            the resulting ParseResults.asDict() is equal to the expected_dict.
+            """
+            result = expr.parseString(test_string, parseAll=True)
+            if verbose:
+                print(result.dump())
+            self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
+
+        def assertRunTestResults(
+            self, run_tests_report, expected_parse_results=None, msg=None
+        ):
+            """
+            Unit test assertion to evaluate output of ParserElement.runTests().
If a list of + list-dict tuples is given as the expected_parse_results argument, then these are zipped + with the report tuples returned by runTests and evaluated using assertParseResultsEquals. + Finally, asserts that the overall runTests() success value is True. + + :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests + :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] + """ + run_test_success, run_test_results = run_tests_report + + if expected_parse_results is not None: + merged = [ + (rpt[0], rpt[1], expected) + for rpt, expected in zip(run_test_results, expected_parse_results) + ] + for test_string, result, expected in merged: + # expected should be a tuple containing a list and/or a dict or an exception, + # and optional failure message string + # an empty tuple will skip any result validation + fail_msg = next( + (exp for exp in expected if isinstance(exp, str)), None + ) + expected_exception = next( + ( + exp + for exp in expected + if isinstance(exp, type) and issubclass(exp, Exception) + ), + None, + ) + if expected_exception is not None: + with self.assertRaises( + expected_exception=expected_exception, msg=fail_msg or msg + ): + if isinstance(result, Exception): + raise result + else: + expected_list = next( + (exp for exp in expected if isinstance(exp, list)), None + ) + expected_dict = next( + (exp for exp in expected if isinstance(exp, dict)), None + ) + if (expected_list, expected_dict) != (None, None): + self.assertParseResultsEquals( + result, + expected_list=expected_list, + expected_dict=expected_dict, + msg=fail_msg or msg, + ) + else: + # warning here maybe? + print("no validation for {!r}".format(test_string)) + + # do this last, in case some specific test results can be reported instead + self.assertTrue( + run_test_success, msg=msg if msg is not None else "failed runTests" + ) + + @contextmanager + def assertRaisesParseException(self, exc_type=ParseException, msg=None): + with self.assertRaises(exc_type, msg=msg): + yield + + if __name__ == "__main__": selectToken = CaselessLiteral("select") diff --git a/pipenv/vendor/requests/LICENSE b/pipenv/vendor/requests/LICENSE index 841c6023b9..13d91ddc7a 100644 --- a/pipenv/vendor/requests/LICENSE +++ b/pipenv/vendor/requests/LICENSE @@ -1,4 +1,4 @@ -Copyright 2018 Kenneth Reitz +Copyright 2019 Kenneth Reitz Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pipenv/vendor/requests/__init__.py b/pipenv/vendor/requests/__init__.py index 9a899df67f..626247cbba 100644 --- a/pipenv/vendor/requests/__init__.py +++ b/pipenv/vendor/requests/__init__.py @@ -9,14 +9,14 @@ Requests HTTP Library ~~~~~~~~~~~~~~~~~~~~~ -Requests is an HTTP library, written in Python, for human beings. Basic GET -usage: +Requests is an HTTP library, written in Python, for human beings. +Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 - >>> 'Python is a programming language' in r.content + >>> b'Python is a programming language' in r.content True ... or POST: @@ -27,14 +27,14 @@ { ... "form": { - "key2": "value2", - "key1": "value1" + "key1": "value1", + "key2": "value2" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation -is at <http://python-requests.org>. +is at <https://requests.readthedocs.io>. :copyright: (c) 2017 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. 
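Note on the requests docstring change above: on Python 3, ``Response.content`` is ``bytes``, so the membership test needs a bytes operand; the old example only worked on Python 2. A quick sketch of the behavior the updated docstring reflects (assumes network access; the exact page text may change over time):

    >>> import requests
    >>> r = requests.get('https://www.python.org')
    >>> 'Python is a programming language' in r.content
    Traceback (most recent call last):
      ...
    TypeError: a bytes-like object is required, not 'str'
    >>> b'Python is a programming language' in r.content
    True
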
diff --git a/pipenv/vendor/requests/__version__.py b/pipenv/vendor/requests/__version__.py index 9844f740ab..b9e7df4881 100644 --- a/pipenv/vendor/requests/__version__.py +++ b/pipenv/vendor/requests/__version__.py @@ -4,11 +4,11 @@ __title__ = 'requests' __description__ = 'Python HTTP for Humans.' -__url__ = 'http://python-requests.org' -__version__ = '2.22.0' -__build__ = 0x022200 +__url__ = 'https://requests.readthedocs.io' +__version__ = '2.23.0' +__build__ = 0x022300 __author__ = 'Kenneth Reitz' __author_email__ = 'me@kennethreitz.org' __license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2019 Kenneth Reitz' +__copyright__ = 'Copyright 2020 Kenneth Reitz' __cake__ = u'\u2728 \U0001f370 \u2728' diff --git a/pipenv/vendor/requests/api.py b/pipenv/vendor/requests/api.py index ef71d0759e..e978e20311 100644 --- a/pipenv/vendor/requests/api.py +++ b/pipenv/vendor/requests/api.py @@ -16,7 +16,7 @@ def request(method, url, **kwargs): """Constructs and sends a :class:`Request <Request>`. - :param method: method for the new :class:`Request` object. + :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send in the query string for the :class:`Request`. @@ -50,6 +50,7 @@ def request(method, url, **kwargs): >>> import requests >>> req = requests.request('GET', 'https://httpbin.org/get') + >>> req <Response [200]> """ @@ -92,7 +93,9 @@ def head(url, **kwargs): r"""Sends a HEAD request. :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. + :param \*\*kwargs: Optional arguments that ``request`` takes. If + `allow_redirects` is not provided, it will be set to `False` (as + opposed to the default :meth:`request` behavior). :return: :class:`Response <Response>` object :rtype: requests.Response """ diff --git a/pipenv/vendor/requests/auth.py b/pipenv/vendor/requests/auth.py index bdde51c7fd..eeface39ae 100644 --- a/pipenv/vendor/requests/auth.py +++ b/pipenv/vendor/requests/auth.py @@ -50,7 +50,7 @@ def _basic_auth_str(username, password): "Non-string passwords will no longer be supported in Requests " "3.0.0. Please convert the object you've passed in ({!r}) to " "a string or bytes object in the near future to avoid " - "problems.".format(password), + "problems.".format(type(password)), category=DeprecationWarning, ) password = str(password) @@ -239,7 +239,7 @@ def handle_401(self, r, **kwargs): """ # If response is not 4xx, do not auth - # See https://github.com/requests/requests/issues/3772 + # See https://github.com/psf/requests/issues/3772 if not 400 <= r.status_code < 500: self._thread_local.num_401_calls = 1 return r diff --git a/pipenv/vendor/requests/compat.py b/pipenv/vendor/requests/compat.py index c44b35efb9..5de0769f50 100644 --- a/pipenv/vendor/requests/compat.py +++ b/pipenv/vendor/requests/compat.py @@ -43,6 +43,7 @@ import cookielib from Cookie import Morsel from StringIO import StringIO + # Keep OrderedDict for backwards compatibility. from collections import Callable, Mapping, MutableMapping, OrderedDict @@ -59,6 +60,7 @@ from http import cookiejar as cookielib from http.cookies import Morsel from io import StringIO + # Keep OrderedDict for backwards compatibility. 
from collections import OrderedDict from collections.abc import Callable, Mapping, MutableMapping diff --git a/pipenv/vendor/requests/models.py b/pipenv/vendor/requests/models.py index 62dcd0b7c8..357988327e 100644 --- a/pipenv/vendor/requests/models.py +++ b/pipenv/vendor/requests/models.py @@ -12,7 +12,7 @@ # Import encoding now, to avoid implicit import later. # Implicit import within threads may cause LookupError when standard library is in a ZIP, -# such as in Embedded Python. See https://github.com/requests/requests/issues/3578. +# such as in Embedded Python. See https://github.com/psf/requests/issues/3578. import encodings.idna from urllib3.fields import RequestField @@ -280,6 +280,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): >>> import requests >>> req = requests.Request('GET', 'https://httpbin.org/get') >>> r = req.prepare() + >>> r <PreparedRequest [GET]> >>> s = requests.Session() @@ -358,7 +359,7 @@ def prepare_url(self, url, params): #: We're unable to blindly call unicode/str functions #: as this will include the bytestring indicator (b'') #: on python 3.x. - #: https://github.com/requests/requests/pull/2238 + #: https://github.com/psf/requests/pull/2238 if isinstance(url, bytes): url = url.decode('utf8') else: @@ -608,7 +609,7 @@ def __init__(self): #: File-like object representation of response (for advanced usage). #: Use of ``raw`` requires that ``stream=True`` be set on the request. - # This requirement does not apply for use internally to Requests. + #: This requirement does not apply for use internally to Requests. self.raw = None #: Final URL location of Response. diff --git a/pipenv/vendor/requests/sessions.py b/pipenv/vendor/requests/sessions.py index d73d700fa6..2845880bf4 100644 --- a/pipenv/vendor/requests/sessions.py +++ b/pipenv/vendor/requests/sessions.py @@ -11,9 +11,10 @@ import sys import time from datetime import timedelta +from collections import OrderedDict from .auth import _basic_auth_str -from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse, Mapping +from .compat import cookielib, is_py3, urljoin, urlparse, Mapping from .cookies import ( cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT @@ -162,7 +163,7 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None, resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: - raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp) + raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp) # Release the connection back into the pool. 
resp.close() @@ -170,7 +171,7 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None, # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) - url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url) + url = ':'.join([to_native_string(parsed_rurl.scheme), url]) # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) parsed = urlparse(url) @@ -192,19 +193,16 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None, self.rebuild_method(prepared_request, resp) - # https://github.com/requests/requests/issues/1084 + # https://github.com/psf/requests/issues/1084 if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): - # https://github.com/requests/requests/issues/3490 + # https://github.com/psf/requests/issues/3490 purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers - try: - del headers['Cookie'] - except KeyError: - pass + headers.pop('Cookie', None) # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared @@ -271,7 +269,6 @@ def rebuild_auth(self, prepared_request, response): if new_auth is not None: prepared_request.prepare_auth(new_auth) - return def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the @@ -352,13 +349,13 @@ class Session(SessionRedirectMixin): Or as a context manager:: >>> with requests.Session() as s: - >>> s.get('https://httpbin.org/get') + ... s.get('https://httpbin.org/get') <Response [200]> """ __attrs__ = [ 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', - 'cert', 'prefetch', 'adapters', 'stream', 'trust_env', + 'cert', 'adapters', 'stream', 'trust_env', 'max_redirects', ] @@ -728,7 +725,7 @@ def get_adapter(self, url): return adapter # Nothing matches :-/ - raise InvalidSchema("No connection adapters were found for '%s'" % url) + raise InvalidSchema("No connection adapters were found for {!r}".format(url)) def close(self): """Closes all adapters and as such the session""" diff --git a/pipenv/vendor/requests/status_codes.py b/pipenv/vendor/requests/status_codes.py index 813e8c4e62..d80a7cd4dd 100644 --- a/pipenv/vendor/requests/status_codes.py +++ b/pipenv/vendor/requests/status_codes.py @@ -5,12 +5,15 @@ to their numerical codes, accessible either as attributes or as dictionary items. ->>> requests.codes['temporary_redirect'] -307 ->>> requests.codes.teapot -418 ->>> requests.codes['\o/'] -200 +Example:: + + >>> import requests + >>> requests.codes['temporary_redirect'] + 307 + >>> requests.codes.teapot + 418 + >>> requests.codes['\o/'] + 200 Some codes have multiple names, and both upper- and lower-case versions of the names are allowed. For example, ``codes.ok``, ``codes.OK``, and diff --git a/pipenv/vendor/requests/structures.py b/pipenv/vendor/requests/structures.py index da930e2852..8ee0ba7a08 100644 --- a/pipenv/vendor/requests/structures.py +++ b/pipenv/vendor/requests/structures.py @@ -7,7 +7,9 @@ Data structures that power Requests. 
""" -from .compat import OrderedDict, Mapping, MutableMapping +from collections import OrderedDict + +from .compat import Mapping, MutableMapping class CaseInsensitiveDict(MutableMapping): diff --git a/pipenv/vendor/requests/utils.py b/pipenv/vendor/requests/utils.py index 8170a8d2c4..c1700d7fe8 100644 --- a/pipenv/vendor/requests/utils.py +++ b/pipenv/vendor/requests/utils.py @@ -19,6 +19,7 @@ import tempfile import warnings import zipfile +from collections import OrderedDict from .__version__ import __version__ from . import certs @@ -26,7 +27,7 @@ from ._internal_utils import to_native_string from .compat import parse_http_list as _parse_list_header from .compat import ( - quote, urlparse, bytes, str, OrderedDict, unquote, getproxies, + quote, urlparse, bytes, str, unquote, getproxies, proxy_bypass, urlunparse, basestring, integer_types, is_py3, proxy_bypass_environment, getproxies_environment, Mapping) from .cookies import cookiejar_from_dict @@ -179,7 +180,7 @@ def get_netrc_auth(url, raise_errors=False): except KeyError: # os.path.expanduser can fail when $HOME is undefined and # getpwuid fails. See https://bugs.python.org/issue20164 & - # https://github.com/requests/requests/issues/1846 + # https://github.com/psf/requests/issues/1846 return if os.path.exists(loc): @@ -266,6 +267,8 @@ def from_key_val_list(value): >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') + Traceback (most recent call last): + ... ValueError: cannot encode objects that are not 2-tuples >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) @@ -292,7 +295,9 @@ def to_key_val_list(value): >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list('string') - ValueError: cannot encode objects that are not 2-tuples. + Traceback (most recent call last): + ... + ValueError: cannot encode objects that are not 2-tuples :rtype: list """ diff --git a/pipenv/vendor/requirementslib/LICENSE b/pipenv/vendor/requirementslib/LICENSE index 8c731e2798..a6b8c96a23 100644 --- a/pipenv/vendor/requirementslib/LICENSE +++ b/pipenv/vendor/requirementslib/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright 2018 Dan Ryan. +Copyright 2019 Dan Ryan. 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/pipenv/vendor/requirementslib/utils.py b/pipenv/vendor/requirementslib/utils.py
index b9db5d1686..d76f82e974 100644
--- a/pipenv/vendor/requirementslib/utils.py
+++ b/pipenv/vendor/requirementslib/utils.py
@@ -121,7 +121,7 @@ def strip_ssh_from_git_uri(uri):
 
 def add_ssh_scheme_to_git_uri(uri):
     # type: (S) -> S
-    """Cleans VCS uris from pip format"""
+    """Cleans VCS uris from pipenv.patched.notpip format"""
     if isinstance(uri, six.string_types):
         # Add scheme for parsing purposes, this is also what pip does
         if uri.startswith("git+") and "://" not in uri:
diff --git a/pipenv/vendor/shellingham/__init__.py b/pipenv/vendor/shellingham/__init__.py
index b834b74b3f..2e7c0b79e6 100644
--- a/pipenv/vendor/shellingham/__init__.py
+++ b/pipenv/vendor/shellingham/__init__.py
@@ -4,7 +4,7 @@
 from ._core import ShellDetectionFailure
 
-__version__ = '1.3.1'
+__version__ = '1.3.2'
 
 
 def detect_shell(pid=None, max_depth=6):
diff --git a/pipenv/vendor/shellingham/posix/__init__.py b/pipenv/vendor/shellingham/posix/__init__.py
index 923032b609..164bbc1da0 100644
--- a/pipenv/vendor/shellingham/posix/__init__.py
+++ b/pipenv/vendor/shellingham/posix/__init__.py
@@ -1,4 +1,5 @@
 import os
+import re
 
 from .._core import SHELL_NAMES, ShellDetectionFailure
 from . import proc, ps
@@ -19,20 +20,16 @@ def _get_process_mapping():
     raise ShellDetectionFailure('compatible proc fs or ps utility is required')
 
 
-def _iter_process_command(mapping, pid, max_depth):
-    """Iterator to traverse up the tree, yielding `argv[0]` of each process.
+def _iter_process_args(mapping, pid, max_depth):
+    """Iterator to traverse up the tree, yielding each process's argument list.
     """
     for _ in range(max_depth):
         try:
             proc = mapping[pid]
         except KeyError:    # We've reached the root process. Give up.
             break
-        try:
-            cmd = proc.args[0]
-        except IndexError:  # Process has no name? Whatever, ignore it.
-            pass
-        else:
-            yield cmd
+        if proc.args:   # Presumably the process should always have a name?
+            yield proc.args
         pid = proc.ppid     # Go up one level.
 
 
@@ -47,15 +44,50 @@ def _get_login_shell(proc_cmd):
     return (os.path.basename(proc_cmd).lower(), proc_cmd)
 
 
+_INTERPRETER_SHELL_NAMES = [
+    (re.compile(r'^python(\d+(\.\d+)?)?$'), {'xonsh'}),
+]
+
+
+def _get_interpreter_shell(proc_name, proc_args):
+    """Get shell invoked via an interpreter.
+
+    Some shells are implemented on, and invoked with, an interpreter, e.g.
+    xonsh is commonly executed with an executable Python script. This detects
+    what script the interpreter is actually running, and checks whether that
+    looks like a shell.
+
+    See sarugaku/shellingham#26 for rationale.
+    """
+    for pattern, shell_names in _INTERPRETER_SHELL_NAMES:
+        if not pattern.match(proc_name):
+            continue
+        for arg in proc_args:
+            name = os.path.basename(arg).lower()
+            if os.path.isfile(arg) and name in shell_names:
+                return (name, arg)
+    return None
+
+
+def _get_shell(cmd, *args):
+    if cmd.startswith('-'):     # Login shell! Let's use this.
+        return _get_login_shell(cmd)
+    name = os.path.basename(cmd).lower()
+    if name in SHELL_NAMES:     # Command looks like a shell.
+        return (name, cmd)
+    shell = _get_interpreter_shell(name, args)
+    if shell:
+        return shell
+    return None
+
+
 def get_shell(pid=None, max_depth=6):
     """Get the shell that the supplied pid or os.getpid() is running in.
""" pid = str(pid or os.getpid()) mapping = _get_process_mapping() - for proc_cmd in _iter_process_command(mapping, pid, max_depth): - if proc_cmd.startswith('-'): # Login shell! Let's use this. - return _get_login_shell(proc_cmd) - name = os.path.basename(proc_cmd).lower() - if name in SHELL_NAMES: # The inner-most (non-login) shell. - return (name, proc_cmd) + for proc_args in _iter_process_args(mapping, pid, max_depth): + shell = _get_shell(*proc_args) + if shell: + return shell return None diff --git a/pipenv/vendor/six.LICENSE b/pipenv/vendor/six.LICENSE index 4b05a54526..de6633112c 100644 --- a/pipenv/vendor/six.LICENSE +++ b/pipenv/vendor/six.LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2010-2019 Benjamin Peterson +Copyright (c) 2010-2020 Benjamin Peterson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/pipenv/vendor/six.py b/pipenv/vendor/six.py index 357e624abc..5fe9f8e141 100644 --- a/pipenv/vendor/six.py +++ b/pipenv/vendor/six.py @@ -1,4 +1,4 @@ -# Copyright (c) 2010-2019 Benjamin Peterson +# Copyright (c) 2010-2020 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -29,7 +29,7 @@ import types __author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.13.0" +__version__ = "1.14.0" # Useful for very coarse version differentiation. @@ -259,7 +259,7 @@ class _MovedItems(_LazyModule): MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), @@ -644,9 +644,11 @@ def u(s): if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" + _assertNotRegex = "assertNotRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" + _assertNotRegex = "assertNotRegex" else: def b(s): return s @@ -668,6 +670,7 @@ def indexbytes(buf, i): _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" + _assertNotRegex = "assertNotRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") @@ -684,6 +687,10 @@ def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) +def assertNotRegex(self, *args, **kwargs): + return getattr(self, _assertNotRegex)(*args, **kwargs) + + if PY3: exec_ = getattr(moves.builtins, "exec") @@ -719,16 +726,7 @@ def exec_(_code_, _globs_=None, _locs_=None): """) -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - try: - if from_value is None: - raise value - raise value from from_value - finally: - value = None -""") -elif sys.version_info[:2] > (3, 2): +if sys.version_info[:2] > (3,): exec_("""def raise_from(value, from_value): try: raise value from from_value @@ -808,13 +806,33 @@ def print_(*args, **kwargs): _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): + # This does exactly the same what the :func:`py3:functools.update_wrapper` + # function does on 
Python versions after 3.2. It sets the ``__wrapped__`` + # attribute on ``wrapper`` object and it doesn't raise an error if any of + # the attributes mentioned in ``assigned`` and ``updated`` are missing on + # ``wrapped`` object. + def _update_wrapper(wrapper, wrapped, + assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + for attr in assigned: + try: + value = getattr(wrapped, attr) + except AttributeError: + continue + else: + setattr(wrapper, attr, value) + for attr in updated: + getattr(wrapper, attr).update(getattr(wrapped, attr, {})) + wrapper.__wrapped__ = wrapped + return wrapper + _update_wrapper.__doc__ = functools.update_wrapper.__doc__ + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper + return functools.partial(_update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) + wraps.__doc__ = functools.wraps.__doc__ + else: wraps = functools.wraps @@ -919,10 +937,9 @@ def ensure_text(s, encoding='utf-8', errors='strict'): raise TypeError("not expecting type '%s'" % type(s)) - def python_2_unicode_compatible(klass): """ - A decorator that defines __unicode__ and __str__ methods under Python 2. + A class decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method diff --git a/pipenv/vendor/tomlkit/__init__.py b/pipenv/vendor/tomlkit/__init__.py index be5c373f4b..ab126006c6 100644 --- a/pipenv/vendor/tomlkit/__init__.py +++ b/pipenv/vendor/tomlkit/__init__.py @@ -22,4 +22,4 @@ from .api import ws -__version__ = "0.5.8" +__version__ = "0.5.11" diff --git a/pipenv/vendor/tomlkit/container.py b/pipenv/vendor/tomlkit/container.py index b4e7cf2cd6..4b15b36503 100644 --- a/pipenv/vendor/tomlkit/container.py +++ b/pipenv/vendor/tomlkit/container.py @@ -15,6 +15,9 @@ from .items import item as _item +_NOT_SET = object() + + class Container(dict): """ A container for items within a TOMLDocument. 
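The ``_NOT_SET`` sentinel added above exists so that the ``pop`` introduced in the next hunk can distinguish "no default supplied" from an explicit ``default=None``. A minimal standalone sketch of the same sentinel pattern (illustrative only, not tomlkit's actual class):

    _NOT_SET = object()

    class Bag(dict):
        def pop(self, key, default=_NOT_SET):
            try:
                value = self[key]
            except KeyError:
                if default is _NOT_SET:
                    raise  # no default supplied: let the KeyError propagate
                return default
            del self[key]
            return value

    bag = Bag(a=1)
    assert bag.pop('a') == 1
    assert bag.pop('missing', None) is None  # explicit None is a valid default

A module-private ``object()`` instance is the usual choice here because it is guaranteed not to compare equal (via ``is``) to any value a caller could legitimately pass as a default.
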
@@ -498,6 +501,19 @@ def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any return self[key] + def pop(self, key, default=_NOT_SET): + try: + value = self[key] + except KeyError: + if default is _NOT_SET: + raise + + return default + + del self[key] + + return value + def setdefault( self, key, default=None ): # type: (Union[Key, str], Any) -> Union[Item, Container] @@ -521,21 +537,12 @@ def __getitem__(self, key): # type: (Union[Key, str]) -> Union[Item, Container] raise NonExistentKey(key) if isinstance(idx, tuple): - container = Container(True) - - for i in idx: - item = self._body[i][1] - - if isinstance(item, Table): - for k, v in item.value.body: - container.append(k, v) - else: - container.append(key, item) - - return container + # The item we are getting is an out of order table + # so we need a proxy to retrieve the proper objects + # from the parent container + return OutOfOrderTableProxy(self, idx) item = self._body[idx][1] - if item.is_boolean(): return item.value @@ -568,6 +575,9 @@ def _replace( def _replace_at( self, idx, new_key, value ): # type: (Union[int, Tuple[int]], Union[Key, str], Item) -> None + if not isinstance(new_key, Key): + new_key = Key(new_key) + if isinstance(idx, tuple): for i in idx[1:]: self._body[i] = (None, Null()) @@ -577,6 +587,8 @@ def _replace_at( k, v = self._body[idx] self._map[new_key] = self._map.pop(k) + if new_key != k: + super(Container, self).__delitem__(k) if isinstance(self._map[new_key], tuple): self._map[new_key] = self._map[new_key][0] @@ -637,3 +649,99 @@ def __copy__(self): # type: () -> Container c._map.update(self._map) return c + + +class OutOfOrderTableProxy(dict): + def __init__(self, container, indices): # type: (Container, Tuple) -> None + self._container = container + self._internal_container = Container(self._container.parsing) + self._tables = [] + self._tables_map = {} + self._map = {} + + for i in indices: + key, item = self._container._body[i] + + if isinstance(item, Table): + self._tables.append(item) + table_idx = len(self._tables) - 1 + for k, v in item.value.body: + self._internal_container.append(k, v) + self._tables_map[k] = table_idx + else: + self._internal_container.append(key, item) + self._map[key] = i + + def __getitem__(self, key): # type: (Union[Key, str]) -> Any + if key not in self._internal_container: + raise NonExistentKey(key) + + return self._internal_container[key] + + def __setitem__(self, key, item): # type: (Union[Key, str], Any) -> None + if key in self._map: + idx = self._map[key] + self._container._replace_at(idx, key, item) + elif key in self._tables_map: + table = self._tables[self._tables_map[key]] + table[key] = item + elif self._tables: + table = self._tables[0] + table[key] = item + else: + self._container[key] = item + + def __delitem__(self, key): # type: (Union[Key, str]) -> None + if key in self._map: + idx = self._map[key] + del self._container[key] + del self._map[key] + elif key in self._tables_map: + table = self._tables[self._tables_map[key]] + del table[key] + del self._tables_map[key] + else: + raise NonExistentKey(key) + + del self._internal_container[key] + + def keys(self): + return self._internal_container.keys() + + def values(self): + return self._internal_container.values() + + def items(self): # type: () -> Generator[Item] + return self._internal_container.items() + + def update(self, other): # type: (Dict) -> None + self._internal_container.update(other) + + def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any + return 
self._internal_container.get(key, default=default) + + def pop(self, key, default=_NOT_SET): + return self._internal_container.pop(key, default=default) + + def setdefault( + self, key, default=None + ): # type: (Union[Key, str], Any) -> Union[Item, Container] + return self._internal_container.setdefault(key, default=default) + + def __contains__(self, key): + return key in self._internal_container + + def __str__(self): + return str(self._internal_container) + + def __repr__(self): + return repr(self._internal_container) + + def __eq__(self, other): # type: (Dict) -> bool + if not isinstance(other, dict): + return NotImplemented + + return self._internal_container == other + + def __getattr__(self, attribute): + return getattr(self._internal_container, attribute) diff --git a/pipenv/vendor/tomlkit/items.py b/pipenv/vendor/tomlkit/items.py index 6588cda92f..309fe1d833 100644 --- a/pipenv/vendor/tomlkit/items.py +++ b/pipenv/vendor/tomlkit/items.py @@ -36,6 +36,7 @@ def item(value, _parent=None): elif isinstance(value, float): return Float(value, Trivia(), str(value)) elif isinstance(value, dict): + val = Table(Container(), Trivia(), False) if isinstance(value, InlineTableDict): val = InlineTable(Container(), Trivia()) else: diff --git a/pipenv/vendor/tomlkit/parser.py b/pipenv/vendor/tomlkit/parser.py index 0fb5068400..13fd9f9852 100644 --- a/pipenv/vendor/tomlkit/parser.py +++ b/pipenv/vendor/tomlkit/parser.py @@ -194,7 +194,8 @@ def _split_table_name(self, name): # type: (str) -> Generator[Key] in_name = False current = "" t = KeyType.Bare - for c in name: + parts = 0 + for c in name.strip(): c = TOMLChar(c) if c == ".": @@ -205,14 +206,20 @@ def _split_table_name(self, name): # type: (str) -> Generator[Key] if not current: raise self.parse_error() - yield Key(current, t=t, sep="") + yield Key(current.strip(), t=t, sep="") + parts += 1 current = "" t = KeyType.Bare continue elif c in {"'", '"'}: if in_name: - if t == KeyType.Literal and c == '"': + if ( + t == KeyType.Literal + and c == '"' + or t == KeyType.Basic + and c == "'" + ): current += c continue @@ -221,17 +228,35 @@ def _split_table_name(self, name): # type: (str) -> Generator[Key] in_name = False else: + if current and TOMLChar(current[-1]).is_spaces() and not parts: + raise self.parse_error() + in_name = True t = KeyType.Literal if c == "'" else KeyType.Basic continue elif in_name or c.is_bare_key_char(): + if ( + not in_name + and current + and TOMLChar(current[-1]).is_spaces() + and not parts + ): + raise self.parse_error() + current += c + elif c.is_spaces(): + # A space is only valid at this point + # if it's in between parts. 
+ # We store it for now and will check + # later if it's valid + current += c + continue else: raise self.parse_error() - if current: - yield Key(current, t=t, sep="") + if current.strip(): + yield Key(current.strip(), t=t, sep="") def _parse_item(self): # type: () -> Optional[Tuple[Optional[Key], Item]] """ @@ -904,15 +929,46 @@ def _parse_table( is_aot = True - # Key + # Consume any whitespace self.mark() - while self._current != "]" and self.inc(): - if self.end(): - raise self.parse_error(UnexpectedEofError) - + while self._current.is_spaces() and self.inc(): pass - name = self.extract() + ws_prefix = self.extract() + + # Key + if self._current in [StringType.SLL.value, StringType.SLB.value]: + delimiter = ( + StringType.SLL + if self._current == StringType.SLL.value + else StringType.SLB + ) + name = self._parse_string(delimiter) + name = "{delimiter}{name}{delimiter}".format( + delimiter=delimiter.value, name=name + ) + + self.mark() + while self._current != "]" and self.inc(): + if self.end(): + raise self.parse_error(UnexpectedEofError) + + pass + + ws_suffix = self.extract() + name += ws_suffix + else: + self.mark() + while self._current != "]" and self.inc(): + if self.end(): + raise self.parse_error(UnexpectedEofError) + + pass + + name = self.extract() + + name = ws_prefix + name + if not name.strip(): raise self.parse_error(EmptyTableNameError) diff --git a/pipenv/vendor/tomlkit/toml_char.py b/pipenv/vendor/tomlkit/toml_char.py index 02c5517289..d649a917c1 100644 --- a/pipenv/vendor/tomlkit/toml_char.py +++ b/pipenv/vendor/tomlkit/toml_char.py @@ -4,7 +4,7 @@ from ._compat import unicode if PY2: - from pipenv.vendor.backports.functools_lru_cache import lru_cache + from functools32 import lru_cache else: from functools import lru_cache diff --git a/pipenv/vendor/urllib3/__init__.py b/pipenv/vendor/urllib3/__init__.py index 96474d3680..9bd8323f91 100644 --- a/pipenv/vendor/urllib3/__init__.py +++ b/pipenv/vendor/urllib3/__init__.py @@ -22,7 +22,7 @@ __author__ = "Andrey Petrov (andrey.petrov@shazow.net)" __license__ = "MIT" -__version__ = "1.25.7" +__version__ = "1.25.8" __all__ = ( "HTTPConnectionPool", diff --git a/pipenv/vendor/urllib3/connection.py b/pipenv/vendor/urllib3/connection.py index f5c946adf7..71e6790b1b 100644 --- a/pipenv/vendor/urllib3/connection.py +++ b/pipenv/vendor/urllib3/connection.py @@ -251,40 +251,6 @@ def __init__( # HTTPS requests to go out as HTTP. (See Issue #356) self._protocol = "https" - def connect(self): - conn = self._new_conn() - self._prepare_conn(conn) - - # Wrap socket using verification with the root certs in - # trusted_root_certs - default_ssl_context = False - if self.ssl_context is None: - default_ssl_context = True - self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(self.ssl_version), - cert_reqs=resolve_cert_reqs(self.cert_reqs), - ) - - # Try to load OS default certs if none are given. 
- # Works well on Windows (requires Python3.4+) - context = self.ssl_context - if ( - not self.ca_certs - and not self.ca_cert_dir - and default_ssl_context - and hasattr(context, "load_default_certs") - ): - context.load_default_certs() - - self.sock = ssl_wrap_socket( - sock=conn, - keyfile=self.key_file, - certfile=self.cert_file, - key_password=self.key_password, - ssl_context=self.ssl_context, - server_hostname=self.server_hostname, - ) - class VerifiedHTTPSConnection(HTTPSConnection): """ diff --git a/pipenv/vendor/urllib3/connectionpool.py b/pipenv/vendor/urllib3/connectionpool.py index 31696460f0..d42eb7be67 100644 --- a/pipenv/vendor/urllib3/connectionpool.py +++ b/pipenv/vendor/urllib3/connectionpool.py @@ -996,10 +996,10 @@ def _validate_conn(self, conn): if not conn.is_verified: warnings.warn( ( - "Unverified HTTPS request is being made. " + "Unverified HTTPS request is being made to host '%s'. " "Adding certificate verification is strongly advised. See: " "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" - "#ssl-warnings" + "#ssl-warnings" % conn.host ), InsecureRequestWarning, ) diff --git a/pipenv/vendor/urllib3/contrib/_appengine_environ.py b/pipenv/vendor/urllib3/contrib/_appengine_environ.py index 119efaeeb6..8765b907d7 100644 --- a/pipenv/vendor/urllib3/contrib/_appengine_environ.py +++ b/pipenv/vendor/urllib3/contrib/_appengine_environ.py @@ -6,7 +6,7 @@ def is_appengine(): - return "APPENGINE_RUNTIME" in os.environ + return is_local_appengine() or is_prod_appengine() def is_appengine_sandbox(): @@ -20,15 +20,15 @@ def is_appengine_sandbox(): def is_local_appengine(): - return is_appengine() and os.environ.get("SERVER_SOFTWARE", "").startswith( - "Development/" - ) + return "APPENGINE_RUNTIME" in os.environ and os.environ.get( + "SERVER_SOFTWARE", "" + ).startswith("Development/") def is_prod_appengine(): - return is_appengine() and os.environ.get("SERVER_SOFTWARE", "").startswith( - "Google App Engine/" - ) + return "APPENGINE_RUNTIME" in os.environ and os.environ.get( + "SERVER_SOFTWARE", "" + ).startswith("Google App Engine/") def is_prod_appengine_mvms(): diff --git a/pipenv/vendor/urllib3/response.py b/pipenv/vendor/urllib3/response.py index adc321e713..6090a7350f 100644 --- a/pipenv/vendor/urllib3/response.py +++ b/pipenv/vendor/urllib3/response.py @@ -792,7 +792,7 @@ def geturl(self): return self._request_url def __iter__(self): - buffer = [b""] + buffer = [] for chunk in self.stream(decode_content=True): if b"\n" in chunk: chunk = chunk.split(b"\n") diff --git a/pipenv/vendor/urllib3/util/ssl_.py b/pipenv/vendor/urllib3/util/ssl_.py index 8495b7753d..5b363d7f9e 100644 --- a/pipenv/vendor/urllib3/util/ssl_.py +++ b/pipenv/vendor/urllib3/util/ssl_.py @@ -182,7 +182,7 @@ def resolve_cert_reqs(candidate): """ Resolves the argument to a numeric constant, which can be passed to the wrap_socket function/method from the ssl module. - Defaults to :data:`ssl.CERT_NONE`. + Defaults to :data:`ssl.CERT_REQUIRED`. If given a string it is assumed to be the name of the constant in the :mod:`ssl` module or its abbreviation. (So you can specify `REQUIRED` instead of `CERT_REQUIRED`. diff --git a/pipenv/vendor/urllib3/util/url.py b/pipenv/vendor/urllib3/util/url.py index f7568e9d78..8ef5a2311d 100644 --- a/pipenv/vendor/urllib3/util/url.py +++ b/pipenv/vendor/urllib3/util/url.py @@ -216,18 +216,15 @@ def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"): component = six.ensure_text(component) + # Normalize existing percent-encoded bytes. 
# Try to see if the component we're encoding is already percent-encoded # so we can skip all '%' characters but still encode all others. - percent_encodings = PERCENT_RE.findall(component) - - # Normalize existing percent-encoded bytes. - for enc in percent_encodings: - if not enc.isupper(): - component = component.replace(enc, enc.upper()) + component, percent_encodings = PERCENT_RE.subn( + lambda match: match.group(0).upper(), component + ) uri_bytes = component.encode("utf-8", "surrogatepass") - is_percent_encoded = len(percent_encodings) == uri_bytes.count(b"%") - + is_percent_encoded = percent_encodings == uri_bytes.count(b"%") encoded_component = bytearray() for i in range(0, len(uri_bytes)): @@ -237,7 +234,7 @@ def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"): if (is_percent_encoded and byte == b"%") or ( byte_ord < 128 and byte.decode() in allowed_chars ): - encoded_component.extend(byte) + encoded_component += byte continue encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper())) @@ -322,9 +319,6 @@ def _idna_encode(name): def _encode_target(target): """Percent-encodes a request target so that there are no invalid characters""" - if not target.startswith("/"): - return target - path, query = TARGET_RE.match(target).groups() target = _encode_invalid_chars(path, PATH_CHARS) query = _encode_invalid_chars(query, QUERY_CHARS) diff --git a/pipenv/vendor/vistir/__init__.py b/pipenv/vendor/vistir/__init__.py index 53c1dc4363..fe78c8d5bb 100644 --- a/pipenv/vendor/vistir/__init__.py +++ b/pipenv/vendor/vistir/__init__.py @@ -36,7 +36,7 @@ from .path import create_tracked_tempdir, create_tracked_tempfile, mkdir_p, rmtree from .spin import create_spinner -__version__ = "0.5.1" +__version__ = "0.5.0" __all__ = [ diff --git a/tasks/vendoring/patches/patched/piptools.patch b/tasks/vendoring/patches/patched/piptools.patch index cb6ffab1de..dba36098e0 100644 --- a/tasks/vendoring/patches/patched/piptools.patch +++ b/tasks/vendoring/patches/patched/piptools.patch @@ -247,7 +247,7 @@ index acbd680..4bd3e22 100644 + + def _get_file_hash(self, location): + h = hashlib.new(FAVORITE_HASH) -+ with open_local_or_remote_file(location, self.session) as fp: ++ with open_local_or_remote_file(location, self.session) as (fp, size): + for chunk in iter(lambda: fp.read(8096), b""): + h.update(chunk) + return ":".join([FAVORITE_HASH, h.hexdigest()]) From 662e7c77c98b48a0812b0b42ff16adaedd2da605 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Thu, 26 Mar 2020 17:10:35 -0400 Subject: [PATCH 06/49] Update tomlkit patch - Update tomlkit patch - Update test artifacts Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tasks/vendoring/patches/vendor/tomlkit-fix.patch | 11 +++++------ tests/pypi | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/tasks/vendoring/patches/vendor/tomlkit-fix.patch b/tasks/vendoring/patches/vendor/tomlkit-fix.patch index 26b761ccbe..1c32fd2487 100644 --- a/tasks/vendoring/patches/vendor/tomlkit-fix.patch +++ b/tasks/vendoring/patches/vendor/tomlkit-fix.patch @@ -55,7 +55,7 @@ index 4fbc667b..c1a4e620 100644 class TOMLKitError(Exception): diff --git a/pipenv/vendor/tomlkit/items.py b/pipenv/vendor/tomlkit/items.py -index 375b5f02..cccfd4a1 100644 +index 8399d0c3..68c47a6d 100644 --- a/pipenv/vendor/tomlkit/items.py +++ b/pipenv/vendor/tomlkit/items.py @@ -6,14 +6,6 @@ import string @@ -73,12 +73,12 @@ index 375b5f02..cccfd4a1 100644 from ._compat import PY2 from ._compat import PY38 -@@ -23,9 +14,12 @@ from 
._compat import unicode +@@ -23,9 +15,12 @@ from ._compat import unicode from ._utils import escape_string if PY2: + from pipenv.vendor.backports.enum import Enum - from pipenv.vendor.backports.functools_lru_cache import lru_cache + from functools32 import lru_cache else: + from enum import Enum from functools import lru_cache @@ -86,11 +86,10 @@ index 375b5f02..cccfd4a1 100644 def item(value, _parent=None): -@@ -41,7 +35,10 @@ def item(value, _parent=None): - elif isinstance(value, float): +@@ -42,6 +37,10 @@ def item(value, _parent=None): return Float(value, Trivia(), str(value)) elif isinstance(value, dict): -- val = Table(Container(), Trivia(), False) + val = Table(Container(), Trivia(), False) + if isinstance(value, InlineTableDict): + val = InlineTable(Container(), Trivia()) + else: diff --git a/tests/pypi b/tests/pypi index 38f55ba588..1923638aee 160000 --- a/tests/pypi +++ b/tests/pypi @@ -1 +1 @@ -Subproject commit 38f55ba5883f1ce47c6f1f46feecc0d318c444a5 +Subproject commit 1923638aee441296130923610e6c2e3130057c44 From 01157a21d7e39cd23cb73682b945c34cc1cce956 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Thu, 26 Mar 2020 18:10:22 -0400 Subject: [PATCH 07/49] Fix tomlkit patch and update library Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/vendor/tomlkit/container.py | 1 + .../vendoring/patches/vendor/tomlkit-fix.patch | 17 ++++++++++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/pipenv/vendor/tomlkit/container.py b/pipenv/vendor/tomlkit/container.py index 4b15b36503..96415901fe 100644 --- a/pipenv/vendor/tomlkit/container.py +++ b/pipenv/vendor/tomlkit/container.py @@ -11,6 +11,7 @@ from .items import Key from .items import Null from .items import Table +from .items import Trivia from .items import Whitespace from .items import item as _item diff --git a/tasks/vendoring/patches/vendor/tomlkit-fix.patch b/tasks/vendoring/patches/vendor/tomlkit-fix.patch index 1c32fd2487..2785f99e15 100644 --- a/tasks/vendoring/patches/vendor/tomlkit-fix.patch +++ b/tasks/vendoring/patches/vendor/tomlkit-fix.patch @@ -11,12 +11,11 @@ index e541c20c..0ac26752 100644 from .container import Container from .items import AoT diff --git a/pipenv/vendor/tomlkit/container.py b/pipenv/vendor/tomlkit/container.py -index cb8af1d5..9b5db5cb 100644 +index a7d4fe90..96415901 100644 --- a/pipenv/vendor/tomlkit/container.py +++ b/pipenv/vendor/tomlkit/container.py -@@ -1,15 +1,7 @@ - from __future__ import unicode_literals - +@@ -2,14 +2,6 @@ from __future__ import unicode_literals + import copy -from typing import Any @@ -30,7 +29,15 @@ index cb8af1d5..9b5db5cb 100644 from ._compat import decode from .exceptions import KeyAlreadyPresent from .exceptions import NonExistentKey -@@ -221,7 +214,12 @@ class Container(dict): +@@ -19,6 +11,7 @@ from .items import Item + from .items import Key + from .items import Null + from .items import Table ++from .items import Trivia + from .items import Whitespace + from .items import item as _item + +@@ -226,7 +219,12 @@ class Container(dict): for i in idx: self._body[i] = (None, Null()) else: From bff39c5720dfe7ad9a8534e8c688c3c444aac7a1 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Mon, 30 Mar 2020 11:22:34 -0400 Subject: [PATCH 08/49] Update piptools patches - Fix hash retrieval and reverse dependency traversal for wheel urls Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/patched/piptools/repositories/pypi.py | 2 +- pipenv/patched/piptools/resolver.py | 2 +- 
.../vendoring/patches/patched/piptools.patch | 22 +++++++++++++++---- 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/pipenv/patched/piptools/repositories/pypi.py b/pipenv/patched/piptools/repositories/pypi.py index a56cc6e640..ff02d36c0c 100644 --- a/pipenv/patched/piptools/repositories/pypi.py +++ b/pipenv/patched/piptools/repositories/pypi.py @@ -436,7 +436,7 @@ def get_hashes(self, ireq): cached_link = Link(path_to_url(cached_path)) else: cached_link = link - return {self._get_file_hash(cached_link)} + return {self._hash_cache._get_file_hash(cached_link)} if not is_pinned_requirement(ireq): raise TypeError("Expected pinned requirement, got {}".format(ireq)) diff --git a/pipenv/patched/piptools/resolver.py b/pipenv/patched/piptools/resolver.py index c05666512a..8fd8a57f68 100644 --- a/pipenv/patched/piptools/resolver.py +++ b/pipenv/patched/piptools/resolver.py @@ -342,7 +342,7 @@ def _iter_dependencies(self, ireq): Editable requirements will never be looked up, as they may have changed at any time. """ - if ireq.editable or is_url_requirement(ireq): + if ireq.editable or (is_url_requirement(ireq) and not ireq.link.is_wheel): for dependency in self.repository.get_dependencies(ireq): yield dependency return diff --git a/tasks/vendoring/patches/patched/piptools.patch b/tasks/vendoring/patches/patched/piptools.patch index dba36098e0..4cbeb3dcbe 100644 --- a/tasks/vendoring/patches/patched/piptools.patch +++ b/tasks/vendoring/patches/patched/piptools.patch @@ -155,7 +155,7 @@ index f389784..c1bcf9d 100644 else: return self.repository.find_best_match(ireq, prereleases) diff --git a/pipenv/patched/piptools/repositories/pypi.py b/pipenv/patched/piptools/repositories/pypi.py -index acbd680..4bd3e22 100644 +index acbd680..13378ae 100644 --- a/pipenv/patched/piptools/repositories/pypi.py +++ b/pipenv/patched/piptools/repositories/pypi.py @@ -2,21 +2,29 @@ @@ -209,7 +209,7 @@ index acbd680..4bd3e22 100644 ) from .base import BaseRepository -+os.environ["PIP_SHIMS_BASE_MODULE"] = str("pipenv.patched.notpip") ++os.environ["PIP_SHIMS_BASE_MODULE"] = str("pip") FILE_CHUNK_SIZE = 4096 FileStream = collections.namedtuple("FileStream", "stream size") @@ -475,6 +475,15 @@ index acbd680..4bd3e22 100644 finally: if "PIP_REQ_TRACKER" in os.environ: if prev_tracker: +@@ -305,7 +436,7 @@ class PyPIRepository(BaseRepository): + cached_link = Link(path_to_url(cached_path)) + else: + cached_link = link +- return {self._get_file_hash(cached_link)} ++ return {self._hash_cache._get_file_hash(cached_link)} + + if not is_pinned_requirement(ireq): + raise TypeError("Expected pinned requirement, got {}".format(ireq)) @@ -313,12 +444,10 @@ class PyPIRepository(BaseRepository): # We need to get all of the candidates that match our current version # pin, these will represent all of the files that could possibly @@ -526,7 +535,7 @@ index acbd680..4bd3e22 100644 def allow_all_wheels(self): """ diff --git a/pipenv/patched/piptools/resolver.py b/pipenv/patched/piptools/resolver.py -index fc53f18..c056665 100644 +index fc53f18..8fd8a57 100644 --- a/pipenv/patched/piptools/resolver.py +++ b/pipenv/patched/piptools/resolver.py @@ -34,6 +34,7 @@ class RequirementSummary(object): @@ -550,7 +559,12 @@ index fc53f18..c056665 100644 # Return a sorted, de-duped tuple of extras combined_ireq.extras = tuple( sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras))) -@@ -339,6 +346,15 @@ class Resolver(object): +@@ -335,10 +342,19 @@ class Resolver(object): + Editable requirements will never be looked up, as they may 
have + changed at any time. + """ +- if ireq.editable or is_url_requirement(ireq): ++ if ireq.editable or (is_url_requirement(ireq) and not ireq.link.is_wheel): for dependency in self.repository.get_dependencies(ireq): yield dependency return From 00bef5eac7ced03789d9a54ccced04c43010f51f Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Mon, 30 Mar 2020 15:30:49 -0400 Subject: [PATCH 09/49] Fix outline table test to avoid pep517 builder - Add poetry to test fixtures Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tests/integration/test_install_basic.py | 4 ++-- tests/pypi | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_install_basic.py b/tests/integration/test_install_basic.py index a6beb21d0b..88efd3ba88 100644 --- a/tests/integration/test_install_basic.py +++ b/tests/integration/test_install_basic.py @@ -466,11 +466,11 @@ def test_rewrite_outline_table(PipenvInstance): extras = ["socks"] """.strip() f.write(contents) - c = p.pipenv("install plette") + c = p.pipenv("install flask") assert c.return_code == 0 with open(p.pipfile_path) as f: contents = f.read() assert "[packages.requests]" not in contents assert 'six = {version = "*"}' in contents assert 'requests = {version = "*"' in contents - assert 'plette = "*"' in contents + assert 'flask = "*"' in contents diff --git a/tests/pypi b/tests/pypi index 1923638aee..ecbbb8775b 160000 --- a/tests/pypi +++ b/tests/pypi @@ -1 +1 @@ -Subproject commit 1923638aee441296130923610e6c2e3130057c44 +Subproject commit ecbbb8775b87be07d32afd70909750815b39ae60 From ee093c5b13af2bf145337f8eae28c47636d12d98 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Mon, 30 Mar 2020 17:45:30 -0400 Subject: [PATCH 10/49] Update pip version-specific code - Use shimmed `InstallCommand` from `pip_shims` - Update `PackageFinder` usage - Fix attempts to stringify `Bool` instances from tomlkit - Add fallback for direct URL resolution - Update `Requirementslib` with bugfix Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/environment.py | 38 +++++-------------- pipenv/project.py | 2 +- pipenv/utils.py | 11 +++++- .../requirementslib/models/setup_info.py | 6 +-- 4 files changed, 23 insertions(+), 34 deletions(-) diff --git a/pipenv/environment.py b/pipenv/environment.py index b57a858a88..d472e3455e 100644 --- a/pipenv/environment.py +++ b/pipenv/environment.py @@ -466,7 +466,7 @@ def pip_version(self): ), None) if pip is not None: return parse_version(pip.version) - return parse_version("18.0") + return parse_version("19.3") def get_distributions(self): """ @@ -529,42 +529,21 @@ def get_installed_packages(self): @contextlib.contextmanager def get_finder(self, pre=False): from .vendor.pip_shims.shims import ( - Command, cmdoptions, index_group, PackageFinder, parse_version, pip_version + InstallCommand, get_package_finder ) from .environments import PIPENV_CACHE_DIR - index_urls = [source.get("url") for source in self.sources] - class PipCommand(Command): - name = "PipCommand" - - pip_command = PipCommand() - index_opts = cmdoptions.make_option_group( - index_group, pip_command.parser - ) - cmd_opts = pip_command.cmd_opts - pip_command.parser.insert_option_group(0, index_opts) - pip_command.parser.insert_option_group(0, cmd_opts) + pip_command = InstallCommand() pip_args = self._modules["pipenv"].utils.prepare_pip_source_args(self.sources) pip_options, _ = pip_command.parser.parse_args(pip_args) pip_options.cache_dir = PIPENV_CACHE_DIR pip_options.pre = self.pipfile.get("pre", 
pre) with pip_command._build_session(pip_options) as session: - finder_args = { - "find_links": pip_options.find_links, - "index_urls": index_urls, - "allow_all_prereleases": pip_options.pre, - "trusted_hosts": pip_options.trusted_hosts, - "session": session - } - if parse_version(pip_version) < parse_version("19.0"): - finder_args.update( - {"process_dependency_links": pip_options.process_dependency_links} - ) - finder = PackageFinder(**finder_args) + finder = get_package_finder(install_cmd=pip_command, options=pip_options, session=session) yield finder def get_package_info(self, pre=False): - from .vendor.pip_shims.shims import pip_version, parse_version + from .vendor.pip_shims.shims import pip_version, parse_version, CandidateEvaluator dependency_links = [] packages = self.get_installed_packages() # This code is borrowed from pip's current implementation @@ -591,9 +570,10 @@ def get_package_info(self, pre=False): if not all_candidates: continue - best_candidate = max(all_candidates, key=finder._candidate_sort_key) - remote_version = best_candidate.version - if best_candidate.location.is_wheel: + candidate_evaluator = finder.make_candidate_evaluator(project_name=dist.key) + best_candidate_result = candidate_evaluator.compute_best_candidate(all_candidates) + remote_version = best_candidate_result.best_candidate.version + if best_candidate_result.best_candidate.link.is_wheel: typ = 'wheel' else: typ = 'sdist' diff --git a/pipenv/project.py b/pipenv/project.py index 5b14106e66..4cb2ff9c51 100644 --- a/pipenv/project.py +++ b/pipenv/project.py @@ -723,7 +723,7 @@ def populate_source(cls, source): if "verify_ssl" not in source: source["verify_ssl"] = "https://" in source["url"] if not isinstance(source["verify_ssl"], bool): - source["verify_ssl"] = source["verify_ssl"].lower() == "true" + source["verify_ssl"] = str(source["verify_ssl"]).lower() == "true" return source def get_or_create_lockfile(self, from_pipfile=False): diff --git a/pipenv/utils.py b/pipenv/utils.py index 680b7e218c..98754f6971 100644 --- a/pipenv/utils.py +++ b/pipenv/utils.py @@ -472,6 +472,7 @@ def parse_line( ): # type: (...) 
-> Tuple[Requirement, Dict[str, str], Dict[str, str]]
     from .vendor.requirementslib.models.requirements import Requirement
+    from .vendor.requirementslib.models.utils import DIRECT_URL_RE
     if index_lookup is None:
         index_lookup = {}
     if markers_lookup is None:
@@ -488,7 +489,15 @@ def parse_line(
     try:
         req = Requirement.from_line(line)
     except ValueError:
-        raise ResolutionFailure("Failed to resolve requirement from line: {0!s}".format(line))
+        direct_url = DIRECT_URL_RE.match(line)
+        if direct_url:
+            line = "{0}#egg={1}".format(line, direct_url.groupdict()["name"])
+            try:
+                req = Requirement.from_line(line)
+            except ValueError:
+                raise ResolutionFailure("Failed to resolve requirement from line: {0!s}".format(line))
+        else:
+            raise ResolutionFailure("Failed to resolve requirement from line: {0!s}".format(line))
     if url:
         try:
             index_lookup[req.normalized_name] = project.get_source(
diff --git a/pipenv/vendor/requirementslib/models/setup_info.py b/pipenv/vendor/requirementslib/models/setup_info.py
index 38fffd4f55..81b4b715b3 100644
--- a/pipenv/vendor/requirementslib/models/setup_info.py
+++ b/pipenv/vendor/requirementslib/models/setup_info.py
@@ -219,13 +219,13 @@ def setuptools_parse_setup_cfg(path):
     parsed = read_configuration(path)
     results = parsed.get("metadata", {})
-    results.update({parsed.get("options", {})})
+    results.update(parsed.get("options", {}))
     results["install_requires"] = make_base_requirements(
         results.get("install_requires", [])
     )
     extras = {}
-    for extras_section, extras in results.get("extras_require", {}).items():
-        new_reqs = tuple(make_base_requirements(extras))
+    for extras_section, extras_reqs in results.get("extras_require", {}).items():
+        new_reqs = tuple(make_base_requirements(extras_reqs))
         if new_reqs:
             extras[extras_section] = new_reqs
     results["extras_require"] = extras

From 7923151d2bb91daffa7723047e9f8f9d7f8d1a1a Mon Sep 17 00:00:00 2001
From: Dan Ryan <dan.ryan@canonical.com>
Date: Mon, 30 Mar 2020 18:39:21 -0400
Subject: [PATCH 11/49] Update vendoring script

- Pull licenses from .DIST-INFO files if available before cleaning up
  wheel metadata

Signed-off-by: Dan Ryan <dan.ryan@canonical.com>
---
 tasks/vendoring/__init__.py | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py
index 6c66d00850..7b0a11f20a 100644
--- a/tasks/vendoring/__init__.py
+++ b/tasks/vendoring/__init__.py
@@ -4,6 +4,7 @@
 """"Vendoring script, python 3.5 needed"""
 import io
+import itertools
 import re
 import shutil
 import sys
@@ -32,7 +33,6 @@
     'backports.shutil_get_terminal_size': 'backports/shutil_get_terminal_size',
     'backports.weakref': 'backports/weakref',
     'backports.functools_lru_cache': 'backports/functools_lru_cache',
-    'shutil_backports': 'backports/shutil_get_terminal_size',
     'python-dotenv': 'dotenv',
     'pip-tools': 'piptools',
     'setuptools': 'pkg_resources',
@@ -339,6 +339,20 @@ def install(ctx, vendor_dir, package=None):
                 requirement,
             )
         )
+    # read licenses from distinfo files if possible
+    for path in vendor_dir.glob("*.dist-info"):
+        pkg, _, _ = path.stem.rpartition("-")
+        license_file = path / "LICENSE"
+        if not license_file.exists():
+            continue
+        if vendor_dir.joinpath(pkg).exists():
+            vendor_dir.joinpath(pkg).joinpath("LICENSE").write_text(license_file.read_text())
+        elif vendor_dir.joinpath("{0}.py".format(pkg)).exists():
+            vendor_dir.joinpath("{0}.py.LICENSE".format(pkg)).write_text(license_file.read_text())
+        else:
+            matched_path = next(iter(pth for pth in
vendor_dir.glob("{0}*".format(pkg))), None) + if matched_path is not None: + vendor_dir.joinpath("{0}.LICENSE".format(matched_path)).write_text(license_file.read_text()) def post_install_cleanup(ctx, vendor_dir): @@ -348,6 +362,7 @@ def post_install_cleanup(ctx, vendor_dir): # Cleanup setuptools unneeded parts drop_dir(vendor_dir / 'bin') drop_dir(vendor_dir / 'tests') + drop_dir(vendor_dir / 'shutil_backports') remove_all(vendor_dir.glob('toml.py')) @@ -417,6 +432,7 @@ def vendor(ctx, vendor_dir, package=None, rewrite=True): @invoke.task def redo_imports(ctx, library): + vendor_dir = _get_vendor_dir(ctx) log('Using vendor dir: %s' % vendor_dir) vendored_libs = detect_vendored_libs(vendor_dir) item = vendor_dir / library @@ -449,7 +465,8 @@ def packages_missing_licenses(ctx, vendor_dir=None, requirements_file='vendor.tx vendor_dir = _get_vendor_dir(ctx) requirements = vendor_dir.joinpath(requirements_file).read_text().splitlines() new_requirements = [] - LICENSES = ["LICENSE-MIT", "LICENSE", "LICENSE.txt", "LICENSE.APACHE", "LICENSE.BSD"] + LICENSE_EXTS = ("rst", "txt", "APACHE", "BSD", "md") + LICENSES = [".".join(lic) for lic in itertools.product(("LICENSE", "LICENSE-MIT"), LICENSE_EXTS)] for i, req in enumerate(requirements): pkg = req.strip().split("=")[0] possible_pkgs = [pkg, pkg.replace('-', '_')] @@ -482,7 +499,7 @@ def packages_missing_licenses(ctx, vendor_dir=None, requirements_file='vendor.tx if match_found: continue else: - # log("%s: No license found in %s" % (pkg, pkgpath)) + # log("%s: No license found in %s" % (pkg, pkgpath)) new_requirements.append(req) return new_requirements From 6aa9d66549e101a10cea989a581ec2898efbcccf Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Mon, 30 Mar 2020 18:40:33 -0400 Subject: [PATCH 12/49] Update test url for discord link in tests - Point at ref that actually exists Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tests/unit/test_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index d3363037c3..f9dd08a8ef 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -51,11 +51,11 @@ # Extras in url { "discord.py": { - "file": "https://github.com/Rapptz/discord.py/archive/rewrite.zip", + "file": "https://github.com/Rapptz/discord.py/archive/async.zip", "extras": ["voice"], } }, - "https://github.com/Rapptz/discord.py/archive/rewrite.zip#egg=discord.py[voice]", + "https://github.com/Rapptz/discord.py/archive/async.zip#egg=discord.py[voice]", ), ( { From 5ef95982f38004d976aa030f0b6c22bdfd4219e4 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Mon, 30 Mar 2020 18:41:03 -0400 Subject: [PATCH 13/49] Add importlib_resources package to vendored pkgs Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/vendor/importlib_resources/LICENSE | 13 ++ pipenv/vendor/importlib_resources/__init__.py | 56 +++++ pipenv/vendor/importlib_resources/_common.py | 76 +++++++ pipenv/vendor/importlib_resources/_compat.py | 60 +++++ pipenv/vendor/importlib_resources/_py2.py | 142 ++++++++++++ pipenv/vendor/importlib_resources/_py3.py | 203 +++++++++++++++++ pipenv/vendor/importlib_resources/abc.py | 134 +++++++++++ .../importlib_resources/tests/__init__.py | 0 .../tests/data01/__init__.py | 0 .../tests/data01/binary.file | Bin 0 -> 4 bytes .../tests/data01/subdirectory/__init__.py | 0 .../tests/data01/subdirectory/binary.file | Bin 0 -> 4 bytes .../tests/data01/utf-16.file | Bin 0 -> 44 bytes .../tests/data01/utf-8.file | 
1 + .../tests/data02/__init__.py | 0 .../tests/data02/one/__init__.py | 0 .../tests/data02/one/resource1.txt | 1 + .../tests/data02/two/__init__.py | 0 .../tests/data02/two/resource2.txt | 1 + .../tests/data03/__init__.py | 0 .../tests/data03/namespace/resource1.txt | 0 .../importlib_resources/tests/test_open.py | 73 ++++++ .../importlib_resources/tests/test_path.py | 42 ++++ .../importlib_resources/tests/test_read.py | 63 ++++++ .../tests/test_resource.py | 170 ++++++++++++++ .../vendor/importlib_resources/tests/util.py | 213 ++++++++++++++++++ .../tests/zipdata01/__init__.py | 0 .../tests/zipdata01/ziptestdata.zip | Bin 0 -> 876 bytes .../tests/zipdata02/__init__.py | 0 .../tests/zipdata02/ziptestdata.zip | Bin 0 -> 698 bytes pipenv/vendor/importlib_resources/trees.py | 6 + 31 files changed, 1254 insertions(+) create mode 100644 pipenv/vendor/importlib_resources/LICENSE create mode 100644 pipenv/vendor/importlib_resources/__init__.py create mode 100644 pipenv/vendor/importlib_resources/_common.py create mode 100644 pipenv/vendor/importlib_resources/_compat.py create mode 100644 pipenv/vendor/importlib_resources/_py2.py create mode 100644 pipenv/vendor/importlib_resources/_py3.py create mode 100644 pipenv/vendor/importlib_resources/abc.py create mode 100644 pipenv/vendor/importlib_resources/tests/__init__.py create mode 100644 pipenv/vendor/importlib_resources/tests/data01/__init__.py create mode 100644 pipenv/vendor/importlib_resources/tests/data01/binary.file create mode 100644 pipenv/vendor/importlib_resources/tests/data01/subdirectory/__init__.py create mode 100644 pipenv/vendor/importlib_resources/tests/data01/subdirectory/binary.file create mode 100644 pipenv/vendor/importlib_resources/tests/data01/utf-16.file create mode 100644 pipenv/vendor/importlib_resources/tests/data01/utf-8.file create mode 100644 pipenv/vendor/importlib_resources/tests/data02/__init__.py create mode 100644 pipenv/vendor/importlib_resources/tests/data02/one/__init__.py create mode 100644 pipenv/vendor/importlib_resources/tests/data02/one/resource1.txt create mode 100644 pipenv/vendor/importlib_resources/tests/data02/two/__init__.py create mode 100644 pipenv/vendor/importlib_resources/tests/data02/two/resource2.txt create mode 100644 pipenv/vendor/importlib_resources/tests/data03/__init__.py create mode 100644 pipenv/vendor/importlib_resources/tests/data03/namespace/resource1.txt create mode 100644 pipenv/vendor/importlib_resources/tests/test_open.py create mode 100644 pipenv/vendor/importlib_resources/tests/test_path.py create mode 100644 pipenv/vendor/importlib_resources/tests/test_read.py create mode 100644 pipenv/vendor/importlib_resources/tests/test_resource.py create mode 100644 pipenv/vendor/importlib_resources/tests/util.py create mode 100644 pipenv/vendor/importlib_resources/tests/zipdata01/__init__.py create mode 100644 pipenv/vendor/importlib_resources/tests/zipdata01/ziptestdata.zip create mode 100644 pipenv/vendor/importlib_resources/tests/zipdata02/__init__.py create mode 100644 pipenv/vendor/importlib_resources/tests/zipdata02/ziptestdata.zip create mode 100644 pipenv/vendor/importlib_resources/trees.py diff --git a/pipenv/vendor/importlib_resources/LICENSE b/pipenv/vendor/importlib_resources/LICENSE new file mode 100644 index 0000000000..378b991a4d --- /dev/null +++ b/pipenv/vendor/importlib_resources/LICENSE @@ -0,0 +1,13 @@ +Copyright 2017-2019 Brett Cannon, Barry Warsaw + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the 
License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/pipenv/vendor/importlib_resources/__init__.py b/pipenv/vendor/importlib_resources/__init__.py new file mode 100644 index 0000000000..4bce94b0fb --- /dev/null +++ b/pipenv/vendor/importlib_resources/__init__.py @@ -0,0 +1,56 @@ +"""Read resources contained within a package.""" + +import sys + +from ._compat import metadata +from ._common import as_file + +# for compatibility. Ref #88 +__import__('importlib_resources.trees') + + +__all__ = [ + 'Package', + 'Resource', + 'ResourceReader', + 'as_file', + 'contents', + 'files', + 'is_resource', + 'open_binary', + 'open_text', + 'path', + 'read_binary', + 'read_text', + ] + + +if sys.version_info >= (3,): + from importlib_resources._py3 import ( + Package, + Resource, + contents, + files, + is_resource, + open_binary, + open_text, + path, + read_binary, + read_text, + ) + from importlib_resources.abc import ResourceReader +else: + from importlib_resources._py2 import ( + contents, + files, + is_resource, + open_binary, + open_text, + path, + read_binary, + read_text, + ) + del __all__[:3] + + +__version__ = metadata.version('importlib_resources') diff --git a/pipenv/vendor/importlib_resources/_common.py b/pipenv/vendor/importlib_resources/_common.py new file mode 100644 index 0000000000..3a5b7e445c --- /dev/null +++ b/pipenv/vendor/importlib_resources/_common.py @@ -0,0 +1,76 @@ +from __future__ import absolute_import + +import os +import tempfile +import contextlib + +from ._compat import ( + Path, package_spec, FileNotFoundError, ZipPath, + singledispatch, suppress, + ) + + +def from_package(package): + """ + Return a Traversable object for the given package. + + """ + spec = package_spec(package) + return from_traversable_resources(spec) or fallback_resources(spec) + + +def from_traversable_resources(spec): + """ + If the spec.loader implements TraversableResources, + directly or implicitly, it will have a ``files()`` method. + """ + with suppress(AttributeError): + return spec.loader.files() + + +def fallback_resources(spec): + package_directory = Path(spec.origin).parent + try: + archive_path = spec.loader.archive + rel_path = package_directory.relative_to(archive_path) + return ZipPath(archive_path, str(rel_path) + '/') + except Exception: + pass + return package_directory + + +@contextlib.contextmanager +def _tempfile(reader, suffix=''): + # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' + # blocks due to the need to close the temporary file to work on Windows + # properly. + fd, raw_path = tempfile.mkstemp(suffix=suffix) + try: + os.write(fd, reader()) + os.close(fd) + yield Path(raw_path) + finally: + try: + os.remove(raw_path) + except FileNotFoundError: + pass + + +@singledispatch +@contextlib.contextmanager +def as_file(path): + """ + Given a Traversable object, return that object as a + path on the local file system in a context manager. + """ + with _tempfile(path.read_bytes, suffix=path.name) as local: + yield local + + +@as_file.register(Path) +@contextlib.contextmanager +def _(path): + """ + Degenerate behavior for pathlib.Path objects. 
+ """ + yield path diff --git a/pipenv/vendor/importlib_resources/_compat.py b/pipenv/vendor/importlib_resources/_compat.py new file mode 100644 index 0000000000..48ca6afdab --- /dev/null +++ b/pipenv/vendor/importlib_resources/_compat.py @@ -0,0 +1,60 @@ +from __future__ import absolute_import + +# flake8: noqa + +try: + from pathlib import Path, PurePath +except ImportError: + from pathlib2 import Path, PurePath # type: ignore + + +try: + from contextlib import suppress +except ImportError: + from contextlib2 import suppress # type: ignore + + +try: + from functools import singledispatch +except ImportError: + from singledispatch import singledispatch # type: ignore + + +try: + from abc import ABC # type: ignore +except ImportError: + from abc import ABCMeta + + class ABC(object): # type: ignore + __metaclass__ = ABCMeta + + +try: + FileNotFoundError = FileNotFoundError # type: ignore +except NameError: + FileNotFoundError = OSError # type: ignore + + +try: + from importlib import metadata +except ImportError: + import importlib_metadata as metadata # type: ignore + + +try: + from zipfile import Path as ZipPath # type: ignore +except ImportError: + from zipp import Path as ZipPath # type: ignore + + +class PackageSpec(object): + def __init__(self, **kwargs): + vars(self).update(kwargs) + + +def package_spec(package): + return getattr(package, '__spec__', None) or \ + PackageSpec( + origin=package.__file__, + loader=getattr(package, '__loader__', None), + ) diff --git a/pipenv/vendor/importlib_resources/_py2.py b/pipenv/vendor/importlib_resources/_py2.py new file mode 100644 index 0000000000..26ce45d282 --- /dev/null +++ b/pipenv/vendor/importlib_resources/_py2.py @@ -0,0 +1,142 @@ +import os +import errno + +from . import _common +from ._compat import FileNotFoundError +from importlib import import_module +from io import BytesIO, TextIOWrapper, open as io_open + + +def _resolve(name): + """If name is a string, resolve to a module.""" + if not isinstance(name, basestring): # noqa: F821 + return name + return import_module(name) + + +def _get_package(package): + """Normalize a path by ensuring it is a string. + + If the resulting string contains path separators, an exception is raised. + """ + module = _resolve(package) + if not hasattr(module, '__path__'): + raise TypeError("{!r} is not a package".format(package)) + return module + + +def _normalize_path(path): + """Normalize a path by ensuring it is a string. + + If the resulting string contains path separators, an exception is raised. + """ + str_path = str(path) + parent, file_name = os.path.split(str_path) + if parent: + raise ValueError("{!r} must be only a file name".format(path)) + return file_name + + +def open_binary(package, resource): + """Return a file-like object opened for binary reading of the resource.""" + resource = _normalize_path(resource) + package = _get_package(package) + # Using pathlib doesn't work well here due to the lack of 'strict' argument + # for pathlib.Path.resolve() prior to Python 3.6. + package_path = os.path.dirname(package.__file__) + relative_path = os.path.join(package_path, resource) + full_path = os.path.abspath(relative_path) + try: + return io_open(full_path, 'rb') + except IOError: + # This might be a package in a zip file. zipimport provides a loader + # with a functioning get_data() method, however we have to strip the + # archive (i.e. the .zip file's name) off the front of the path. This + # is because the zipimport loader in Python 2 doesn't actually follow + # PEP 302. 
It should allow the full path, but actually requires that + # the path be relative to the zip file. + try: + loader = package.__loader__ + full_path = relative_path[len(loader.archive)+1:] + data = loader.get_data(full_path) + except (IOError, AttributeError): + package_name = package.__name__ + message = '{!r} resource not found in {!r}'.format( + resource, package_name) + raise FileNotFoundError(message) + return BytesIO(data) + + +def open_text(package, resource, encoding='utf-8', errors='strict'): + """Return a file-like object opened for text reading of the resource.""" + return TextIOWrapper( + open_binary(package, resource), encoding=encoding, errors=errors) + + +def read_binary(package, resource): + """Return the binary contents of the resource.""" + with open_binary(package, resource) as fp: + return fp.read() + + +def read_text(package, resource, encoding='utf-8', errors='strict'): + """Return the decoded string of the resource. + + The decoding-related arguments have the same semantics as those of + bytes.decode(). + """ + with open_text(package, resource, encoding, errors) as fp: + return fp.read() + + +def files(package): + return _common.from_package(_get_package(package)) + + +def path(package, resource): + """A context manager providing a file path object to the resource. + + If the resource does not already exist on its own on the file system, + a temporary file will be created. If the file was created, the file + will be deleted upon exiting the context manager (no exception is + raised if the file was deleted prior to the context manager + exiting). + """ + path = files(package).joinpath(_normalize_path(resource)) + if not path.is_file(): + raise FileNotFoundError(path) + return _common.as_file(path) + + +def is_resource(package, name): + """True if name is a resource inside package. + + Directories are *not* resources. + """ + package = _get_package(package) + _normalize_path(name) + try: + package_contents = set(contents(package)) + except OSError as error: + if error.errno not in (errno.ENOENT, errno.ENOTDIR): + # We won't hit this in the Python 2 tests, so it'll appear + # uncovered. We could mock os.listdir() to return a non-ENOENT or + # ENOTDIR, but then we'd have to depend on another external + # library since Python 2 doesn't have unittest.mock. It's not + # worth it. + raise # pragma: nocover + return False + if name not in package_contents: + return False + return (_common.from_package(package) / name).is_file() + + +def contents(package): + """Return an iterable of entries in `package`. + + Note that not all entries are resources. Specifically, directories are + not considered resources. Use `is_resource()` on each entry returned here + to check if it is a resource or not. + """ + package = _get_package(package) + return list(item.name for item in _common.from_package(package).iterdir()) diff --git a/pipenv/vendor/importlib_resources/_py3.py b/pipenv/vendor/importlib_resources/_py3.py new file mode 100644 index 0000000000..8dedde4c06 --- /dev/null +++ b/pipenv/vendor/importlib_resources/_py3.py @@ -0,0 +1,203 @@ +import os +import sys + +from . import abc as resources_abc +from . 
import _common +from contextlib import contextmanager, suppress +from importlib import import_module +from importlib.abc import ResourceLoader +from io import BytesIO, TextIOWrapper +from pathlib import Path +from types import ModuleType +from typing import Iterable, Iterator, Optional, Set, Union # noqa: F401 +from typing import cast +from typing.io import BinaryIO, TextIO + +if False: # TYPE_CHECKING + from typing import ContextManager + +Package = Union[ModuleType, str] +if sys.version_info >= (3, 6): + Resource = Union[str, os.PathLike] # pragma: <=35 +else: + Resource = str # pragma: >=36 + + +def _resolve(name) -> ModuleType: + """If name is a string, resolve to a module.""" + if hasattr(name, '__spec__'): + return name + return import_module(name) + + +def _get_package(package) -> ModuleType: + """Take a package name or module object and return the module. + + If a name, the module is imported. If the resolved module + object is not a package, raise an exception. + """ + module = _resolve(package) + if module.__spec__.submodule_search_locations is None: + raise TypeError('{!r} is not a package'.format(package)) + return module + + +def _normalize_path(path) -> str: + """Normalize a path by ensuring it is a string. + + If the resulting string contains path separators, an exception is raised. + """ + str_path = str(path) + parent, file_name = os.path.split(str_path) + if parent: + raise ValueError('{!r} must be only a file name'.format(path)) + return file_name + + +def _get_resource_reader( + package: ModuleType) -> Optional[resources_abc.ResourceReader]: + # Return the package's loader if it's a ResourceReader. We can't use + # a issubclass() check here because apparently abc.'s __subclasscheck__() + # hook wants to create a weak reference to the object, but + # zipimport.zipimporter does not support weak references, resulting in a + # TypeError. That seems terrible. + spec = package.__spec__ + reader = getattr(spec.loader, 'get_resource_reader', None) + if reader is None: + return None + return cast(resources_abc.ResourceReader, reader(spec.name)) + + +def open_binary(package: Package, resource: Resource) -> BinaryIO: + """Return a file-like object opened for binary reading of the resource.""" + resource = _normalize_path(resource) + package = _get_package(package) + reader = _get_resource_reader(package) + if reader is not None: + return reader.open_resource(resource) + # Using pathlib doesn't work well here due to the lack of 'strict' + # argument for pathlib.Path.resolve() prior to Python 3.6. + absolute_package_path = os.path.abspath( + package.__spec__.origin or 'non-existent file') + package_path = os.path.dirname(absolute_package_path) + full_path = os.path.join(package_path, resource) + try: + return open(full_path, mode='rb') + except OSError: + # Just assume the loader is a resource loader; all the relevant + # importlib.machinery loaders are and an AttributeError for + # get_data() will make it clear what is needed from the loader. 
+ loader = cast(ResourceLoader, package.__spec__.loader) + data = None + if hasattr(package.__spec__.loader, 'get_data'): + with suppress(OSError): + data = loader.get_data(full_path) + if data is None: + package_name = package.__spec__.name + message = '{!r} resource not found in {!r}'.format( + resource, package_name) + raise FileNotFoundError(message) + return BytesIO(data) + + +def open_text(package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict') -> TextIO: + """Return a file-like object opened for text reading of the resource.""" + return TextIOWrapper( + open_binary(package, resource), encoding=encoding, errors=errors) + + +def read_binary(package: Package, resource: Resource) -> bytes: + """Return the binary contents of the resource.""" + with open_binary(package, resource) as fp: + return fp.read() + + +def read_text(package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict') -> str: + """Return the decoded string of the resource. + + The decoding-related arguments have the same semantics as those of + bytes.decode(). + """ + with open_text(package, resource, encoding, errors) as fp: + return fp.read() + + +def files(package: Package) -> resources_abc.Traversable: + """ + Get a Traversable resource from a package + """ + return _common.from_package(_get_package(package)) + + +def path( + package: Package, resource: Resource, + ) -> 'ContextManager[Path]': + """A context manager providing a file path object to the resource. + + If the resource does not already exist on its own on the file system, + a temporary file will be created. If the file was created, the file + will be deleted upon exiting the context manager (no exception is + raised if the file was deleted prior to the context manager + exiting). + """ + reader = _get_resource_reader(_get_package(package)) + return ( + _path_from_reader(reader, resource) + if reader else + _common.as_file(files(package).joinpath(_normalize_path(resource))) + ) + + +@contextmanager +def _path_from_reader(reader, resource): + norm_resource = _normalize_path(resource) + with suppress(FileNotFoundError): + yield Path(reader.resource_path(norm_resource)) + return + opener_reader = reader.open_resource(norm_resource) + with _common._tempfile(opener_reader.read, suffix=norm_resource) as res: + yield res + + +def is_resource(package: Package, name: str) -> bool: + """True if `name` is a resource inside `package`. + + Directories are *not* resources. + """ + package = _get_package(package) + _normalize_path(name) + reader = _get_resource_reader(package) + if reader is not None: + return reader.is_resource(name) + package_contents = set(contents(package)) + if name not in package_contents: + return False + return (_common.from_package(package) / name).is_file() + + +def contents(package: Package) -> Iterable[str]: + """Return an iterable of entries in `package`. + + Note that not all entries are resources. Specifically, directories are + not considered resources. Use `is_resource()` on each entry returned here + to check if it is a resource or not. + """ + package = _get_package(package) + reader = _get_resource_reader(package) + if reader is not None: + return reader.contents() + # Is the package a namespace package? By definition, namespace packages + # cannot have resources. 
+ namespace = ( + package.__spec__.origin is None or + package.__spec__.origin == 'namespace' + ) + if namespace or not package.__spec__.has_location: + return () + return list(item.name for item in _common.from_package(package).iterdir()) diff --git a/pipenv/vendor/importlib_resources/abc.py b/pipenv/vendor/importlib_resources/abc.py new file mode 100644 index 0000000000..28596a4a58 --- /dev/null +++ b/pipenv/vendor/importlib_resources/abc.py @@ -0,0 +1,134 @@ +from __future__ import absolute_import + +import abc + +from ._compat import ABC, FileNotFoundError + +# Use mypy's comment syntax for Python 2 compatibility +try: + from typing import BinaryIO, Iterable, Text +except ImportError: + pass + + +class ResourceReader(ABC): + """Abstract base class for loaders to provide resource reading support.""" + + @abc.abstractmethod + def open_resource(self, resource): + # type: (Text) -> BinaryIO + """Return an opened, file-like object for binary reading. + + The 'resource' argument is expected to represent only a file name. + If the resource cannot be found, FileNotFoundError is raised. + """ + # This deliberately raises FileNotFoundError instead of + # NotImplementedError so that if this method is accidentally called, + # it'll still do the right thing. + raise FileNotFoundError + + @abc.abstractmethod + def resource_path(self, resource): + # type: (Text) -> Text + """Return the file system path to the specified resource. + + The 'resource' argument is expected to represent only a file name. + If the resource does not exist on the file system, raise + FileNotFoundError. + """ + # This deliberately raises FileNotFoundError instead of + # NotImplementedError so that if this method is accidentally called, + # it'll still do the right thing. + raise FileNotFoundError + + @abc.abstractmethod + def is_resource(self, path): + # type: (Text) -> bool + """Return True if the named 'path' is a resource. + + Files are resources, directories are not. + """ + raise FileNotFoundError + + @abc.abstractmethod + def contents(self): + # type: () -> Iterable[str] + """Return an iterable of entries in `package`.""" + raise FileNotFoundError + + +class Traversable(ABC): + """ + An object with a subset of pathlib.Path methods suitable for + traversing directories and opening files. + """ + + @abc.abstractmethod + def iterdir(self): + """ + Yield Traversable objects in self + """ + + @abc.abstractmethod + def read_bytes(self): + """ + Read contents of self as bytes + """ + + @abc.abstractmethod + def read_text(self, encoding=None): + """ + Read contents of self as bytes + """ + + @abc.abstractmethod + def is_dir(self): + """ + Return True if self is a dir + """ + + @abc.abstractmethod + def is_file(self): + """ + Return True if self is a file + """ + + @abc.abstractmethod + def joinpath(self, child): + """ + Return Traversable child in self + """ + + @abc.abstractmethod + def __truediv__(self, child): + """ + Return Traversable child in self + """ + + @abc.abstractmethod + def open(self, mode='r', *args, **kwargs): + """ + mode may be 'r' or 'rb' to open as text or binary. Return a handle + suitable for reading (same as pathlib.Path.open). + + When opening as text, accepts encoding parameters such as those + accepted by io.TextIOWrapper. 
+ """ + + +class TraversableResources(ResourceReader): + @abc.abstractmethod + def files(self): + """Return a Traversable object for the loaded package.""" + + def open_resource(self, resource): + return self.files().joinpath(resource).open('rb') + + def resource_path(self, resource): + raise FileNotFoundError(resource) + + def is_resource(self, path): + return self.files().joinpath(path).isfile() + + def contents(self): + return (item.name for item in self.files().iterdir()) diff --git a/pipenv/vendor/importlib_resources/tests/__init__.py b/pipenv/vendor/importlib_resources/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/data01/__init__.py b/pipenv/vendor/importlib_resources/tests/data01/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/data01/binary.file b/pipenv/vendor/importlib_resources/tests/data01/binary.file new file mode 100644 index 0000000000000000000000000000000000000000..eaf36c1daccfdf325514461cd1a2ffbc139b5464 GIT binary patch literal 4 LcmZQzWMT#Y01f~L literal 0 HcmV?d00001 diff --git a/pipenv/vendor/importlib_resources/tests/data01/subdirectory/__init__.py b/pipenv/vendor/importlib_resources/tests/data01/subdirectory/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/data01/subdirectory/binary.file b/pipenv/vendor/importlib_resources/tests/data01/subdirectory/binary.file new file mode 100644 index 0000000000000000000000000000000000000000..eaf36c1daccfdf325514461cd1a2ffbc139b5464 GIT binary patch literal 4 LcmZQzWMT#Y01f~L literal 0 HcmV?d00001 diff --git a/pipenv/vendor/importlib_resources/tests/data01/utf-16.file b/pipenv/vendor/importlib_resources/tests/data01/utf-16.file new file mode 100644 index 0000000000000000000000000000000000000000..2cb772295ef4b480a8d83725bd5006a0236d8f68 GIT binary patch literal 44 ucmezW&x0YAAqNQa8FUyF7(y9B7~B|i84MZBfV^^`Xc15@g+Y;liva-T)Ce>H literal 0 HcmV?d00001 diff --git a/pipenv/vendor/importlib_resources/tests/data01/utf-8.file b/pipenv/vendor/importlib_resources/tests/data01/utf-8.file new file mode 100644 index 0000000000..1c0132ad90 --- /dev/null +++ b/pipenv/vendor/importlib_resources/tests/data01/utf-8.file @@ -0,0 +1 @@ +Hello, UTF-8 world! 
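[Editor's sketch: the vendored package above exposes `read_text()`, `files()`, and `as_file()` (see its `__init__.py` and the Traversable ABC in `abc.py`). The snippet below shows roughly how that API is consumed; `mypkg` and `settings.cfg` are hypothetical names for illustration only and are not part of this patch. Inside pipenv the import path would be `pipenv.vendor.importlib_resources`.]

# Usage sketch (illustrative only): "mypkg" and "settings.cfg" are
# hypothetical; neither exists in this patch series.
import importlib_resources as resources

# High-level helper: read a text resource shipped inside a package.
text = resources.read_text("mypkg", "settings.cfg")

# Traversable API: files() returns an object with pathlib-like methods.
cfg = resources.files("mypkg").joinpath("settings.cfg")
print(cfg.read_text(encoding="utf-8"))

# as_file() materializes the resource on the real file system when
# needed, e.g. when the package is imported from a zip archive; the
# temporary copy is cleaned up on exiting the context manager.
with resources.as_file(cfg) as path:
    print(path)

[as_file() exists because resources inside zip archives have no filesystem path of their own; the `_tempfile` helper in `_common.py` above stashes the bytes in a temporary file for the duration of the context.]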
diff --git a/pipenv/vendor/importlib_resources/tests/data02/__init__.py b/pipenv/vendor/importlib_resources/tests/data02/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/data02/one/__init__.py b/pipenv/vendor/importlib_resources/tests/data02/one/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/data02/one/resource1.txt b/pipenv/vendor/importlib_resources/tests/data02/one/resource1.txt new file mode 100644 index 0000000000..61a813e401 --- /dev/null +++ b/pipenv/vendor/importlib_resources/tests/data02/one/resource1.txt @@ -0,0 +1 @@ +one resource diff --git a/pipenv/vendor/importlib_resources/tests/data02/two/__init__.py b/pipenv/vendor/importlib_resources/tests/data02/two/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/data02/two/resource2.txt b/pipenv/vendor/importlib_resources/tests/data02/two/resource2.txt new file mode 100644 index 0000000000..a80ce46ea3 --- /dev/null +++ b/pipenv/vendor/importlib_resources/tests/data02/two/resource2.txt @@ -0,0 +1 @@ +two resource diff --git a/pipenv/vendor/importlib_resources/tests/data03/__init__.py b/pipenv/vendor/importlib_resources/tests/data03/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/data03/namespace/resource1.txt b/pipenv/vendor/importlib_resources/tests/data03/namespace/resource1.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/test_open.py b/pipenv/vendor/importlib_resources/tests/test_open.py new file mode 100644 index 0000000000..8a3429f2e9 --- /dev/null +++ b/pipenv/vendor/importlib_resources/tests/test_open.py @@ -0,0 +1,73 @@ +import unittest + +import importlib_resources as resources +from . import data01 +from . import util +from .._compat import FileNotFoundError + + +class CommonBinaryTests(util.CommonTests, unittest.TestCase): + def execute(self, package, path): + with resources.open_binary(package, path): + pass + + +class CommonTextTests(util.CommonTests, unittest.TestCase): + def execute(self, package, path): + with resources.open_text(package, path): + pass + + +class OpenTests: + def test_open_binary(self): + with resources.open_binary(self.data, 'utf-8.file') as fp: + result = fp.read() + self.assertEqual(result, b'Hello, UTF-8 world!\n') + + def test_open_text_default_encoding(self): + with resources.open_text(self.data, 'utf-8.file') as fp: + result = fp.read() + self.assertEqual(result, 'Hello, UTF-8 world!\n') + + def test_open_text_given_encoding(self): + with resources.open_text( + self.data, 'utf-16.file', 'utf-16', 'strict') as fp: + result = fp.read() + self.assertEqual(result, 'Hello, UTF-16 world!\n') + + def test_open_text_with_errors(self): + # Raises UnicodeError without the 'errors' argument. 
+ with resources.open_text( + self.data, 'utf-16.file', 'utf-8', 'strict') as fp: + self.assertRaises(UnicodeError, fp.read) + with resources.open_text( + self.data, 'utf-16.file', 'utf-8', 'ignore') as fp: + result = fp.read() + self.assertEqual( + result, + 'H\x00e\x00l\x00l\x00o\x00,\x00 ' + '\x00U\x00T\x00F\x00-\x001\x006\x00 ' + '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00') + + def test_open_binary_FileNotFoundError(self): + self.assertRaises( + FileNotFoundError, + resources.open_binary, self.data, 'does-not-exist') + + def test_open_text_FileNotFoundError(self): + self.assertRaises( + FileNotFoundError, + resources.open_text, self.data, 'does-not-exist') + + +class OpenDiskTests(OpenTests, unittest.TestCase): + def setUp(self): + self.data = data01 + + +class OpenZipTests(OpenTests, util.ZipSetup, unittest.TestCase): + pass + + +if __name__ == '__main__': + unittest.main() diff --git a/pipenv/vendor/importlib_resources/tests/test_path.py b/pipenv/vendor/importlib_resources/tests/test_path.py new file mode 100644 index 0000000000..f6004756d4 --- /dev/null +++ b/pipenv/vendor/importlib_resources/tests/test_path.py @@ -0,0 +1,42 @@ +import unittest + +import importlib_resources as resources +from . import data01 +from . import util + + +class CommonTests(util.CommonTests, unittest.TestCase): + + def execute(self, package, path): + with resources.path(package, path): + pass + + +class PathTests: + + def test_reading(self): + # Path should be readable. + # Test also implicitly verifies the returned object is a pathlib.Path + # instance. + with resources.path(self.data, 'utf-8.file') as path: + self.assertTrue(path.name.endswith("utf-8.file"), repr(path)) + # pathlib.Path.read_text() was introduced in Python 3.5. + with path.open('r', encoding='utf-8') as file: + text = file.read() + self.assertEqual('Hello, UTF-8 world!\n', text) + + +class PathDiskTests(PathTests, unittest.TestCase): + data = data01 + + +class PathZipTests(PathTests, util.ZipSetup, unittest.TestCase): + def test_remove_in_context_manager(self): + # It is not an error if the file that was temporarily stashed on the + # file system is removed inside the `with` stanza. + with resources.path(self.data, 'utf-8.file') as path: + path.unlink() + + +if __name__ == '__main__': + unittest.main() diff --git a/pipenv/vendor/importlib_resources/tests/test_read.py b/pipenv/vendor/importlib_resources/tests/test_read.py new file mode 100644 index 0000000000..ee94d8adfc --- /dev/null +++ b/pipenv/vendor/importlib_resources/tests/test_read.py @@ -0,0 +1,63 @@ +import unittest +import importlib_resources as resources + +from . import data01 +from . import util +from importlib import import_module + + +class CommonBinaryTests(util.CommonTests, unittest.TestCase): + def execute(self, package, path): + resources.read_binary(package, path) + + +class CommonTextTests(util.CommonTests, unittest.TestCase): + def execute(self, package, path): + resources.read_text(package, path) + + +class ReadTests: + def test_read_binary(self): + result = resources.read_binary(self.data, 'binary.file') + self.assertEqual(result, b'\0\1\2\3') + + def test_read_text_default_encoding(self): + result = resources.read_text(self.data, 'utf-8.file') + self.assertEqual(result, 'Hello, UTF-8 world!\n') + + def test_read_text_given_encoding(self): + result = resources.read_text( + self.data, 'utf-16.file', encoding='utf-16') + self.assertEqual(result, 'Hello, UTF-16 world!\n') + + def test_read_text_with_errors(self): + # Raises UnicodeError without the 'errors' argument. 
+ self.assertRaises( + UnicodeError, resources.read_text, self.data, 'utf-16.file') + result = resources.read_text(self.data, 'utf-16.file', errors='ignore') + self.assertEqual( + result, + 'H\x00e\x00l\x00l\x00o\x00,\x00 ' + '\x00U\x00T\x00F\x00-\x001\x006\x00 ' + '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00') + + +class ReadDiskTests(ReadTests, unittest.TestCase): + data = data01 + + +class ReadZipTests(ReadTests, util.ZipSetup, unittest.TestCase): + def test_read_submodule_resource(self): + submodule = import_module('ziptestdata.subdirectory') + result = resources.read_binary( + submodule, 'binary.file') + self.assertEqual(result, b'\0\1\2\3') + + def test_read_submodule_resource_by_name(self): + result = resources.read_binary( + 'ziptestdata.subdirectory', 'binary.file') + self.assertEqual(result, b'\0\1\2\3') + + +if __name__ == '__main__': + unittest.main() diff --git a/pipenv/vendor/importlib_resources/tests/test_resource.py b/pipenv/vendor/importlib_resources/tests/test_resource.py new file mode 100644 index 0000000000..8c5a72cb3e --- /dev/null +++ b/pipenv/vendor/importlib_resources/tests/test_resource.py @@ -0,0 +1,170 @@ +import sys +import unittest +import importlib_resources as resources + +from . import data01 +from . import zipdata01, zipdata02 +from . import util +from importlib import import_module + + +class ResourceTests: + # Subclasses are expected to set the `data` attribute. + + def test_is_resource_good_path(self): + self.assertTrue(resources.is_resource(self.data, 'binary.file')) + + def test_is_resource_missing(self): + self.assertFalse(resources.is_resource(self.data, 'not-a-file')) + + def test_is_resource_subresource_directory(self): + # Directories are not resources. + self.assertFalse(resources.is_resource(self.data, 'subdirectory')) + + def test_contents(self): + contents = set(resources.contents(self.data)) + # There may be cruft in the directory listing of the data directory. + # Under Python 3 we could have a __pycache__ directory, and under + # Python 2 we could have .pyc files. These are both artifacts of the + # test suite importing these modules and writing these caches. They + # aren't germane to this test, so just filter them out. 
+ contents.discard('__pycache__') + contents.discard('__init__.pyc') + contents.discard('__init__.pyo') + self.assertEqual(contents, { + '__init__.py', + 'subdirectory', + 'utf-8.file', + 'binary.file', + 'utf-16.file', + }) + + +class ResourceDiskTests(ResourceTests, unittest.TestCase): + def setUp(self): + self.data = data01 + + +class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase): + pass + + +@unittest.skipIf(sys.version_info < (3,), 'No ResourceReader in Python 2') +class ResourceLoaderTests(unittest.TestCase): + def test_resource_contents(self): + package = util.create_package( + file=data01, path=data01.__file__, contents=['A', 'B', 'C']) + self.assertEqual( + set(resources.contents(package)), + {'A', 'B', 'C'}) + + def test_resource_is_resource(self): + package = util.create_package( + file=data01, path=data01.__file__, + contents=['A', 'B', 'C', 'D/E', 'D/F']) + self.assertTrue(resources.is_resource(package, 'B')) + + def test_resource_directory_is_not_resource(self): + package = util.create_package( + file=data01, path=data01.__file__, + contents=['A', 'B', 'C', 'D/E', 'D/F']) + self.assertFalse(resources.is_resource(package, 'D')) + + def test_resource_missing_is_not_resource(self): + package = util.create_package( + file=data01, path=data01.__file__, + contents=['A', 'B', 'C', 'D/E', 'D/F']) + self.assertFalse(resources.is_resource(package, 'Z')) + + +class ResourceCornerCaseTests(unittest.TestCase): + def test_package_has_no_reader_fallback(self): + # Test odd ball packages which: + # 1. Do not have a ResourceReader as a loader + # 2. Are not on the file system + # 3. Are not in a zip file + module = util.create_package( + file=data01, path=data01.__file__, contents=['A', 'B', 'C']) + # Give the module a dummy loader. + module.__loader__ = object() + # Give the module a dummy origin. + module.__file__ = '/path/which/shall/not/be/named' + if sys.version_info >= (3,): + module.__spec__.loader = module.__loader__ + module.__spec__.origin = module.__file__ + self.assertFalse(resources.is_resource(module, 'A')) + + +class ResourceFromZipsTest01(util.ZipSetupBase, unittest.TestCase): + ZIP_MODULE = zipdata01 # type: ignore + + def test_is_submodule_resource(self): + submodule = import_module('ziptestdata.subdirectory') + self.assertTrue( + resources.is_resource(submodule, 'binary.file')) + + def test_read_submodule_resource_by_name(self): + self.assertTrue( + resources.is_resource('ziptestdata.subdirectory', 'binary.file')) + + def test_submodule_contents(self): + submodule = import_module('ziptestdata.subdirectory') + self.assertEqual( + set(resources.contents(submodule)), + {'__init__.py', 'binary.file'}) + + def test_submodule_contents_by_name(self): + self.assertEqual( + set(resources.contents('ziptestdata.subdirectory')), + {'__init__.py', 'binary.file'}) + + +class ResourceFromZipsTest02(util.ZipSetupBase, unittest.TestCase): + ZIP_MODULE = zipdata02 # type: ignore + + def test_unrelated_contents(self): + # https://gitlab.com/python-devs/importlib_resources/issues/44 + # + # Here we have a zip file with two unrelated subpackages. The bug + # reports that getting the contents of a resource returns unrelated + # files. 
+ self.assertEqual( + set(resources.contents('ziptestdata.one')), + {'__init__.py', 'resource1.txt'}) + self.assertEqual( + set(resources.contents('ziptestdata.two')), + {'__init__.py', 'resource2.txt'}) + + +@unittest.skipIf(sys.version_info < (3,), 'No namespace packages in Python 2') +class NamespaceTest(unittest.TestCase): + def test_namespaces_cannot_have_resources(self): + contents = resources.contents( + 'importlib_resources.tests.data03.namespace') + self.assertFalse(list(contents)) + # Even though there is a file in the namespace directory, it is not + # considered a resource, since namespace packages can't have them. + self.assertFalse(resources.is_resource( + 'importlib_resources.tests.data03.namespace', + 'resource1.txt')) + # We should get an exception if we try to read it or open it. + self.assertRaises( + FileNotFoundError, + resources.open_text, + 'importlib_resources.tests.data03.namespace', 'resource1.txt') + self.assertRaises( + FileNotFoundError, + resources.open_binary, + 'importlib_resources.tests.data03.namespace', 'resource1.txt') + self.assertRaises( + FileNotFoundError, + resources.read_text, + 'importlib_resources.tests.data03.namespace', 'resource1.txt') + self.assertRaises( + FileNotFoundError, + resources.read_binary, + 'importlib_resources.tests.data03.namespace', 'resource1.txt') + + +if __name__ == '__main__': + unittest.main() diff --git a/pipenv/vendor/importlib_resources/tests/util.py b/pipenv/vendor/importlib_resources/tests/util.py new file mode 100644 index 0000000000..8c26496d82 --- /dev/null +++ b/pipenv/vendor/importlib_resources/tests/util.py @@ -0,0 +1,213 @@ +import abc +import importlib +import io +import sys +import types +import unittest + +from . import data01 +from . import zipdata01 +from .._compat import ABC, Path, PurePath, FileNotFoundError +from ..abc import ResourceReader + +try: + from test.support import modules_setup, modules_cleanup +except ImportError: + # Python 2.7. + def modules_setup(): + return sys.modules.copy(), + + def modules_cleanup(oldmodules): + # Encoders/decoders are registered permanently within the internal + # codec cache. If we destroy the corresponding modules their + # globals will be set to None which will trip up the cached functions. + encodings = [(k, v) for k, v in sys.modules.items() + if k.startswith('encodings.')] + sys.modules.clear() + sys.modules.update(encodings) + # XXX: This kind of problem can affect more than just encodings. In + # particular extension modules (such as _ssl) don't cope with reloading + # properly. Really, test modules should be cleaning out the test + # specific modules they know they added (ala test_runpy) rather than + # relying on this function (as test_importhooks and test_pkg do + # currently). Implicitly imported *real* modules should be left alone + # (see issue 10556). 
+        sys.modules.update(oldmodules)
+
+
+try:
+    from importlib.machinery import ModuleSpec
+except ImportError:
+    ModuleSpec = None  # type: ignore
+
+
+def create_package(file, path, is_package=True, contents=()):
+    class Reader(ResourceReader):
+        def get_resource_reader(self, package):
+            return self
+
+        def open_resource(self, path):
+            self._path = path
+            if isinstance(file, Exception):
+                raise file
+            else:
+                return file
+
+        def resource_path(self, path_):
+            self._path = path_
+            if isinstance(path, Exception):
+                raise path
+            else:
+                return path
+
+        def is_resource(self, path_):
+            self._path = path_
+            if isinstance(path, Exception):
+                raise path
+            for entry in contents:
+                parts = entry.split('/')
+                if len(parts) == 1 and parts[0] == path_:
+                    return True
+            return False
+
+        def contents(self):
+            if isinstance(path, Exception):
+                raise path
+            # There's no yield from in baseball, er, Python 2.
+            for entry in contents:
+                yield entry
+
+    name = 'testingpackage'
+    # Unfortunately importlib.util.module_from_spec() was not introduced until
+    # Python 3.5.
+    module = types.ModuleType(name)
+    if ModuleSpec is None:
+        # Python 2.
+        module.__name__ = name
+        module.__file__ = 'does-not-exist'
+        if is_package:
+            module.__path__ = []
+    else:
+        # Python 3.
+        loader = Reader()
+        spec = ModuleSpec(
+            name, loader,
+            origin='does-not-exist',
+            is_package=is_package)
+        module.__spec__ = spec
+        module.__loader__ = loader
+    return module
+
+
+class CommonTests(ABC):
+
+    @abc.abstractmethod
+    def execute(self, package, path):
+        raise NotImplementedError
+
+    def test_package_name(self):
+        # Passing in the package name should succeed.
+        self.execute(data01.__name__, 'utf-8.file')
+
+    def test_package_object(self):
+        # Passing in the package itself should succeed.
+        self.execute(data01, 'utf-8.file')
+
+    def test_string_path(self):
+        # Passing in a string for the path should succeed.
+        path = 'utf-8.file'
+        self.execute(data01, path)
+
+    @unittest.skipIf(sys.version_info < (3, 6), 'requires os.PathLike support')
+    def test_pathlib_path(self):
+        # Passing in a pathlib.PurePath object for the path should succeed.
+        path = PurePath('utf-8.file')
+        self.execute(data01, path)
+
+    def test_absolute_path(self):
+        # An absolute path is a ValueError.
+        path = Path(__file__)
+        full_path = path.parent/'utf-8.file'
+        with self.assertRaises(ValueError):
+            self.execute(data01, full_path)
+
+    def test_relative_path(self):
+        # A relative path is a ValueError.
+        with self.assertRaises(ValueError):
+            self.execute(data01, '../data01/utf-8.file')
+
+    def test_importing_module_as_side_effect(self):
+        # The anchor package can already be imported.
+        del sys.modules[data01.__name__]
+        self.execute(data01.__name__, 'utf-8.file')
+
+    def test_non_package_by_name(self):
+        # The anchor package cannot be a module.
+        with self.assertRaises(TypeError):
+            self.execute(__name__, 'utf-8.file')
+
+    def test_non_package_by_package(self):
+        # The anchor package cannot be a module.
+ with self.assertRaises(TypeError): + module = sys.modules['importlib_resources.tests.util'] + self.execute(module, 'utf-8.file') + + @unittest.skipIf(sys.version_info < (3,), 'No ResourceReader in Python 2') + def test_resource_opener(self): + bytes_data = io.BytesIO(b'Hello, world!') + package = create_package(file=bytes_data, path=FileNotFoundError()) + self.execute(package, 'utf-8.file') + self.assertEqual(package.__loader__._path, 'utf-8.file') + + @unittest.skipIf(sys.version_info < (3,), 'No ResourceReader in Python 2') + def test_resource_path(self): + bytes_data = io.BytesIO(b'Hello, world!') + path = __file__ + package = create_package(file=bytes_data, path=path) + self.execute(package, 'utf-8.file') + self.assertEqual(package.__loader__._path, 'utf-8.file') + + def test_useless_loader(self): + package = create_package(file=FileNotFoundError(), + path=FileNotFoundError()) + with self.assertRaises(FileNotFoundError): + self.execute(package, 'utf-8.file') + + +class ZipSetupBase: + ZIP_MODULE = None + + @classmethod + def setUpClass(cls): + data_path = Path(cls.ZIP_MODULE.__file__) + data_dir = data_path.parent + cls._zip_path = str(data_dir / 'ziptestdata.zip') + sys.path.append(cls._zip_path) + cls.data = importlib.import_module('ziptestdata') + + @classmethod + def tearDownClass(cls): + try: + sys.path.remove(cls._zip_path) + except ValueError: + pass + + try: + del sys.path_importer_cache[cls._zip_path] + del sys.modules[cls.data.__name__] + except KeyError: + pass + + try: + del cls.data + del cls._zip_path + except AttributeError: + pass + + def setUp(self): + modules = modules_setup() + self.addCleanup(modules_cleanup, *modules) + + +class ZipSetup(ZipSetupBase): + ZIP_MODULE = zipdata01 # type: ignore diff --git a/pipenv/vendor/importlib_resources/tests/zipdata01/__init__.py b/pipenv/vendor/importlib_resources/tests/zipdata01/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/zipdata01/ziptestdata.zip b/pipenv/vendor/importlib_resources/tests/zipdata01/ziptestdata.zip new file mode 100644 index 0000000000000000000000000000000000000000..ddcccfb37368b3d80af8a5f5ebdd04c2a887c8c0 GIT binary patch literal 876 zcmWIWW@Zs#0D<m|9`DWFN;Wz`HVBIYaaCqPNosLPN@7W(erZXXuA!M;T4qk_|9>6~ zsSG(lm=9E{z!1t1!r;cB%V5Z02IQ3kMT>xPDGZ7XTnqsy7M%WZXcovi5EjE?fd$xn zkJOx;d>w_*5I0>5h4TEOoD@YaRC9n@;Q-yC@$s2?nI-Y@dIgoJnuObScC!GrfiSwJ zq|Cg;qDrvs42(?7s2br`0U0Vlj2tY*rAaB7MXAXp`9+obL|Kj31PpgDGKnzbjv$!J zwlsoR$nHXlt^jXTL(n4uVMrntLm*LtZU#&bhz5lv3~Xs!31lKW1SL#R9fBSq2t)3n x8G;ln_*?=CS{T^UD9(uL65LUR+gYe4M&dIO#f_|NAa}3<;dY>&`9RFT0010b%mDxZ literal 0 HcmV?d00001 diff --git a/pipenv/vendor/importlib_resources/tests/zipdata02/__init__.py b/pipenv/vendor/importlib_resources/tests/zipdata02/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/vendor/importlib_resources/tests/zipdata02/ziptestdata.zip b/pipenv/vendor/importlib_resources/tests/zipdata02/ziptestdata.zip new file mode 100644 index 0000000000000000000000000000000000000000..93f4ede5e9573b0c64b0a6f7727c8fa3ed318d12 GIT binary patch literal 698 zcmWIWW@Zs#0D<m|9&a!MN{9pLs?36t)Z&tq#F9k)`1s7c%#!$cy@JXBoSLN3H09@| z;?{Wf=bx3lKn)-)i$!BmYH@yPQF5xGUP(m>&?E(z02lENC@Ig!?SR;Polaa10BMBT zX9RKpPypEhj7%cTxC0L88!*_?2%>NXBW^=L0SN<J8cl&rWJ55571bd0fJGRThs7Y6 zFTuea;Eie&Oe=^+b<;L9qcB1m)gbh+h8eu2@jVuUU~U43e1JDA8%Q}T5au#6Fqi`| G0|NkCX`<Z# literal 0 HcmV?d00001 diff --git a/pipenv/vendor/importlib_resources/trees.py b/pipenv/vendor/importlib_resources/trees.py new file mode 100644 index 
0000000000..ba42bb55b7 --- /dev/null +++ b/pipenv/vendor/importlib_resources/trees.py @@ -0,0 +1,6 @@ +# for compatibility with 1.1, continue to expose as_file here. + +from ._common import as_file + + +__all__ = ['as_file'] From ed8ae212c7bb66b93ec53d717cf091fb1b5e8946 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Tue, 31 Mar 2020 10:46:50 -0400 Subject: [PATCH 14/49] Fix tomlkit functools32 import Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/vendor/tomlkit/toml_char.py | 2 +- tasks/vendoring/patches/vendor/tomlkit-fix.patch | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pipenv/vendor/tomlkit/toml_char.py b/pipenv/vendor/tomlkit/toml_char.py index d649a917c1..02c5517289 100644 --- a/pipenv/vendor/tomlkit/toml_char.py +++ b/pipenv/vendor/tomlkit/toml_char.py @@ -4,7 +4,7 @@ from ._compat import unicode if PY2: - from functools32 import lru_cache + from pipenv.vendor.backports.functools_lru_cache import lru_cache else: from functools import lru_cache diff --git a/tasks/vendoring/patches/vendor/tomlkit-fix.patch b/tasks/vendoring/patches/vendor/tomlkit-fix.patch index 2785f99e15..c8101cde7c 100644 --- a/tasks/vendoring/patches/vendor/tomlkit-fix.patch +++ b/tasks/vendoring/patches/vendor/tomlkit-fix.patch @@ -148,3 +148,16 @@ index 3b416664..631e9959 100644 from .api import loads from .toml_document import TOMLDocument +diff --git a/pipenv/vendor/tomlkit/toml_char.py b/pipenv/vendor/tomlkit/toml_char.py +index d649a917..02c55172 100644 +--- a/pipenv/vendor/tomlkit/toml_char.py ++++ b/pipenv/vendor/tomlkit/toml_char.py +@@ -4,7 +4,7 @@ from ._compat import PY2 + from ._compat import unicode + + if PY2: +- from functools32 import lru_cache ++ from pipenv.vendor.backports.functools_lru_cache import lru_cache + else: + from functools import lru_cache + From feed2dd3edad92e5d3c48b551532df7a48186e7b Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Tue, 31 Mar 2020 11:07:59 -0400 Subject: [PATCH 15/49] Update requirementslib - Update requirementslib with URL fixes Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/vendor/requirementslib/models/url.py | 28 ++++++++++++++++----- pipenv/vendor/vendor.txt | 2 +- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/pipenv/vendor/requirementslib/models/url.py b/pipenv/vendor/requirementslib/models/url.py index 4f8c010117..200eba698a 100644 --- a/pipenv/vendor/requirementslib/models/url.py +++ b/pipenv/vendor/requirementslib/models/url.py @@ -10,6 +10,7 @@ from urllib3.util.url import Url from ..environment import MYPY_RUNNING +from .utils import extras_to_string, parse_extras if MYPY_RUNNING: from typing import Dict, List, Optional, Text, Tuple, TypeVar, Union @@ -274,7 +275,12 @@ def to_string( direct = self.is_direct_url if escape_password: password = "----" if self.password else "" - username = self.get_username(unquote=unquote) if password else "----" + if password: + username = self.get_username(unquote=unquote) + elif self.username: + username = "----" + else: + username = "" else: password = self.get_password(unquote=unquote) username = self.get_username(unquote=unquote) @@ -456,14 +462,22 @@ def update_url_name_and_fragment(name_with_extras, ref, parsed_dict): # type: (Optional[str], Optional[str], Dict[str, Optional[str]]) -> Dict[str, Optional[str]] if name_with_extras: fragment = "" # type: Optional[str] + parsed_extras = () + name, extras = pip_shims.shims._strip_extras(name_with_extras) + if extras: + parsed_extras = 
parsed_extras + tuple(parse_extras(extras)) if parsed_dict["fragment"] is not None: fragment = "{0}".format(parsed_dict["fragment"]) if fragment.startswith("egg="): - name, extras = pip_shims.shims._strip_extras(name_with_extras) - fragment_name, fragment_extras = pip_shims.shims._strip_extras(fragment) - if fragment_extras and not extras: - name_with_extras = "{0}{1}".format(name, fragment_extras) - fragment = "" + _, _, fragment_part = fragment.partition("=") + fragment_name, fragment_extras = pip_shims.shims._strip_extras( + fragment_part + ) + name = name if name else fragment_name + if fragment_extras: + parsed_extras = parsed_extras + tuple(parse_extras(fragment_extras)) + name_with_extras = "{0}{1}".format(name, extras_to_string(parsed_extras)) + parsed_dict["fragment"] = "egg={0}".format(name_with_extras) elif ( parsed_dict.get("path") is not None and "&subdirectory" in parsed_dict["path"] ): @@ -471,6 +485,8 @@ def update_url_name_and_fragment(name_with_extras, ref, parsed_dict): parsed_dict["path"] = path elif ref is not None and "&subdirectory" in ref: ref, fragment = URI.parse_subdirectory(ref) + parsed_dict["name"] = name + parsed_dict["extras"] = parsed_extras if ref: parsed_dict["ref"] = ref.strip() return parsed_dict diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index 27caa4d30d..a895e5011b 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -26,7 +26,7 @@ requests==2.23.0 idna==2.9 urllib3==1.25.8 certifi==2019.11.28 -requirementslib==1.5.4 +requirementslib==1.5.5 attrs==19.3.0 distlib==0.3.0 packaging==20.3 From e03878d7c78bc6c9c9fb213595fe92ef1d6c5e4c Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Tue, 31 Mar 2020 13:11:32 -0400 Subject: [PATCH 16/49] Add news entry and fix lockfile - Drop pytest-tap - Update azure pipelines config - Borrow ramdisk configuration from pip - Fix pyinstaller ref for python 2 - Add 0-minute timeout and add github workflow - Add skip for pywin32 - Scale down to `-n auto` to reduce race conditions on windows - Skip pywin32 on python 3.8 as the relevant dependencies aren't compatible - Use default pip exists action = ignore to work around VCS race condition - Create local temp directory to avoid crossing drive letter boundary on azure during CI runs - Monkeypatch click windows console detection to return False in CI Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/scripts/New-Ramdisk.ps1 | 75 +++++++++++++++++ .azure-pipelines/steps/create-virtualenv.yml | 5 +- .../steps/install-dependencies.yml | 2 +- .azure-pipelines/steps/run-tests-linux.yml | 3 +- .azure-pipelines/steps/run-tests-windows.yml | 63 ++++++++++---- .azure-pipelines/steps/run-tests.yml | 16 ++-- .github/workflows/ci.yaml | 53 ++++++++++++ Pipfile.lock | 48 +++++------ azure-pipelines.yml | 82 ++++++++++++++----- news/4169.vendor.rst | 50 +++++++++++ pipenv/core.py | 5 +- pipenv/vendor/vendor.txt | 2 +- setup.py | 2 +- tasks/vendoring/__init__.py | 37 +++++++++ .../patches/vendor/tomlkit-fix.patch | 15 +--- tests/integration/conftest.py | 23 +++++- tests/integration/test_install_basic.py | 22 ++++- tests/integration/test_install_markers.py | 4 +- tests/integration/test_install_twists.py | 19 +++-- tests/integration/test_install_uri.py | 11 ++- tests/integration/test_lock.py | 5 +- tests/integration/test_pipenv.py | 1 - tests/integration/test_sync.py | 1 + tests/integration/test_uninstall.py | 11 +-- tests/integration/test_windows.py | 2 + tests/unit/test_core.py | 24 ++++-- tests/unit/test_help.py 
| 6 ++ tests/unit/test_patched.py | 1 + tests/unit/test_utils.py | 1 + tests/unit/test_utils_windows_executable.py | 1 + 30 files changed, 469 insertions(+), 121 deletions(-) create mode 100644 .azure-pipelines/scripts/New-Ramdisk.ps1 create mode 100644 .github/workflows/ci.yaml create mode 100644 news/4169.vendor.rst diff --git a/.azure-pipelines/scripts/New-Ramdisk.ps1 b/.azure-pipelines/scripts/New-Ramdisk.ps1 new file mode 100644 index 0000000000..fb068e47fe --- /dev/null +++ b/.azure-pipelines/scripts/New-Ramdisk.ps1 @@ -0,0 +1,75 @@ +# Taken from https://github.com/pypa/pip/blob/ceaf75b9ede9a9c25bcee84fe512fa6774889685/.azure-pipelines/scripts/New-RAMDisk.ps1 +[CmdletBinding()] +param( + [Parameter(Mandatory=$true, + HelpMessage="Drive letter to use for the RAMDisk")] + [String]$drive, + [Parameter(HelpMessage="Size to allocate to the RAMDisk")] + [UInt64]$size=1GB +) + +$ErrorActionPreference = "Stop" +Set-StrictMode -Version Latest + +Write-Output "Installing FS-iSCSITarget-Server" +Install-WindowsFeature -Name FS-iSCSITarget-Server + +Write-Output "Starting MSiSCSI" +Start-Service MSiSCSI +$retry = 10 +do { + $service = Get-Service MSiSCSI + if ($service.Status -eq "Running") { + break; + } + $retry-- + Start-Sleep -Milliseconds 500 +} until ($retry -eq 0) + +$service = Get-Service MSiSCSI +if ($service.Status -ne "Running") { + throw "MSiSCSI is not running" +} + +Write-Output "Configuring Firewall" +Get-NetFirewallServiceFilter -Service MSiSCSI | Enable-NetFirewallRule + +Write-Output "Configuring RAMDisk" +# Must use external-facing IP address, otherwise New-IscsiTargetPortal is +# unable to connect. +$ip = ( + Get-NetIPAddress -AddressFamily IPv4 | + Where-Object {$_.IPAddress -ne "127.0.0.1"} +)[0].IPAddress +if ( + -not (Get-IscsiServerTarget -ComputerName localhost | Where-Object {$_.TargetName -eq "ramdisks"}) +) { + New-IscsiServerTarget ` + -ComputerName localhost ` + -TargetName ramdisks ` + -InitiatorId IPAddress:$ip +} + +$newVirtualDisk = New-IscsiVirtualDisk ` + -ComputerName localhost ` + -Path ramdisk:local$drive.vhdx ` + -Size $size +Add-IscsiVirtualDiskTargetMapping ` + -ComputerName localhost ` + -TargetName ramdisks ` + -Path ramdisk:local$drive.vhdx + +Write-Output "Connecting to iSCSI" +New-IscsiTargetPortal -TargetPortalAddress $ip +Get-IscsiTarget | Where-Object {!$_.IsConnected} | Connect-IscsiTarget + +Write-Output "Configuring disk" +$newDisk = Get-IscsiConnection | + Get-Disk | + Where-Object {$_.SerialNumber -eq $newVirtualDisk.SerialNumber} + +Set-Disk -InputObject $newDisk -IsOffline $false +Initialize-Disk -InputObject $newDisk -PartitionStyle MBR +New-Partition -InputObject $newDisk -UseMaximumSize -DriveLetter $drive + +Format-Volume -DriveLetter $drive -NewFileSystemLabel Temp -FileSystem NTFS diff --git a/.azure-pipelines/steps/create-virtualenv.yml b/.azure-pipelines/steps/create-virtualenv.yml index 5f6160c4ba..3ad3a496c4 100644 --- a/.azure-pipelines/steps/create-virtualenv.yml +++ b/.azure-pipelines/steps/create-virtualenv.yml @@ -8,7 +8,7 @@ steps: echo "##vso[task.setvariable variable=PIP_PROCESS_DEPENDENCY_LINKS]1" displayName: Set Environment Variables -- ${{ if eq(parameters.vmImage, 'windows-2019') }}: +- ${{ if eq(parameters.vmImage, 'windows-latest') }}: - powershell: | pip install certifi $env:PYTHON_PATH=$(python -c "import sys; print(sys.executable)") @@ -18,7 +18,7 @@ steps: displayName: Set Python Path env: PYTHONWARNINGS: 'ignore:DEPRECATION' -- ${{ if ne(parameters.vmImage, 'windows-2019') }}: +- ${{ if ne(parameters.vmImage, 
'windows-latest') }}: - bash: | pip install certifi PYTHON_PATH=$(python -c 'import sys; print(sys.executable)') @@ -36,6 +36,7 @@ steps: echo "python_version: ${{ parameters.python_version }}" git submodule sync git submodule update --init --recursive + $(PY_EXE) -m pip install "virtualenv<20" $(PY_EXE) -m pipenv install --deploy --dev --python="$(PY_EXE)" env: PIPENV_DEFAULT_PYTHON_VERSION: ${{ parameters.python_version }} diff --git a/.azure-pipelines/steps/install-dependencies.yml b/.azure-pipelines/steps/install-dependencies.yml index 79684d4a29..4e179d2a19 100644 --- a/.azure-pipelines/steps/install-dependencies.yml +++ b/.azure-pipelines/steps/install-dependencies.yml @@ -1,5 +1,5 @@ steps: -- script: 'python -m pip install --upgrade pip setuptools wheel -e .[dev,tests] --upgrade' +- script: 'python -m pip install --upgrade pip setuptools wheel --upgrade-strategy=eager && python -m pip install -e . --upgrade' displayName: Upgrade Pip & Install Pipenv env: PYTHONWARNINGS: 'ignore:DEPRECATION' diff --git a/.azure-pipelines/steps/run-tests-linux.yml b/.azure-pipelines/steps/run-tests-linux.yml index 185a83b21a..a786f8735b 100644 --- a/.azure-pipelines/steps/run-tests-linux.yml +++ b/.azure-pipelines/steps/run-tests-linux.yml @@ -6,9 +6,10 @@ steps: # Fix Git SSL errors echo "Using pipenv python version: $(PIPENV_DEFAULT_PYTHON_VERSION)" git submodule sync && git submodule update --init --recursive - pipenv run pytest --junitxml=test-results.xml + pipenv run pytest -n 4 --junitxml=test-results.xml displayName: Run integration tests env: PYTHONWARNINGS: ignore:DEPRECATION PIPENV_NOSPIN: '1' PIPENV_DEFAULT_PYTHON_VERSION: ${{ parameters.python_version }} + GIT_SSH_COMMAND: ssh -o StrictHostKeyChecking=accept-new -o CheckHostIP=no diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml index 1730fa14b8..5fbb6cf105 100644 --- a/.azure-pipelines/steps/run-tests-windows.yml +++ b/.azure-pipelines/steps/run-tests-windows.yml @@ -1,21 +1,52 @@ parameters: python_version: '' + python_architecture: '' + pytest_markers: '' steps: -- powershell: | - subst T: "$env:TEMP" - Write-Host "##vso[task.setvariable variable=TEMP]T:\" - Write-Host "##vso[task.setvariable variable=TMP]T:\" - Write-Host "##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]$env:PYTHON_VERSION" - Write-Host "##vso[task.setvariable variable=PIPENV_NOSPIN]1" - displayName: Fix Temp Variable - env: - PYTHON_VERSION: ${{ parameters.python_version }} + - task: UsePythonVersion@0 + inputs: + versionSpec: ${{ parameters.python_version }} + architecture: ${{ parameters.python_architecture }} + addToPath: true + displayName: Use Python ${{ parameters.python_version }} -- script: | - git submodule sync && git submodule update --init --recursive - pipenv run pytest -ra --ignore=pipenv\patched --ignore=pipenv\vendor --junitxml=test-results.xml tests - displayName: Run integration tests - env: - PYTHONWARNINGS: 'ignore:DEPRECATION' - PIPENV_NOSPIN: '1' + - script: | + echo '##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]'${{ parameters.python_version }} + env: + PYTHON_VERSION: ${{ parameters.python_version }} + + - template: install-dependencies.yml + + - template: create-virtualenv.yml + parameters: + python_version: ${{ parameters.python_version }} + + - powershell: | + subst T: "$env:TEMP" + Write-Host "##vso[task.setvariable variable=TEMP]T:\" + Write-Host "##vso[task.setvariable variable=TMP]T:\" + Write-Host "##vso[task.setvariable 
variable=PIPENV_DEFAULT_PYTHON_VERSION]$env:PYTHON_VERSION" + Write-Host "##vso[task.setvariable variable=PIPENV_NOSPIN]1" + displayName: Fix Temp Variable + env: + PYTHON_VERSION: ${{ parameters.python_version }} + + - powershell: | + git submodule sync + git submodule update --init --recursive + Write-Host "Running Command: pipenv run pytest -n auto -m '${{ parameters.pytest_markers }}' --junitxml=test-results-${{ parameters.test_number }}.xml --timeout 300 tests/" + pipenv run pytest -n 4 -vvv -m '${{ parameters.pytest_markers }}' --junitxml=test-results.xml --timeout 300 tests/ + failOnStderr: false + displayName: Run integration tests + env: + PYTHONWARNINGS: 'ignore:DEPRECATION' + PIPENV_NOSPIN: '1' + GIT_SSH_COMMAND: ssh -o StrictHostKeyChecking=accept-new -o CheckHostIP=no + + - task: PublishTestResults@2 + displayName: Publish Test Results + inputs: + testResultsFiles: '**/test-results-${{ parameters.test_number }}.xml' + testRunTitle: 'Python ${{ parameters.python_version }}' + condition: succeededOrFailed() diff --git a/.azure-pipelines/steps/run-tests.yml b/.azure-pipelines/steps/run-tests.yml index 011cecf283..e33a91004a 100644 --- a/.azure-pipelines/steps/run-tests.yml +++ b/.azure-pipelines/steps/run-tests.yml @@ -17,18 +17,18 @@ steps: parameters: python_version: $(python.version) -- ${{ if eq(parameters.vmImage, 'windows-2019') }}: +- ${{ if eq(parameters.vmImage, 'windows-latest') }}: - template: run-tests-windows.yml parameters: python_version: $(python.version) -- ${{ if ne(parameters.vmImage, 'windows-2019') }}: +- ${{ if ne(parameters.vmImage, 'windows-latest') }}: - template: run-tests-linux.yml parameters: python_version: $(python.version) -- task: PublishTestResults@2 - displayName: Publish Test Results - inputs: - testResultsFiles: '**/test-results.xml' - testRunTitle: 'Python $(python.version)' - condition: succeededOrFailed() + - task: PublishTestResults@2 + displayName: Publish Test Results + inputs: + testResultsFiles: '**/test-results.xml' + testRunTitle: 'Python $(python.version)' + condition: succeededOrFailed() diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 0000000000..2b8ca39a42 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,53 @@ +name: pipenv_ci + +on: [push, pull_request] + +jobs: + build: + name: pipenv CI python ${{ matrix.python-version }} on ${{matrix.os}} + runs-on: ${{ matrix.os }} + strategy: + matrix: + python-version: [2.7, 3.6, 3.7, 3.8] + os: [macOS-latest, ubuntu-latest, windows-latest] + + steps: + - uses: actions/checkout@v1 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Get python path + id: python-path + run: | + echo ::set-output name=path::$(python -c "import sys; print(sys.executable)") + + - name: Install latest pip, setuptools, wheel + run: | + python -m pip install --upgrade pip setuptools wheel virtualenv --upgrade-strategy=eager + - name: Install dependencies + env: + PIPENV_DEFAULT_PYTHON_VERSION: ${{ matrix.python-version }} + PYTHONWARNINGS: ignore:DEPRECATION + PYTHONIOENCODING: 'utf-8' + GIT_ASK_YESNO: 'false' + run: | + git submodule sync + git submodule update --init --recursive + python -m pip install -e . 
--upgrade + python -m pip install "virtualenv<20" + pipenv install --deploy --dev --python=${{ steps.python-path.outputs.path }} + - name: Run tests + env: + PIPENV_DEFAULT_PYTHON_VERSION: ${{ matrix.python-version }} + PYTHONWARNINGS: ignore:DEPRECATION + PIPENV_NOSPIN: '1' + CI: '1' + GIT_ASK_YESNO: 'false' + PYPI_VENDOR_DIR: './tests/pypi/' + PYTHONIOENCODING: 'utf-8' + GIT_SSH_COMMAND: ssh -o StrictHostKeyChecking=accept-new -o CheckHostIP=no + run: | + pipenv run pytest -ra -n 4 --timeout 300 tests diff --git a/Pipfile.lock b/Pipfile.lock index 69e4612846..466065f277 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -253,7 +253,7 @@ "sha256:c3858660960c984d6ab0ebad691265180da2b43f07e061c0f8dca9ef3cffd328", "sha256:cce6a7477ed816bd2542d03d53db9f0db935dd013b70f336a95c73979289f248" ], - "markers": "python_version < '3.0'", + "markers": "python_version < '3'", "version": "==1.1.10" }, "execnet": { @@ -344,11 +344,11 @@ }, "importlib-metadata": { "hashes": [ - "sha256:298a914c82144c6b3b06c568a8973b89ad2176685f43cd1ea9ba968307300fa9", - "sha256:dfc83688553a91a786c6c91eeb5f3b1d31f24d71877bbd94ecbf5484e57690a2" + "sha256:2a688cbaa90e0cc587f1df48bdc97a6eadccdcd9c35fb3f976a09e3b5016d90f", + "sha256:34513a8a0c4962bc66d35b359558fd8a5e10cd472d37aec5f66858addef32c1e" ], "markers": "python_version < '3.8'", - "version": "==1.5.2" + "version": "==1.6.0" }, "importlib-resources": { "hashes": [ @@ -616,12 +616,12 @@ "editable": true, "path": "./tests/pytest-pypi" }, - "pytest-tap": { + "pytest-timeout": { "hashes": [ - "sha256:7de72c291dfc8de944a137366acd1e5877e21029868bd536dedaa8a61af7d2b4", - "sha256:87503e7496f9f5505aa603fc6a7b48cf224e9f6be0206958b1ee276810a2fe8a" + "sha256:80faa19cd245a42b87a51699d640c00d937c02b749052bfca6bae8bdbe12c48e", + "sha256:95ca727d4a1dace6ec5f0534d2940b8417ff8b782f7eef0ea09240bdd94d95c2" ], - "version": "==3.1" + "version": "==1.3.4" }, "pytest-xdist": { "hashes": [ @@ -780,13 +780,6 @@ "markers": "sys_platform == 'linux'", "version": "==0.9.0" }, - "tap.py": { - "hashes": [ - "sha256:a598bfaa2e224d71f2e86147c2ef822c18ff2e1b8ef006397e5056b08f92f699", - "sha256:f5eeeeebfd64e53d32661752bb4c288589a3babbb96db3f391a4ec29f1359c70" - ], - "version": "==3.0" - }, "termcolor": { "hashes": [ "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b" @@ -810,11 +803,11 @@ }, "tqdm": { "hashes": [ - "sha256:0d8b5afb66e23d80433102e9bd8b5c8b65d34c2a2255b2de58d97bd2ea8170fd", - "sha256:f35fb121bafa030bd94e74fcfd44f3c2830039a2ddef7fc87ef1c2d205237b24" + "sha256:03d2366c64d44c7f61e74c700d9b202d57e9efe355ea5c28814c52bfe7a50b8c", + "sha256:be5ddeec77d78ba781ea41eacb2358a77f74cc2407f54b82222d7ee7dc8c8ccf" ], "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==4.43.0" + "version": "==4.44.1" }, "twine": { "hashes": [ @@ -848,6 +841,7 @@ "sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4", "sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7" ], + "markers": "python_version >= '3.4'", "version": "==1.4.1" }, "typing": { @@ -869,19 +863,19 @@ }, "virtualenv": { "hashes": [ - "sha256:6f4c2882a943d20714076679f8dcc5675e953d6c29bfea3bc5d08bb6cdea5d36", - "sha256:cb1dab893f9e39b3e68d9118c555dcd86526d531c128c3f72e1551939723b72f" + "sha256:4e399f48c6b71228bf79f5febd27e3bbb753d9d5905776a86667bc61ab628a25", + "sha256:9e81279f4a9d16d1c0654a127c2c86e5bca2073585341691882c1e66e31ef8a5" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==20.0.14" + 
"version": "==20.0.15" }, "virtualenv-clone": { "hashes": [ - "sha256:532f789a5c88adf339506e3ca03326f20ee82fd08ee5586b44dc859b5b4468c5", - "sha256:c88ae171a11b087ea2513f260cdac9232461d8e9369bcd1dc143fc399d220557" + "sha256:07e74418b7cc64f4fda987bf5bc71ebd59af27a7bc9e8a8ee9fd54b1f2390a27", + "sha256:665e48dd54c84b98b71a657acb49104c54e7652bce9c1c4f6c6976ed4c827a29" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.5.3" + "version": "==0.5.4" }, "wcwidth": { "hashes": [ @@ -899,11 +893,11 @@ }, "werkzeug": { "hashes": [ - "sha256:169ba8a33788476292d04186ab33b01d6add475033dfc07215e6d219cc077096", - "sha256:6dc65cf9091cf750012f56f2cad759fa9e879f511b5ff8685e456b4e3bf90d16" + "sha256:2de2a5db0baeae7b2d2664949077c2ac63fbd16d98da0ff71837f7d1dea3fd43", + "sha256:6c80b1e5ad3665290ea39320b91e1be1e0d5f60652b964a3070216de83d2e47c" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.0.0" + "version": "==1.0.1" }, "zipp": { "hashes": [ diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 636ea71381..4f682d3b20 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -6,10 +6,10 @@ trigger: - master paths: exclude: - - docs/ - - news/ - - peeps/ - - examples/ + - docs/* + - news/* + - peeps/* + - examples/* - pytest.ini - README.md - pipenv/*.txt @@ -26,7 +26,7 @@ variables: jobs: - job: TestLinux pool: - vmImage: 'Ubuntu-16.04' + vmImage: 'Ubuntu-latest' strategy: matrix: Python27: @@ -38,37 +38,41 @@ jobs: Python37: python.version: '3.7' python.architecture: x64 - maxParallel: 4 + Python38: + python.version: '3.8' + python.architecture: x64 + maxParallel: 8 steps: - template: .azure-pipelines/steps/run-tests.yml parameters: - vmImage: 'Ubuntu-16.04' + vmImage: 'Ubuntu-latest' - job: TestVendoring pool: - vmImage: 'Ubuntu-16.04' + vmImage: 'Ubuntu-latest' variables: python.version: '3.7' python.architecture: x64 steps: - template: .azure-pipelines/steps/run-vendor-scripts.yml parameters: - vmImage: 'Ubuntu-16.04' + vmImage: 'Ubuntu-latest' - job: TestPackaging pool: - vmImage: 'Ubuntu-16.04' + vmImage: 'Ubuntu-latest' variables: python.version: '3.7' python.architecture: x64 steps: - template: .azure-pipelines/steps/build-package.yml parameters: - vmImage: 'Ubuntu-16.04' + vmImage: 'Ubuntu-latest' -- job: TestWindows +- job: TestWindows1 + timeoutInMinutes: 0 pool: - vmImage: windows-2019 + vmImage: windows-latest strategy: matrix: Python27: @@ -80,15 +84,50 @@ jobs: Python37: python.version: '3.7' python.architecture: x64 - maxParallel: 4 + Python38: + python.version: '3.8' + python.architecture: x64 + maxParallel: 8 steps: - - template: .azure-pipelines/steps/run-tests.yml - parameters: - vmImage: windows-2019 + - template: .azure-pipelines/steps/run-tests-windows.yml + parameters: + vmImage: windows-latest + python_version: $(python.version) + python_architecture: $(python.architecture) + test_number: "1" + pytest_markers: "lock or dotvenv or markers or project or utils or patched or core or cli" + +- job: TestWindows2 + timeoutInMinutes: 0 + pool: + vmImage: windows-latest + strategy: + matrix: + Python27: + python.version: '2.7' + python.architecture: x64 + Python36: + python.version: '3.6' + python.architecture: x64 + Python37: + python.version: '3.7' + python.architecture: x64 + Python38: + python.version: '3.8' + python.architecture: x64 + maxParallel: 8 + steps: + - template: .azure-pipelines/steps/run-tests-windows.yml + parameters: + vmImage: windows-latest + 
python_version: $(python.version) + python_architecture: $(python.architecture) + test_number: "2" + pytest_markers: "urls or multiprocessing or local or sequential or run or outdated or basic or code or uninstall" - job: TestMacOS pool: - vmImage: macOS-10.13 + vmImage: macOS-latest strategy: matrix: Python27: @@ -100,8 +139,11 @@ jobs: Python37: python.version: '3.7' python.architecture: x64 - maxParallel: 4 + Python38: + python.version: '3.8' + python.architecture: x64 + maxParallel: 8 steps: - template: .azure-pipelines/steps/run-tests.yml parameters: - vmImage: macOS-10.13 + vmImage: macOS-latest diff --git a/news/4169.vendor.rst b/news/4169.vendor.rst new file mode 100644 index 0000000000..d128f56cbc --- /dev/null +++ b/news/4169.vendor.rst @@ -0,0 +1,50 @@ +Update vendored dependencies and invocations + +- Update vendored and patched dependencies + - Update patches on `piptools`, `pip`, `pip-shims`, `tomlkit` +- Fix invocations of dependencies + - Fix custom `InstallCommand` instantiation + - Update `PackageFinder` usage + - Fix `Bool` stringify attempts from `tomlkit` + +Updated vendored dependencies: + - **attrs**: ``18.2.0`` => ``19.1.0`` + - **certifi**: ``2018.10.15`` => ``2019.3.9`` + - **cached_property**: ``1.4.3`` => ``1.5.1`` + - **cerberus**: ``1.2.0`` => ``1.3.1`` + - **click**: ``7.0.0`` => ``7.1.1`` + - **click-completion**: ``0.5.0`` => ``0.5.1`` + - **colorama**: ``0.3.9`` => ``0.4.3`` + - **contextlib2**: ``(new)`` => ``0.6.0.post1`` + - **distlib**: ``0.2.8`` => ``0.2.9`` + - **funcsigs**: ``(new)`` => ``1.0.2`` + - **importlib_metadata** ``1.3.0`` => ``1.5.1`` + - **importlib-resources**: ``(new)`` => ``1.4.0`` + - **idna**: ``2.7`` => ``2.9`` + - **jinja2**: ``2.10.0`` => ``2.11.1`` + - **markupsafe**: ``1.0`` => ``1.1.1`` + - **more-itertools**: ``(new)`` => ``5.0.0`` + - **orderedmultidict**: ``(new)`` => ``1.0`` + - **packaging**: ``18.0`` => ``19.0`` + - **parse**: ``1.9.0`` => ``1.15.0`` + - **pathlib2**: ``2.3.2`` => ``2.3.3`` + - **pep517**: ``(new)`` => ``0.5.0`` + - **pexpect**: ``4.6.0`` => ``4.8.0`` + - **pip-shims**: ``0.2.0`` => ``0.5.1`` + - **pipdeptree**: ``0.13.0`` => ``0.13.2`` + - **pyparsing**: ``2.2.2`` => ``2.4.6`` + - **python-dotenv**: ``0.9.1`` => ``0.10.2`` + - **pythonfinder**: ``1.1.10`` => ``1.2.2`` + - **pytoml**: ``(new)`` => ``0.1.20`` + - **requests**: ``2.20.1`` => ``2.23.0`` + - **requirementslib**: ``1.3.3`` => ``1.5.4`` + - **scandir**: ``1.9.0`` => ``1.10.0`` + - **shellingham**: ``1.2.7`` => ``1.3.2`` + - **six**: ``1.11.0`` => ``1.14.0`` + - **tomlkit**: ``0.5.2`` => ``0.5.11`` + - **urllib3**: ``1.24`` => ``1.25.8`` + - **vistir**: ``0.3.0`` => ``0.5.0`` + - **yaspin**: ``0.14.0`` => ``0.14.3`` + - **zipp**: ``0.6.0`` + +- Removed vendored dependency **cursor**. 
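The `pipenv/core.py` hunk below changes how `get_pip_args()` emits pip's `--exists-action` flag: with `selective_upgrade` enabled it keeps passing `--exists-action={PIP_EXISTS_ACTION or "i"}`, and with it disabled it now falls back to `--exists-action=i` so pip ignores pre-existing (e.g. VCS) checkouts instead of prompting, which is the race-condition workaround named in the commit message. A minimal sketch of that flag selection, with the rest of the function's `arg_map` machinery simplified away (the helper name is illustrative, not pipenv's API):

```python
# Illustrative reduction of the --exists-action logic added to get_pip_args();
# pip_exists_action stands in for the PIP_EXISTS_ACTION environment setting.
def exists_action_args(selective_upgrade, pip_exists_action=None):
    if selective_upgrade:
        return [
            "--upgrade-strategy=only-if-needed",
            "--exists-action={0}".format(pip_exists_action or "i"),
        ]
    # New fallback: even without selective upgrades, tell pip to (i)gnore
    # existing checkouts rather than prompt, avoiding the VCS race in CI.
    return ["--exists-action=i"]


assert exists_action_args(False) == ["--exists-action=i"]
assert exists_action_args(True, "w") == [
    "--upgrade-strategy=only-if-needed",
    "--exists-action=w",
]
```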
diff --git a/pipenv/core.py b/pipenv/core.py index 9070dfce64..4171ff6f47 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -1317,7 +1317,8 @@ def get_pip_args( "no_use_pep517": [], "no_deps": ["--no-deps"], "selective_upgrade": [ - "--upgrade-strategy=only-if-needed", "--exists-action={0}".format(PIP_EXISTS_ACTION or "i") + "--upgrade-strategy=only-if-needed", + "--exists-action={0}".format(PIP_EXISTS_ACTION or "i") ], "src_dir": src_dir, } @@ -1329,6 +1330,8 @@ def get_pip_args( for key in arg_map.keys(): if key in locals() and locals().get(key): arg_set.extend(arg_map.get(key)) + elif key == "selective_upgrade" and not locals().get(key): + arg_set.append("--exists-action=i") return list(vistir.misc.dedup(arg_set)) diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index a895e5011b..dbd78deb83 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -32,7 +32,7 @@ requirementslib==1.5.5 packaging==20.3 pyparsing==2.4.6 git+https://github.com/sarugaku/plette.git@master#egg=plette - tomlkit==0.5.8 + tomlkit==0.5.11 shellingham==1.3.2 six==1.14.0 semver==2.9.0 diff --git a/setup.py b/setup.py index c3ee913bba..015e5586e4 100644 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ "parver", "invoke", ], - "tests": ["pytest<5.0", "pytest-tap", "pytest-xdist", "flaky", "mock"], + "tests": ["pytest<5.0", "pytest-timeout", "pytest-xdist", "flaky", "mock"], } diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py index 7b0a11f20a..dbcdac0bc2 100644 --- a/tasks/vendoring/__init__.py +++ b/tasks/vendoring/__init__.py @@ -702,6 +702,43 @@ def download_all_licenses(ctx, include_pip=False): update_pip_deps(ctx) +def unpin_file(contents): + requirements = [] + for line in contents.splitlines(): + if "==" in line: + line, _, _ = line.strip().partition("=") + if not line.startswith("#"): + requirements.append(line) + return "\n".join(sorted(requirements)) + + +def unpin_and_copy_requirements(ctx, requirement_file, name="requirements.txt"): + with TemporaryDirectory() as tempdir: + target = Path(tempdir.name).joinpath("requirements.txt") + contents = unpin_file(requirement_file.read_text()) + target.write_text(contents) + env = {"PIPENV_IGNORE_VIRTUALENVS": "1", "PIPENV_NOSPIN": "1", "PIPENV_PYTHON": "2.7"} + with ctx.cd(tempdir.name): + ctx.run("pipenv install -r {0}".format(target.as_posix()), env=env, hide=True) + result = ctx.run("pipenv lock -r", env=env, hide=True).stdout.strip() + ctx.run("pipenv --rm", env=env, hide=True) + result = list(sorted([line.strip() for line in result.splitlines()[1:]])) + new_requirements = requirement_file.parent.joinpath(name) + requirement_file.rename(requirement_file.parent.joinpath("{}.bak".format(name))) + new_requirements.write_text("\n".join(result)) + return result + + +@invoke.task +def unpin_and_update_vendored(ctx, vendor=True, patched=False): + if vendor: + vendor_file = _get_vendor_dir(ctx) / "vendor.txt" + unpin_and_copy_requirements(ctx, vendor_file, name="vendor.txt") + if patched: + patched_file = _get_patched_dir(ctx) / "patched.txt" + unpin_and_copy_requirements(ctx, patched_file, name="patched.txt") + + @invoke.task(name=TASK_NAME) def main(ctx, package=None): vendor_dir = _get_vendor_dir(ctx) diff --git a/tasks/vendoring/patches/vendor/tomlkit-fix.patch b/tasks/vendoring/patches/vendor/tomlkit-fix.patch index c8101cde7c..49931e1fb7 100644 --- a/tasks/vendoring/patches/vendor/tomlkit-fix.patch +++ b/tasks/vendoring/patches/vendor/tomlkit-fix.patch @@ -85,7 +85,7 @@ index 8399d0c3..68c47a6d 100644 if PY2: + 
from pipenv.vendor.backports.enum import Enum - from functools32 import lru_cache + from pipenv.vendor.backports.functools_lru_cache import lru_cache else: + from enum import Enum from functools import lru_cache @@ -148,16 +148,3 @@ index 3b416664..631e9959 100644 from .api import loads from .toml_document import TOMLDocument -diff --git a/pipenv/vendor/tomlkit/toml_char.py b/pipenv/vendor/tomlkit/toml_char.py -index d649a917..02c55172 100644 ---- a/pipenv/vendor/tomlkit/toml_char.py -+++ b/pipenv/vendor/tomlkit/toml_char.py -@@ -4,7 +4,7 @@ from ._compat import PY2 - from ._compat import unicode - - if PY2: -- from functools32 import lru_cache -+ from pipenv.vendor.backports.functools_lru_cache import lru_cache - else: - from functools import lru_cache - diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 53973d348e..b6141162b6 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -69,7 +69,7 @@ def check_github_ssh(): # GitHub does not provide shell access.' if ssh keys are available and # registered with GitHub. Otherwise, the command will fail with # return_code=255 and say 'Permission denied (publickey).' - c = delegator.run('ssh -T git@github.com') + c = delegator.run('ssh -o StrictHostKeyChecking=accept-new -o CheckHostIP=no -T git@github.com', timeout=30) res = True if c.return_code == 1 else False except KeyboardInterrupt: warnings.warn( @@ -115,6 +115,10 @@ def pytest_runtest_setup(item): sys.version_info[:2] <= (2, 7) and os.name == "nt" ): pytest.skip('must use python > 2.7 on windows') + if item.get_closest_marker('skip_py38') is not None and ( + sys.version_info[:2] == (3, 8) + ): + pytest.skip('test not applicable on python 3.8') if item.get_closest_marker('py3_only') is not None and ( sys.version_info < (3, 0) ): @@ -151,6 +155,22 @@ def vistir_tmpdir(): yield Path(temp_path) +@pytest.fixture() +def local_tempdir(request): + old_temp = os.environ.get("TEMP", "") + new_temp = Path(os.getcwd()).absolute() / "temp" + new_temp.mkdir(parents=True, exist_ok=True) + os.environ["TEMP"] = new_temp.as_posix() + + def finalize(): + os.environ['TEMP'] = fs_str(old_temp) + _rmtree_func(new_temp.as_posix()) + + request.addfinalizer(finalize) + with TemporaryDirectory(dir=new_temp.as_posix()) as temp_dir: + yield Path(temp_dir.name) + + @pytest.fixture(name='create_tmpdir') def vistir_tmpdir_factory(): @@ -183,6 +203,7 @@ def isolate(create_tmpdir): os.environ["GIT_CONFIG_NOSYSTEM"] = fs_str("1") os.environ["GIT_AUTHOR_NAME"] = fs_str("pipenv") os.environ["GIT_AUTHOR_EMAIL"] = fs_str("pipenv@pipenv.org") + os.environ["GIT_ASK_YESNO"] = fs_str("false") workon_home = create_tmpdir() os.environ["WORKON_HOME"] = fs_str(str(workon_home)) os.environ["HOME"] = home_dir diff --git a/tests/integration/test_install_basic.py b/tests/integration/test_install_basic.py index 88efd3ba88..9d3247fed8 100644 --- a/tests/integration/test_install_basic.py +++ b/tests/integration/test_install_basic.py @@ -11,8 +11,9 @@ from pipenv.vendor import delegator -@pytest.mark.install @pytest.mark.setup +@pytest.mark.basic +@pytest.mark.install def test_basic_setup(PipenvInstance): with PipenvInstance() as p: with PipenvInstance(pipfile=False) as p: @@ -28,6 +29,7 @@ def test_basic_setup(PipenvInstance): @flaky +@pytest.mark.basic @pytest.mark.install @pytest.mark.skip_osx def test_basic_install(PipenvInstance): @@ -43,6 +45,7 @@ def test_basic_install(PipenvInstance): @flaky +@pytest.mark.basic @pytest.mark.install def test_mirror_install(PipenvInstance): with 
temp_environ(), PipenvInstance(chdir=True) as p: @@ -69,6 +72,7 @@ def test_mirror_install(PipenvInstance): @flaky +@pytest.mark.basic @pytest.mark.install @pytest.mark.needs_internet def test_bad_mirror_install(PipenvInstance): @@ -111,6 +115,7 @@ def test_basic_dev_install(PipenvInstance): @flaky @pytest.mark.dev +@pytest.mark.basic @pytest.mark.install def test_install_without_dev(PipenvInstance): """Ensure that running `pipenv install` doesn't install dev packages""" @@ -137,6 +142,7 @@ def test_install_without_dev(PipenvInstance): @flaky +@pytest.mark.basic @pytest.mark.install def test_install_without_dev_section(PipenvInstance): with PipenvInstance() as p: @@ -157,6 +163,7 @@ def test_install_without_dev_section(PipenvInstance): @flaky +@pytest.mark.lock @pytest.mark.extras @pytest.mark.install def test_extras_install(PipenvInstance): @@ -175,6 +182,7 @@ def test_extras_install(PipenvInstance): @flaky @pytest.mark.pin +@pytest.mark.basic @pytest.mark.install def test_windows_pinned_pipfile(PipenvInstance): with PipenvInstance() as p: @@ -191,6 +199,7 @@ def test_windows_pinned_pipfile(PipenvInstance): @flaky +@pytest.mark.basic @pytest.mark.install @pytest.mark.resolver @pytest.mark.backup_resolver @@ -259,6 +268,7 @@ def test_outline_table_specifier(PipenvInstance): @pytest.mark.bad +@pytest.mark.basic @pytest.mark.install def test_bad_packages(PipenvInstance): with PipenvInstance() as p: @@ -266,6 +276,7 @@ def test_bad_packages(PipenvInstance): assert c.return_code > 0 +@pytest.mark.lock @pytest.mark.extras @pytest.mark.install @pytest.mark.requirements @@ -296,6 +307,7 @@ def test_requirements_to_pipfile(PipenvInstance): assert "pysocks" in p.lockfile["default"] +@pytest.mark.basic @pytest.mark.install @pytest.mark.skip_osx @pytest.mark.requirements @@ -335,6 +347,7 @@ def test_clean_on_empty_venv(PipenvInstance): assert c.return_code == 0 +@pytest.mark.basic @pytest.mark.install def test_install_does_not_extrapolate_environ(PipenvInstance): """Ensure environment variables are not expanded in lock file. 
@@ -366,6 +379,7 @@ def test_install_does_not_extrapolate_environ(PipenvInstance): assert p.lockfile["_meta"]["sources"][0]["url"] == "${PYPI_URL}/simple" +@pytest.mark.basic @pytest.mark.editable @pytest.mark.badparameter @pytest.mark.install @@ -376,6 +390,7 @@ def test_editable_no_args(PipenvInstance): assert "Error: -e option requires an argument" in c.err +@pytest.mark.basic @pytest.mark.install @pytest.mark.virtualenv def test_install_venv_project_directory(PipenvInstance): @@ -400,6 +415,7 @@ def test_install_venv_project_directory(PipenvInstance): assert venv_loc.joinpath(".project").exists() +@pytest.mark.cli @pytest.mark.deploy @pytest.mark.system def test_system_and_deploy_work(PipenvInstance): @@ -424,6 +440,7 @@ def test_system_and_deploy_work(PipenvInstance): assert c.return_code == 0 +@pytest.mark.basic @pytest.mark.install def test_install_creates_pipfile(PipenvInstance): with PipenvInstance(chdir=True) as p: @@ -437,6 +454,7 @@ def test_install_creates_pipfile(PipenvInstance): assert os.path.isfile(p.pipfile_path) +@pytest.mark.basic @pytest.mark.install def test_install_non_exist_dep(PipenvInstance): with PipenvInstance(chdir=True) as p: @@ -445,6 +463,7 @@ def test_install_non_exist_dep(PipenvInstance): assert "dateutil" not in p.pipfile["packages"] +@pytest.mark.basic @pytest.mark.install def test_install_package_with_dots(PipenvInstance): with PipenvInstance(chdir=True) as p: @@ -453,6 +472,7 @@ def test_install_package_with_dots(PipenvInstance): assert "backports.html" in p.pipfile["packages"] +@pytest.mark.basic @pytest.mark.install def test_rewrite_outline_table(PipenvInstance): with PipenvInstance(chdir=True) as p: diff --git a/tests/integration/test_install_markers.py b/tests/integration/test_install_markers.py index de3ba19369..80532ddf22 100644 --- a/tests/integration/test_install_markers.py +++ b/tests/integration/test_install_markers.py @@ -58,8 +58,8 @@ def test_platform_python_implementation_marker(PipenvInstance): @flaky -@pytest.mark.run @pytest.mark.alt +@pytest.mark.markers @pytest.mark.install def test_specific_package_environment_markers(PipenvInstance): @@ -127,7 +127,7 @@ def test_global_overrides_environment_markers(PipenvInstance): @flaky -@pytest.mark.lock +@pytest.mark.markers @pytest.mark.complex @pytest.mark.py3_only @pytest.mark.lte_py36 diff --git a/tests/integration/test_install_twists.py b/tests/integration/test_install_twists.py index 4fcbd4c5fc..907264467b 100644 --- a/tests/integration/test_install_twists.py +++ b/tests/integration/test_install_twists.py @@ -63,8 +63,8 @@ def test_local_extras_install(PipenvInstance): assert "six" in p.lockfile["default"] -@pytest.mark.install @pytest.mark.local +@pytest.mark.install @pytest.mark.needs_internet @flaky class TestDirectDependencies(object): @@ -122,6 +122,7 @@ def test_ssh_dependency_links_install(self, PipenvInstance): @pytest.mark.e +@pytest.mark.local @pytest.mark.install @pytest.mark.skip(reason="this doesn't work on windows") def test_e_dot(PipenvInstance, pip_src_dir): @@ -135,8 +136,8 @@ def test_e_dot(PipenvInstance, pip_src_dir): assert "path" in p.pipfile["dev-packages"][key] assert "requests" in p.lockfile["develop"] - @pytest.mark.install +@pytest.mark.multiprocessing @flaky def test_multiprocess_bug_and_install(PipenvInstance): with temp_environ(): @@ -163,8 +164,8 @@ def test_multiprocess_bug_and_install(PipenvInstance): assert c.return_code == 0 -@pytest.mark.sequential @pytest.mark.install +@pytest.mark.sequential @flaky def test_sequential_mode(PipenvInstance): @@ -189,8 
+190,8 @@ def test_sequential_mode(PipenvInstance): assert c.return_code == 0 -@pytest.mark.install @pytest.mark.run +@pytest.mark.install def test_normalize_name_install(PipenvInstance): with PipenvInstance() as p: with open(p.pipfile_path, "w") as f: @@ -221,9 +222,10 @@ def test_normalize_name_install(PipenvInstance): @flaky +@pytest.mark.eggs @pytest.mark.files +@pytest.mark.local @pytest.mark.resolver -@pytest.mark.eggs def test_local_package(PipenvInstance, pip_src_dir, testsroot): """This test ensures that local packages (directories with a setup.py) installed in editable mode have their dependencies resolved as well""" @@ -248,6 +250,7 @@ def test_local_package(PipenvInstance, pip_src_dir, testsroot): @pytest.mark.files +@pytest.mark.local @flaky def test_local_zipfiles(PipenvInstance, testsroot): file_name = "requests-2.19.1.tar.gz" @@ -272,6 +275,7 @@ def test_local_zipfiles(PipenvInstance, testsroot): assert "file" in dep or "path" in dep +@pytest.mark.local @pytest.mark.files @flaky def test_relative_paths(PipenvInstance, testsroot): @@ -295,6 +299,7 @@ def test_relative_paths(PipenvInstance, testsroot): @pytest.mark.install +@pytest.mark.local @pytest.mark.local_file @flaky def test_install_local_file_collision(PipenvInstance): @@ -310,7 +315,7 @@ def test_install_local_file_collision(PipenvInstance): assert target_package in p.lockfile["default"] -@pytest.mark.url +@pytest.mark.urls @pytest.mark.install def test_install_local_uri_special_character(PipenvInstance, testsroot): file_name = "six-1.11.0+mkl-py2.py3-none-any.whl" @@ -334,9 +339,9 @@ def test_install_local_uri_special_character(PipenvInstance, testsroot): assert "six" in p.lockfile["default"] +@pytest.mark.run @pytest.mark.files @pytest.mark.install -@pytest.mark.run def test_multiple_editable_packages_should_not_race(PipenvInstance, testsroot): """Test for a race condition that can occur when installing multiple 'editable' packages at once, and which causes some of them to not be importable. 
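The marker additions throughout these test hunks exist so CI can partition the integration suite: the azure-pipelines.yml change earlier in this patch splits the Windows run into two jobs selected by `pytest -m` expressions. A rough sketch of that selection, assuming `pytest-xdist` (for `-n`) and `pytest-timeout` (for `--timeout`) are installed as the updated setup.py requires (the `run_job` helper itself is hypothetical):

```python
# Hypothetical driver mirroring the two Windows CI invocations; the marker
# expressions are copied verbatim from the azure-pipelines.yml hunk above.
import pytest

JOB_MARKERS = {
    1: "lock or dotvenv or markers or project or utils or patched or core or cli",
    2: "urls or multiprocessing or local or sequential or run or outdated "
       "or basic or code or uninstall",
}


def run_job(job_number):
    # Equivalent to: pipenv run pytest -n 4 -m "<expr>" --timeout 300 tests/
    return pytest.main(
        ["-n", "4", "-m", JOB_MARKERS[job_number], "--timeout", "300", "tests/"]
    )
```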
diff --git a/tests/integration/test_install_uri.py b/tests/integration/test_install_uri.py index b71df9651c..8772df54ff 100644 --- a/tests/integration/test_install_uri.py +++ b/tests/integration/test_install_uri.py @@ -123,6 +123,7 @@ def test_local_vcs_urls_work(PipenvInstance, tmpdir): @pytest.mark.e @pytest.mark.vcs +@pytest.mark.urls @pytest.mark.install @pytest.mark.needs_internet def test_editable_vcs_install(PipenvInstance_NoPyPI): @@ -142,7 +143,7 @@ def test_editable_vcs_install(PipenvInstance_NoPyPI): @pytest.mark.vcs -@pytest.mark.tablib +@pytest.mark.urls @pytest.mark.install @pytest.mark.needs_internet def test_install_editable_git_tag(PipenvInstance_NoPyPI): @@ -163,6 +164,7 @@ def test_install_editable_git_tag(PipenvInstance_NoPyPI): assert "ref" in p.lockfile["default"]["six"] +@pytest.mark.urls @pytest.mark.index @pytest.mark.install @pytest.mark.needs_internet @@ -191,6 +193,7 @@ def test_install_named_index_alias(PipenvInstance_NoPyPI): @pytest.mark.vcs +@pytest.mark.urls @pytest.mark.install @pytest.mark.needs_internet def test_install_local_vcs_not_in_lockfile(PipenvInstance): @@ -207,6 +210,7 @@ def test_install_local_vcs_not_in_lockfile(PipenvInstance): @pytest.mark.vcs +@pytest.mark.urls @pytest.mark.install @pytest.mark.needs_internet def test_get_vcs_refs(PipenvInstance_NoPyPI): @@ -235,9 +239,11 @@ def test_get_vcs_refs(PipenvInstance_NoPyPI): @pytest.mark.vcs +@pytest.mark.urls @pytest.mark.install @pytest.mark.needs_internet @pytest.mark.skip_py27_win +@pytest.mark.skip_py38 def test_vcs_entry_supersedes_non_vcs(PipenvInstance): """See issue #2181 -- non-editable VCS dep was specified, but not showing up in the lockfile -- due to not running pip install before locking and not locking @@ -256,7 +262,7 @@ def test_vcs_entry_supersedes_non_vcs(PipenvInstance): [packages] PyUpdater = "*" -PyInstaller = {{ref = "develop", git = "{0}"}} +PyInstaller = {{ref = "v3.6", git = "{0}"}} """.format(pyinstaller_uri).strip() ) c = p.pipenv("install") @@ -273,6 +279,7 @@ def test_vcs_entry_supersedes_non_vcs(PipenvInstance): @pytest.mark.vcs +@pytest.mark.urls @pytest.mark.install @pytest.mark.needs_internet def test_vcs_can_use_markers(PipenvInstance): diff --git a/tests/integration/test_lock.py b/tests/integration/test_lock.py index 7d207ccaa0..7b81e0be18 100644 --- a/tests/integration/test_lock.py +++ b/tests/integration/test_lock.py @@ -126,6 +126,7 @@ def test_keep_outdated_doesnt_upgrade_pipfile_pins(PipenvInstance): assert p.lockfile["default"]["urllib3"]["version"] == "==1.21.1" +@pytest.mark.lock def test_keep_outdated_keeps_markers_not_removed(PipenvInstance): with PipenvInstance(chdir=True) as p: c = p.pipenv("install six click") @@ -164,10 +165,10 @@ def test_keep_outdated_doesnt_update_satisfied_constraints(PipenvInstance): @pytest.mark.lock @pytest.mark.complex @pytest.mark.needs_internet -def test_complex_lock_with_vcs_deps(PipenvInstance, pip_src_dir): +def test_complex_lock_with_vcs_deps(local_tempdir, PipenvInstance, pip_src_dir): # This uses the real PyPI since we need Internet to access the Git # dependency anyway. 
- with PipenvInstance() as p: + with PipenvInstance() as p, local_tempdir: with open(p.pipfile_path, 'w') as f: contents = """ [packages] diff --git a/tests/integration/test_pipenv.py b/tests/integration/test_pipenv.py index 12ae2348bd..5e172f8b24 100644 --- a/tests/integration/test_pipenv.py +++ b/tests/integration/test_pipenv.py @@ -27,7 +27,6 @@ def test_code_import_manual(PipenvInstance): @pytest.mark.lock @pytest.mark.deploy -@pytest.mark.cli def test_deploy_works(PipenvInstance): with PipenvInstance(chdir=True) as p: diff --git a/tests/integration/test_sync.py b/tests/integration/test_sync.py index d085aaf421..1c5b3e9612 100644 --- a/tests/integration/test_sync.py +++ b/tests/integration/test_sync.py @@ -8,6 +8,7 @@ from pipenv.utils import temp_environ +@pytest.mark.lock @pytest.mark.sync def test_sync_error_without_lockfile(PipenvInstance): with PipenvInstance(chdir=True) as p: diff --git a/tests/integration/test_uninstall.py b/tests/integration/test_uninstall.py index bc82df8d94..c87c702f40 100644 --- a/tests/integration/test_uninstall.py +++ b/tests/integration/test_uninstall.py @@ -8,7 +8,6 @@ from pipenv.utils import temp_environ -@pytest.mark.run @pytest.mark.uninstall @pytest.mark.install def test_uninstall_requests(PipenvInstance): @@ -32,6 +31,7 @@ def test_uninstall_requests(PipenvInstance): assert c.return_code > 0 +@pytest.mark.uninstall def test_uninstall_django(PipenvInstance): with PipenvInstance() as p: c = p.pipenv("install Django==1.11.13") @@ -53,9 +53,8 @@ def test_uninstall_django(PipenvInstance): assert c.return_code > 0 -@pytest.mark.run -@pytest.mark.uninstall @pytest.mark.install +@pytest.mark.uninstall def test_mirror_uninstall(PipenvInstance): with temp_environ(), PipenvInstance(chdir=True) as p: @@ -94,8 +93,8 @@ def test_mirror_uninstall(PipenvInstance): @pytest.mark.files -@pytest.mark.uninstall @pytest.mark.install +@pytest.mark.uninstall def test_uninstall_all_local_files(PipenvInstance, testsroot): file_name = "tablib-0.12.1.tar.gz" # Not sure where travis/appveyor run tests from @@ -114,9 +113,8 @@ def test_uninstall_all_local_files(PipenvInstance, testsroot): assert "tablib" in p.pipfile["packages"] -@pytest.mark.run -@pytest.mark.uninstall @pytest.mark.install +@pytest.mark.uninstall def test_uninstall_all_dev(PipenvInstance): with PipenvInstance() as p: c = p.pipenv("install --dev Django==1.11.13 six") @@ -151,7 +149,6 @@ def test_uninstall_all_dev(PipenvInstance): @pytest.mark.uninstall -@pytest.mark.run def test_normalize_name_uninstall(PipenvInstance): with PipenvInstance() as p: with open(p.pipfile_path, "w") as f: diff --git a/tests/integration/test_windows.py b/tests/integration/test_windows.py index a74be38671..7355bbd0ef 100644 --- a/tests/integration/test_windows.py +++ b/tests/integration/test_windows.py @@ -39,6 +39,7 @@ def test_case_changes_windows(PipenvInstance): @pytest.mark.files +@pytest.mark.local def test_local_path_windows(PipenvInstance): whl = ( Path(__file__).parent.parent @@ -53,6 +54,7 @@ def test_local_path_windows(PipenvInstance): assert c.return_code == 0 +@pytest.mark.local @pytest.mark.files def test_local_path_windows_forward_slash(PipenvInstance): whl = ( diff --git a/tests/unit/test_core.py b/tests/unit/test_core.py index 61d318c368..eb79985001 100644 --- a/tests/unit/test_core.py +++ b/tests/unit/test_core.py @@ -20,8 +20,12 @@ def test_suppress_nested_venv_warning(capsys): @pytest.mark.core -def test_load_dot_env_from_environment_variable_location(capsys): - with temp_environ(), 
TemporaryDirectory(prefix='pipenv-', suffix='') as tempdir: +def test_load_dot_env_from_environment_variable_location(monkeypatch, capsys): + with temp_environ(), monkeypatch.context() as m, TemporaryDirectory(prefix='pipenv-', suffix='') as tempdir: + if os.name == "nt": + import click + is_console = False + m.setattr(click._winconsole, "_is_console", lambda x: is_console) dotenv_path = os.path.join(tempdir.name, 'test.env') key, val = 'SOME_KEY', 'some_value' with open(dotenv_path, 'w') as f: @@ -33,8 +37,12 @@ def test_load_dot_env_from_environment_variable_location(capsys): @pytest.mark.core -def test_doesnt_load_dot_env_if_disabled(capsys): - with temp_environ(), TemporaryDirectory(prefix='pipenv-', suffix='') as tempdir: +def test_doesnt_load_dot_env_if_disabled(monkeypatch, capsys): + with temp_environ(), monkeypatch.context() as m, TemporaryDirectory(prefix='pipenv-', suffix='') as tempdir: + if os.name == "nt": + import click + is_console = False + m.setattr(click._winconsole, "_is_console", lambda x: is_console) dotenv_path = os.path.join(tempdir.name, 'test.env') key, val = 'SOME_KEY', 'some_value' with open(dotenv_path, 'w') as f: @@ -50,8 +58,12 @@ def test_doesnt_load_dot_env_if_disabled(capsys): @pytest.mark.core -def test_load_dot_env_warns_if_file_doesnt_exist(capsys): - with temp_environ(), TemporaryDirectory(prefix='pipenv-', suffix='') as tempdir: +def test_load_dot_env_warns_if_file_doesnt_exist(monkeypatch, capsys): + with temp_environ(), monkeypatch.context() as m, TemporaryDirectory(prefix='pipenv-', suffix='') as tempdir: + if os.name == "nt": + import click + is_console = False + m.setattr(click._winconsole, "_is_console", lambda x: is_console) dotenv_path = os.path.join(tempdir.name, 'does-not-exist.env') with mock.patch('pipenv.environments.PIPENV_DOTENV_LOCATION', dotenv_path): load_dot_env() diff --git a/tests/unit/test_help.py b/tests/unit/test_help.py index d37556018a..5179e4d3d1 100644 --- a/tests/unit/test_help.py +++ b/tests/unit/test_help.py @@ -2,7 +2,11 @@ import subprocess import sys +import pytest + +@pytest.mark.cli +@pytest.mark.help def test_help(): output = subprocess.check_output( [sys.executable, '-m', 'pipenv.help'], @@ -11,6 +15,8 @@ def test_help(): assert output +@pytest.mark.cli +@pytest.mark.help def test_count_of_description_pre_option(): test_command = 'pipenv install --help' test_line = '--pre Allow pre-releases.' 
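
Aside, illustrative only: the marker shuffling in these test modules is what lets CI select subsets of the suite with a pytest "-m" expression, e.g. "urls or local or uninstall" as used by the azure-pipelines templates later in this series. A minimal sketch of how such markers can be declared so that strict runs accept them; the conftest.py hook below is a hypothetical illustration, not code from this patch:

    # conftest.py (sketch only; not part of this patch)
    def pytest_configure(config):
        # Register the custom markers so `pytest --strict-markers` accepts them;
        # the names mirror the markers added in the hunks above.
        for name in ("urls", "lock", "cli", "help", "local", "uninstall", "patched"):
            config.addinivalue_line("markers", "{0}: pipenv test category".format(name))

Selection then happens on the command line, e.g. "pipenv run pytest -n 4 -m 'urls or local or uninstall' tests/".
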
diff --git a/tests/unit/test_patched.py b/tests/unit/test_patched.py index 03e1c0390a..f358191538 100644 --- a/tests/unit/test_patched.py +++ b/tests/unit/test_patched.py @@ -123,6 +123,7 @@ } +@pytest.mark.patched @pytest.mark.parametrize( 'scenarios,expected', list(get_extras_links_scenarios.values()), diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index f9dd08a8ef..4b9cd75ce6 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -425,6 +425,7 @@ def test_prepare_pip_source_args(self, sources, expected_args): == expected_args ) + @pytest.mark.utils def test_invalid_prepare_pip_source_args(self): sources = [{}] with pytest.raises(PipenvUsageError): diff --git a/tests/unit/test_utils_windows_executable.py b/tests/unit/test_utils_windows_executable.py index b74cfbf313..22f20b35e4 100644 --- a/tests/unit/test_utils_windows_executable.py +++ b/tests/unit/test_utils_windows_executable.py @@ -13,6 +13,7 @@ ) +@pytest.mark.utils @mock.patch('os.path.isfile') @mock.patch('pipenv.utils.find_executable') def test_find_windows_executable(mocked_find_executable, mocked_isfile): From a79c91f7e9f0458dfb9be20709cc4d0dd0b95c12 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Fri, 3 Apr 2020 15:33:25 -0400 Subject: [PATCH 17/49] Update azure templates Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/steps/run-tests-windows.yml | 12 +---- .azure-pipelines/steps/run-tests.yml | 12 ++--- azure-pipelines.yml | 56 ++++++++++---------- 3 files changed, 35 insertions(+), 45 deletions(-) diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml index 5fbb6cf105..1324947375 100644 --- a/.azure-pipelines/steps/run-tests-windows.yml +++ b/.azure-pipelines/steps/run-tests-windows.yml @@ -1,7 +1,6 @@ parameters: python_version: '' python_architecture: '' - pytest_markers: '' steps: - task: UsePythonVersion@0 @@ -35,18 +34,11 @@ steps: - powershell: | git submodule sync git submodule update --init --recursive - Write-Host "Running Command: pipenv run pytest -n auto -m '${{ parameters.pytest_markers }}' --junitxml=test-results-${{ parameters.test_number }}.xml --timeout 300 tests/" - pipenv run pytest -n 4 -vvv -m '${{ parameters.pytest_markers }}' --junitxml=test-results.xml --timeout 300 tests/ + Write-Host "Running Command: pipenv run pytest -n 4 --junitxml=test-results.xml --timeout 300 tests/" + pipenv run pytest -n 4 -vvv --junitxml=test-results.xml --timeout 300 tests/ failOnStderr: false displayName: Run integration tests env: PYTHONWARNINGS: 'ignore:DEPRECATION' PIPENV_NOSPIN: '1' GIT_SSH_COMMAND: ssh -o StrictHostKeyChecking=accept-new -o CheckHostIP=no - - - task: PublishTestResults@2 - displayName: Publish Test Results - inputs: - testResultsFiles: '**/test-results-${{ parameters.test_number }}.xml' - testRunTitle: 'Python ${{ parameters.python_version }}' - condition: succeededOrFailed() diff --git a/.azure-pipelines/steps/run-tests.yml b/.azure-pipelines/steps/run-tests.yml index e33a91004a..ce4b501659 100644 --- a/.azure-pipelines/steps/run-tests.yml +++ b/.azure-pipelines/steps/run-tests.yml @@ -26,9 +26,9 @@ steps: parameters: python_version: $(python.version) - - task: PublishTestResults@2 - displayName: Publish Test Results - inputs: - testResultsFiles: '**/test-results.xml' - testRunTitle: 'Python $(python.version)' - condition: succeededOrFailed() +- task: PublishTestResults@2 + displayName: Publish Test Results + inputs: + testResultsFiles: '**/test-results.xml' + 
testRunTitle: 'Python $(python.version)' + condition: succeededOrFailed() diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 4f682d3b20..a0f30c1896 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -94,36 +94,34 @@ jobs: vmImage: windows-latest python_version: $(python.version) python_architecture: $(python.architecture) - test_number: "1" - pytest_markers: "lock or dotvenv or markers or project or utils or patched or core or cli" + # pytest_markers: "lock or dotvenv or markers or project or utils or patched or core or cli" -- job: TestWindows2 - timeoutInMinutes: 0 - pool: - vmImage: windows-latest - strategy: - matrix: - Python27: - python.version: '2.7' - python.architecture: x64 - Python36: - python.version: '3.6' - python.architecture: x64 - Python37: - python.version: '3.7' - python.architecture: x64 - Python38: - python.version: '3.8' - python.architecture: x64 - maxParallel: 8 - steps: - - template: .azure-pipelines/steps/run-tests-windows.yml - parameters: - vmImage: windows-latest - python_version: $(python.version) - python_architecture: $(python.architecture) - test_number: "2" - pytest_markers: "urls or multiprocessing or local or sequential or run or outdated or basic or code or uninstall" +# - job: TestWindows2 +# timeoutInMinutes: 0 +# pool: +# vmImage: windows-latest +# strategy: +# matrix: +# Python27: +# python.version: '2.7' +# python.architecture: x64 +# Python36: +# python.version: '3.6' +# python.architecture: x64 +# Python37: +# python.version: '3.7' +# python.architecture: x64 +# Python38: +# python.version: '3.8' +# python.architecture: x64 +# maxParallel: 8 +# steps: +# - template: .azure-pipelines/steps/run-tests-windows.yml +# parameters: +# vmImage: windows-latest +# python_version: $(python.version) +# python_architecture: $(python.architecture) +# pytest_markers: "urls or multiprocessing or local or sequential or run or outdated or basic or code or uninstall" - job: TestMacOS pool: From d91a9d4fa6a58bfe76031941c4dea9bbe73ed11d Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Fri, 3 Apr 2020 16:02:24 -0400 Subject: [PATCH 18/49] Normalize paths in test_run_in_virtualenv for windows Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tests/integration/test_project.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_project.py b/tests/integration/test_project.py index bef9912fe7..ce5ab380cc 100644 --- a/tests/integration/test_project.py +++ b/tests/integration/test_project.py @@ -8,7 +8,7 @@ from pipenv.patched import pipfile from pipenv.project import Project -from pipenv.utils import temp_environ +from pipenv.utils import temp_environ, normalize_path from pipenv.vendor.vistir.path import is_in_path from pipenv.vendor.delegator import run as delegator_run @@ -221,7 +221,9 @@ def test_run_in_virtualenv(PipenvInstance): assert c.return_code == 0 c = p.pipenv('run python -c "import click;print(click.__file__)"') assert c.return_code == 0 - assert c.out.strip().startswith(str(project.virtualenv_location)) + assert normalize_path(c.out.strip()).startswith( + normalize_path(str(project.virtualenv_location)) + ) c = p.pipenv("clean --dry-run") assert c.return_code == 0 assert "click" in c.out From c1061bd0a208687b4b08aacf2e3f81403ff88521 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Fri, 3 Apr 2020 16:30:01 -0400 Subject: [PATCH 19/49] Use vistir path normalization for shortened windows paths Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- 
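
Note, placed below the fold where "git am" ignores it: on Windows the interpreter path and CI temp directories can be reported in DOS 8.3 "short" form, e.g. RUNNER~1 rather than the expanded profile directory, so a plain startswith() check between two spellings of the same path fails. A sketch of the comparison pattern this patch standardizes on, using hypothetical paths; per the commit message, vistir's normalize_path is what irons out these shortened Windows spellings:

    # Sketch only; the paths are hypothetical.
    from pipenv.vendor.vistir.path import normalize_path

    out = "C:\\Users\\RUNNER~1\\proj\\.venv\\lib\\click\\__init__.py"   # short-path spelling
    venv = "C:\\Users\\runneradmin\\proj\\.venv"                        # expanded spelling
    # Both sides go through normalize_path before the prefix comparison,
    # so case, separator style, and 8.3 aliasing no longer break the check.
    in_venv = normalize_path(out).startswith(normalize_path(venv))
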
pipenv/core.py | 2 +- tests/integration/test_project.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pipenv/core.py b/pipenv/core.py index 4171ff6f47..b54f190676 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -1469,7 +1469,7 @@ def pip_install( ) pip_command.extend(pip_args) if r: - pip_command.extend(["-r", r]) + pip_command.extend(["-r", vistir.path.normalize_path(r)]) elif line: pip_command.extend(line) pip_command.extend(prepare_pip_source_args(sources)) diff --git a/tests/integration/test_project.py b/tests/integration/test_project.py index ce5ab380cc..ad38d74a46 100644 --- a/tests/integration/test_project.py +++ b/tests/integration/test_project.py @@ -8,8 +8,8 @@ from pipenv.patched import pipfile from pipenv.project import Project -from pipenv.utils import temp_environ, normalize_path -from pipenv.vendor.vistir.path import is_in_path +from pipenv.utils import temp_environ +from pipenv.vendor.vistir.path import is_in_path, normalize_path from pipenv.vendor.delegator import run as delegator_run From 13c91f7a617eb2a8fa87bfea0adb6d242befc542 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Fri, 3 Apr 2020 18:03:09 -0400 Subject: [PATCH 20/49] stop using verbose mode in windows tests Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/steps/run-tests-windows.yml | 2 +- Pipfile.lock | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml index 1324947375..376a83d55d 100644 --- a/.azure-pipelines/steps/run-tests-windows.yml +++ b/.azure-pipelines/steps/run-tests-windows.yml @@ -35,7 +35,7 @@ steps: git submodule sync git submodule update --init --recursive Write-Host "Running Command: pipenv run pytest -n 4 --junitxml=test-results.xml --timeout 300 tests/" - pipenv run pytest -n 4 -vvv --junitxml=test-results.xml --timeout 300 tests/ + pipenv run pytest -n 4 --junitxml=test-results.xml --timeout 300 tests/ failOnStderr: false displayName: Run integration tests env: diff --git a/Pipfile.lock b/Pipfile.lock index 466065f277..dfdf6a3506 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -863,11 +863,11 @@ }, "virtualenv": { "hashes": [ - "sha256:4e399f48c6b71228bf79f5febd27e3bbb753d9d5905776a86667bc61ab628a25", - "sha256:9e81279f4a9d16d1c0654a127c2c86e5bca2073585341691882c1e66e31ef8a5" + "sha256:55059a7a676e4e19498f1aad09b8313a38fcc0cdbe4fdddc0e9b06946d21b4bb", + "sha256:0d62c70883c0342d59c11d0ddac0d954d0431321a41ab20851facf2b222598f3" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==20.0.15" + "version": "==16.7.9" }, "virtualenv-clone": { "hashes": [ From fdea18e1af9047e55ef0ff03824943351c3999a0 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Fri, 3 Apr 2020 18:49:55 -0400 Subject: [PATCH 21/49] Only use wrapped streams if not running CI Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/__init__.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/pipenv/__init__.py b/pipenv/__init__.py index 624397c549..481b460875 100644 --- a/pipenv/__init__.py +++ b/pipenv/__init__.py @@ -37,20 +37,22 @@ pass from pipenv.vendor.vistir.misc import get_text_stream +from pipenv.environments import PIPENV_IS_CI -stdout = get_text_stream("stdout") -stderr = get_text_stream("stderr") +if not PIPENV_IS_CI: + stdout = get_text_stream("stdout") + stderr = get_text_stream("stderr") -if os.name == "nt": - from 
pipenv.vendor.vistir.misc import _can_use_color, _wrap_for_color + if os.name == "nt": + from pipenv.vendor.vistir.misc import _can_use_color, _wrap_for_color - if _can_use_color(stdout): - stdout = _wrap_for_color(stdout) - if _can_use_color(stderr): - stderr = _wrap_for_color(stderr) + if _can_use_color(stdout): + stdout = _wrap_for_color(stdout) + if _can_use_color(stderr): + stderr = _wrap_for_color(stderr) -sys.stdout = stdout -sys.stderr = stderr + sys.stdout = stdout + sys.stderr = stderr from .cli import cli from . import resolver # noqa From e911c9ac719ee105ca9339f1c12cc45cb44a849a Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Fri, 3 Apr 2020 22:34:37 -0400 Subject: [PATCH 22/49] Use monkeypatch instead of mock for setting dotenv test attributes Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/__init__.py | 22 ++++++++++------------ tests/unit/test_core.py | 25 ++++++++++++++----------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/pipenv/__init__.py b/pipenv/__init__.py index 481b460875..624397c549 100644 --- a/pipenv/__init__.py +++ b/pipenv/__init__.py @@ -37,22 +37,20 @@ pass from pipenv.vendor.vistir.misc import get_text_stream -from pipenv.environments import PIPENV_IS_CI -if not PIPENV_IS_CI: - stdout = get_text_stream("stdout") - stderr = get_text_stream("stderr") +stdout = get_text_stream("stdout") +stderr = get_text_stream("stderr") - if os.name == "nt": - from pipenv.vendor.vistir.misc import _can_use_color, _wrap_for_color +if os.name == "nt": + from pipenv.vendor.vistir.misc import _can_use_color, _wrap_for_color - if _can_use_color(stdout): - stdout = _wrap_for_color(stdout) - if _can_use_color(stderr): - stderr = _wrap_for_color(stderr) + if _can_use_color(stdout): + stdout = _wrap_for_color(stdout) + if _can_use_color(stderr): + stderr = _wrap_for_color(stderr) - sys.stdout = stdout - sys.stderr = stderr +sys.stdout = stdout +sys.stderr = stderr from .cli import cli from . 
import resolver # noqa diff --git a/tests/unit/test_core.py b/tests/unit/test_core.py index eb79985001..71a7681f12 100644 --- a/tests/unit/test_core.py +++ b/tests/unit/test_core.py @@ -31,8 +31,9 @@ def test_load_dot_env_from_environment_variable_location(monkeypatch, capsys): with open(dotenv_path, 'w') as f: f.write('{}={}'.format(key, val)) - with mock.patch('pipenv.environments.PIPENV_DOTENV_LOCATION', dotenv_path): - load_dot_env() + m.setenv("PIPENV_DOTENV_LOCATION", dotenv_path) + m.setattr("pipenv.environments", "PIPENV_DOTENV_LOCATION", dotenv_path) + load_dot_env() assert os.environ[key] == val @@ -48,13 +49,14 @@ def test_doesnt_load_dot_env_if_disabled(monkeypatch, capsys): with open(dotenv_path, 'w') as f: f.write('{}={}'.format(key, val)) - with mock.patch('pipenv.environments.PIPENV_DOTENV_LOCATION', dotenv_path): - with mock.patch('pipenv.environments.PIPENV_DONT_LOAD_ENV', '1'): - load_dot_env() - assert key not in os.environ - - load_dot_env() - assert key in os.environ + m.setenv("PIPENV_DOTENV_LOCATION", dotenv_path) + m.setattr("pipenv.environments.PIPENV_DOTENV_LOCATION", dotenv_path) + m.setattr("pipenv.environments.PIPENV_DONT_LOAD_ENV", True) + load_dot_env() + assert key not in os.environ + m.setattr("pipenv.environments.PIPENV_DONT_LOAD_ENV", False) + load_dot_env() + assert key in os.environ @pytest.mark.core @@ -65,7 +67,8 @@ def test_load_dot_env_warns_if_file_doesnt_exist(monkeypatch, capsys): is_console = False m.setattr(click._winconsole, "_is_console", lambda x: is_console) dotenv_path = os.path.join(tempdir.name, 'does-not-exist.env') - with mock.patch('pipenv.environments.PIPENV_DOTENV_LOCATION', dotenv_path): - load_dot_env() + m.setenv("PIPENV_DOTENV_LOCATION", dotenv_path) + m.setattr("pipenv.environments.PIPENV_DOTENV_LOCATION", dotenv_path) + load_dot_env() output, err = capsys.readouterr() assert 'Warning' in err From f90e183064920796a0cd8d83657f763a2ecfb213 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Sat, 4 Apr 2020 01:31:03 -0400 Subject: [PATCH 23/49] Fix failing test Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tests/unit/test_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_core.py b/tests/unit/test_core.py index 71a7681f12..663cd46d3f 100644 --- a/tests/unit/test_core.py +++ b/tests/unit/test_core.py @@ -32,7 +32,7 @@ def test_load_dot_env_from_environment_variable_location(monkeypatch, capsys): f.write('{}={}'.format(key, val)) m.setenv("PIPENV_DOTENV_LOCATION", dotenv_path) - m.setattr("pipenv.environments", "PIPENV_DOTENV_LOCATION", dotenv_path) + m.setattr("pipenv.environments.PIPENV_DOTENV_LOCATION", dotenv_path) load_dot_env() assert os.environ[key] == val From 5c525edcaa7da2b2076a6eca6c5bc19aa93fc437 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Sun, 5 Apr 2020 00:06:40 -0400 Subject: [PATCH 24/49] Redo test structure for azure Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/steps/create-virtualenv.yml | 45 ------------- .../steps/install-dependencies.yml | 27 +++++++- .azure-pipelines/steps/reinstall-pythons.yml | 34 ---------- .azure-pipelines/steps/run-tests-linux.yml | 17 ++++- .azure-pipelines/steps/run-tests-windows.yml | 64 +++++++++++++------ .azure-pipelines/steps/run-tests.yml | 8 +-- azure-pipelines.yml | 34 +--------- 7 files changed, 88 insertions(+), 141 deletions(-) delete mode 100644 .azure-pipelines/steps/create-virtualenv.yml delete mode 100644 .azure-pipelines/steps/reinstall-pythons.yml diff 
--git a/.azure-pipelines/steps/create-virtualenv.yml b/.azure-pipelines/steps/create-virtualenv.yml deleted file mode 100644 index 3ad3a496c4..0000000000 --- a/.azure-pipelines/steps/create-virtualenv.yml +++ /dev/null @@ -1,45 +0,0 @@ -parameters: - python_version: '' - -steps: - -- script: | - echo "##vso[task.setvariable variable=LANG]C.UTF-8" - echo "##vso[task.setvariable variable=PIP_PROCESS_DEPENDENCY_LINKS]1" - displayName: Set Environment Variables - -- ${{ if eq(parameters.vmImage, 'windows-latest') }}: - - powershell: | - pip install certifi - $env:PYTHON_PATH=$(python -c "import sys; print(sys.executable)") - $env:CERTIFI_CONTENT=$(python -m certifi) - echo "##vso[task.setvariable variable=GIT_SSL_CAINFO]$env:CERTIFI_CONTENT" - echo "##vso[task.setvariable variable=PY_EXE]$env:PYTHON_PATH" - displayName: Set Python Path - env: - PYTHONWARNINGS: 'ignore:DEPRECATION' -- ${{ if ne(parameters.vmImage, 'windows-latest') }}: - - bash: | - pip install certifi - PYTHON_PATH=$(python -c 'import sys; print(sys.executable)') - CERTIFI_CONTENT=$(python -m certifi) - echo "##vso[task.setvariable variable=GIT_SSL_CAINFO]$CERTIFI_CONTENT" - echo "##vso[task.setvariable variable=PY_EXE]$PYTHON_PATH" - displayName: Set Python Path - env: - PYTHONWARNINGS: 'ignore:DEPRECATION' - -- script: | - echo "Python path: $(PY_EXE)" - echo "GIT_SSL_CAINFO: $(GIT_SSL_CAINFO)" - echo "PIPENV PYTHON VERSION: $(python.version)" - echo "python_version: ${{ parameters.python_version }}" - git submodule sync - git submodule update --init --recursive - $(PY_EXE) -m pip install "virtualenv<20" - $(PY_EXE) -m pipenv install --deploy --dev --python="$(PY_EXE)" - env: - PIPENV_DEFAULT_PYTHON_VERSION: ${{ parameters.python_version }} - PYTHONWARNINGS: 'ignore:DEPRECATION' - PIPENV_NOSPIN: '1' - displayName: Make Virtualenv diff --git a/.azure-pipelines/steps/install-dependencies.yml b/.azure-pipelines/steps/install-dependencies.yml index 4e179d2a19..c537447d49 100644 --- a/.azure-pipelines/steps/install-dependencies.yml +++ b/.azure-pipelines/steps/install-dependencies.yml @@ -1,5 +1,30 @@ +parameters: + python_version: '' + steps: -- script: 'python -m pip install --upgrade pip setuptools wheel --upgrade-strategy=eager && python -m pip install -e . --upgrade' + +- script: | + echo "##vso[task.setvariable variable=LANG]C.UTF-8" + echo "##vso[task.setvariable variable=PIP_PROCESS_DEPENDENCY_LINKS]1" + displayName: Set Environment Variables + +- script: | + echo "Python path: $(PY_EXE)" + echo "GIT_SSL_CAINFO: $(GIT_SSL_CAINFO)" + echo "PIPENV PYTHON VERSION: $(python.version)" + echo "python_version: ${{ parameters.python_version }}" + git submodule sync + git submodule update --init --recursive + $(PY_EXE) -m pip install --upgrade --upgrade-strategy=eager pip setuptools wheel + $(PY_EXE) -m pip install "virtualenv<20" + $(PY_EXE) -m pipenv install --deploy --dev --python="$(PY_EXE)" + env: + PIPENV_DEFAULT_PYTHON_VERSION: ${{ parameters.python_version }} + PYTHONWARNINGS: 'ignore:DEPRECATION' + PIPENV_NOSPIN: '1' + displayName: Make Virtualenv + +- script: 'python -m pip install -e . 
--upgrade' displayName: Upgrade Pip & Install Pipenv env: PYTHONWARNINGS: 'ignore:DEPRECATION' diff --git a/.azure-pipelines/steps/reinstall-pythons.yml b/.azure-pipelines/steps/reinstall-pythons.yml deleted file mode 100644 index 7964792563..0000000000 --- a/.azure-pipelines/steps/reinstall-pythons.yml +++ /dev/null @@ -1,34 +0,0 @@ -steps: - - script: | - # When you paste this, please make sure the indentation is preserved - # Fail out if any setups fail - set -e - - # Delete old Pythons - rm -rf $AGENT_TOOLSDIRECTORY/Python/2.7.16 - rm -rf $AGENT_TOOLSDIRECTORY/Python/3.5.7 - rm -rf $AGENT_TOOLSDIRECTORY/Python/3.7.3 - [ -e $AGENT_TOOLSDIRECTORY/Python/3.7.2 ] && [ -e $AGENT_TOOLSDIRECTORY/Python/3.5.5 ] && [ -e $AGENT_TOOLSDIRECTORY/Python/2.7.15 ] && exit 0 - # Download new Pythons - azcopy --recursive \ - --source https://vstsagenttools.blob.core.windows.net/tools/hostedtoolcache/linux/Python/2.7.15 \ - --destination $AGENT_TOOLSDIRECTORY/Python/2.7.15 - - azcopy --recursive \ - --source https://vstsagenttools.blob.core.windows.net/tools/hostedtoolcache/linux/Python/3.5.5 \ - --destination $AGENT_TOOLSDIRECTORY/Python/3.5.5 - - azcopy --recursive \ - --source https://vstsagenttools.blob.core.windows.net/tools/hostedtoolcache/linux/Python/3.7.2 \ - --destination $AGENT_TOOLSDIRECTORY/Python/3.7.2 - - # Install new Pythons - original_directory=$PWD - setups=$(find $AGENT_TOOLSDIRECTORY/Python -name setup.sh) - for setup in $setups; do - chmod +x $setup; - cd $(dirname $setup); - ./$(basename $setup); - cd $original_directory; - done; - displayName: 'Workaround: roll back Python versions' diff --git a/.azure-pipelines/steps/run-tests-linux.yml b/.azure-pipelines/steps/run-tests-linux.yml index a786f8735b..dbb13f9481 100644 --- a/.azure-pipelines/steps/run-tests-linux.yml +++ b/.azure-pipelines/steps/run-tests-linux.yml @@ -2,11 +2,26 @@ parameters: python_version: '' steps: + +- bash: | + pip install certifi + PYTHON_PATH=$(python -c 'import sys; print(sys.executable)') + CERTIFI_CONTENT=$(python -m certifi) + echo "##vso[task.setvariable variable=GIT_SSL_CAINFO]$CERTIFI_CONTENT" + echo "##vso[task.setvariable variable=PY_EXE]$PYTHON_PATH" + displayName: Set Python Path + env: + PYTHONWARNINGS: 'ignore:DEPRECATION' + +- template: install-dependencies.yml + parameters: + python_version: ${{ parameters.python_version }} + - script: | # Fix Git SSL errors echo "Using pipenv python version: $(PIPENV_DEFAULT_PYTHON_VERSION)" git submodule sync && git submodule update --init --recursive - pipenv run pytest -n 4 --junitxml=test-results.xml + pipenv run pytest -n 4 --junitxml=junit/test-results.xml displayName: Run integration tests env: PYTHONWARNINGS: ignore:DEPRECATION diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml index 376a83d55d..14b645a1e3 100644 --- a/.azure-pipelines/steps/run-tests-windows.yml +++ b/.azure-pipelines/steps/run-tests-windows.yml @@ -3,42 +3,64 @@ parameters: python_architecture: '' steps: - - task: UsePythonVersion@0 + - task: PowerShell@2 inputs: - versionSpec: ${{ parameters.python_version }} - architecture: ${{ parameters.python_architecture }} - addToPath: true - displayName: Use Python ${{ parameters.python_version }} + filePath: .azure-pipelines/scripts/New-RAMDisk.ps1 + arguments: "-Drive R -Size 2GB" + displayName: Setup RAMDisk - - script: | - echo '##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]'${{ parameters.python_version }} - env: - PYTHON_VERSION: ${{ parameters.python_version }} - - 
- template: install-dependencies.yml - - - template: create-virtualenv.yml - parameters: - python_version: ${{ parameters.python_version }} + - powershell: | + mkdir R:\Temp + $acl = Get-Acl "R:\Temp" + $rule = New-Object System.Security.AccessControl.FileSystemAccessRule( + "Everyone", "FullControl", "ContainerInherit,ObjectInherit", "None", "Allow" + ) + $acl.AddAccessRule($rule) + Set-Acl "R:\Temp" $acl + displayName: Set RAMDisk Permissions - powershell: | - subst T: "$env:TEMP" - Write-Host "##vso[task.setvariable variable=TEMP]T:\" - Write-Host "##vso[task.setvariable variable=TMP]T:\" + Write-Host "##vso[task.setvariable variable=TEMP]R:\" + Write-Host "##vso[task.setvariable variable=TMP]R:\" + Write-Host "##vso[task.setvariable variable=WORKON_HOME]R:\virtualenvs" Write-Host "##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]$env:PYTHON_VERSION" Write-Host "##vso[task.setvariable variable=PIPENV_NOSPIN]1" displayName: Fix Temp Variable env: PYTHON_VERSION: ${{ parameters.python_version }} + - powershell: | + pip install certifi + $env:PYTHON_PATH=$(python -c "import sys; print(sys.executable)") + $env:CERTIFI_CONTENT=$(python -m certifi) + echo "##vso[task.setvariable variable=GIT_SSL_CAINFO]$env:CERTIFI_CONTENT" + echo "##vso[task.setvariable variable=PY_EXE]$env:PYTHON_PATH" + displayName: Set Python Path + env: + PYTHONWARNINGS: 'ignore:DEPRECATION' + + - template: install-dependencies.yml + parameters: + python_version: ${{ parameters.python_version }} + - powershell: | git submodule sync git submodule update --init --recursive - Write-Host "Running Command: pipenv run pytest -n 4 --junitxml=test-results.xml --timeout 300 tests/" - pipenv run pytest -n 4 --junitxml=test-results.xml --timeout 300 tests/ + Write-Host "Running Command: pipenv run pytest -n 4 --junitxml=junit/test-results.xml --timeout 300 tests/" + $env:TEMP = "R:\Temp" + pipenv run pytest -ra -n 4 --timeout=300 --junit-xml=junit/test-results.xml tests/ failOnStderr: false displayName: Run integration tests env: PYTHONWARNINGS: 'ignore:DEPRECATION' - PIPENV_NOSPIN: '1' + PIPENV_NOSPIN: 1 GIT_SSH_COMMAND: ssh -o StrictHostKeyChecking=accept-new -o CheckHostIP=no + + + +- task: PublishTestResults@2 + displayName: Publish Test Results + inputs: + testResultsFiles: junit/*.xml + testRunTitle: 'Python $(python.version)' + condition: succeededOrFailed() diff --git a/.azure-pipelines/steps/run-tests.yml b/.azure-pipelines/steps/run-tests.yml index ce4b501659..41cb72fa86 100644 --- a/.azure-pipelines/steps/run-tests.yml +++ b/.azure-pipelines/steps/run-tests.yml @@ -6,17 +6,11 @@ steps: addToPath: true displayName: Use Python $(python.version) -- template: install-dependencies.yml - - script: | echo '##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]'$(python.version) env: PYTHON_VERSION: $(python.version) -- template: create-virtualenv.yml - parameters: - python_version: $(python.version) - - ${{ if eq(parameters.vmImage, 'windows-latest') }}: - template: run-tests-windows.yml parameters: @@ -29,6 +23,6 @@ steps: - task: PublishTestResults@2 displayName: Publish Test Results inputs: - testResultsFiles: '**/test-results.xml' + testResultsFiles: '**/junit/*.xml' testRunTitle: 'Python $(python.version)' condition: succeededOrFailed() diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a0f30c1896..c7943d4a6e 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -69,7 +69,7 @@ jobs: parameters: vmImage: 'Ubuntu-latest' -- job: TestWindows1 +- job: TestWindows timeoutInMinutes: 0 pool: 
vmImage: windows-latest @@ -89,39 +89,9 @@ jobs: python.architecture: x64 maxParallel: 8 steps: - - template: .azure-pipelines/steps/run-tests-windows.yml + - template: .azure-pipelines/steps/run-tests.yml parameters: vmImage: windows-latest - python_version: $(python.version) - python_architecture: $(python.architecture) - # pytest_markers: "lock or dotvenv or markers or project or utils or patched or core or cli" - -# - job: TestWindows2 -# timeoutInMinutes: 0 -# pool: -# vmImage: windows-latest -# strategy: -# matrix: -# Python27: -# python.version: '2.7' -# python.architecture: x64 -# Python36: -# python.version: '3.6' -# python.architecture: x64 -# Python37: -# python.version: '3.7' -# python.architecture: x64 -# Python38: -# python.version: '3.8' -# python.architecture: x64 -# maxParallel: 8 -# steps: -# - template: .azure-pipelines/steps/run-tests-windows.yml -# parameters: -# vmImage: windows-latest -# python_version: $(python.version) -# python_architecture: $(python.architecture) -# pytest_markers: "urls or multiprocessing or local or sequential or run or outdated or basic or code or uninstall" - job: TestMacOS pool: From 6652157fb195bbd54c307d0899500f4ba6fae808 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Sun, 5 Apr 2020 00:43:42 -0400 Subject: [PATCH 25/49] Fix vendor test Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/steps/run-vendor-scripts.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.azure-pipelines/steps/run-vendor-scripts.yml b/.azure-pipelines/steps/run-vendor-scripts.yml index 2aca1fe084..2923f26693 100644 --- a/.azure-pipelines/steps/run-vendor-scripts.yml +++ b/.azure-pipelines/steps/run-vendor-scripts.yml @@ -9,16 +9,13 @@ steps: addToPath: true displayName: Use Python $(python.version) -- template: install-dependencies.yml - script: | echo '##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]'$(python.version) env: PYTHON_VERSION: $(python.version) -- template: create-virtualenv.yml - parameters: - python_version: $(python.version) +- template: install-dependencies.yml - script: | python -m pip install --upgrade invoke requests parver bs4 vistir towncrier pip setuptools wheel --upgrade-strategy=eager From e51340458e87d29b70a4b41967039e1d6f6d7c52 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Sun, 5 Apr 2020 00:45:14 -0400 Subject: [PATCH 26/49] Fix package build test Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/steps/build-package.yml | 9 +++++---- .azure-pipelines/steps/install-dependencies.yml | 5 +++-- .azure-pipelines/steps/run-tests-windows.yml | 9 --------- .azure-pipelines/steps/run-vendor-scripts.yml | 8 ++++++++ 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/.azure-pipelines/steps/build-package.yml b/.azure-pipelines/steps/build-package.yml index 81a6160b78..01f9fe0bc3 100644 --- a/.azure-pipelines/steps/build-package.yml +++ b/.azure-pipelines/steps/build-package.yml @@ -6,16 +6,17 @@ steps: addToPath: true displayName: Use Python $(python.version) -- template: install-dependencies.yml +- bash: | + PYTHON_PATH=$(python -c 'import sys; print(sys.executable)') + echo "##vso[task.setvariable variable=PY_EXE]$PYTHON_PATH" + displayName: Set Python Path - script: | echo '##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]'$(python.version) env: PYTHON_VERSION: $(python.version) -- template: create-virtualenv.yml - parameters: - python_version: $(python.version) +- template: install-dependencies.yml 
- script: | python -m pip install --upgrade wheel pip setuptools twine readme_renderer[md] diff --git a/.azure-pipelines/steps/install-dependencies.yml b/.azure-pipelines/steps/install-dependencies.yml index c537447d49..fd5b31c74e 100644 --- a/.azure-pipelines/steps/install-dependencies.yml +++ b/.azure-pipelines/steps/install-dependencies.yml @@ -17,14 +17,15 @@ steps: git submodule update --init --recursive $(PY_EXE) -m pip install --upgrade --upgrade-strategy=eager pip setuptools wheel $(PY_EXE) -m pip install "virtualenv<20" - $(PY_EXE) -m pipenv install --deploy --dev --python="$(PY_EXE)" env: PIPENV_DEFAULT_PYTHON_VERSION: ${{ parameters.python_version }} PYTHONWARNINGS: 'ignore:DEPRECATION' PIPENV_NOSPIN: '1' displayName: Make Virtualenv -- script: 'python -m pip install -e . --upgrade' +- script: | + $(PY_EXE) -m pip install -e . --upgrade + $(PY_EXE) -m pipenv install --deploy --dev --python="$(PY_EXE)" displayName: Upgrade Pip & Install Pipenv env: PYTHONWARNINGS: 'ignore:DEPRECATION' diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml index 14b645a1e3..554cae7cee 100644 --- a/.azure-pipelines/steps/run-tests-windows.yml +++ b/.azure-pipelines/steps/run-tests-windows.yml @@ -55,12 +55,3 @@ steps: PYTHONWARNINGS: 'ignore:DEPRECATION' PIPENV_NOSPIN: 1 GIT_SSH_COMMAND: ssh -o StrictHostKeyChecking=accept-new -o CheckHostIP=no - - - -- task: PublishTestResults@2 - displayName: Publish Test Results - inputs: - testResultsFiles: junit/*.xml - testRunTitle: 'Python $(python.version)' - condition: succeededOrFailed() diff --git a/.azure-pipelines/steps/run-vendor-scripts.yml b/.azure-pipelines/steps/run-vendor-scripts.yml index 2923f26693..070cc455e4 100644 --- a/.azure-pipelines/steps/run-vendor-scripts.yml +++ b/.azure-pipelines/steps/run-vendor-scripts.yml @@ -10,8 +10,16 @@ steps: displayName: Use Python $(python.version) +- bash: | + PYTHON_PATH=$(python -c 'import sys; print(sys.executable)') + echo "##vso[task.setvariable variable=PY_EXE]$PYTHON_PATH" + displayName: Set Python Path + - script: | echo '##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]'$(python.version) + env: + PYTHONWARNINGS: 'ignore:DEPRECATION' + env: PYTHON_VERSION: $(python.version) From 1d633082b10c34f84d58fa2ad80fb878b5449745 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Sun, 5 Apr 2020 19:06:28 -0400 Subject: [PATCH 27/49] AP: Update windows tests tempdir path - Use ramdisk on windows without subdirectory for tempdir path Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/steps/run-tests-windows.yml | 2 +- .azure-pipelines/steps/run-vendor-scripts.yml | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml index 554cae7cee..d0257f9236 100644 --- a/.azure-pipelines/steps/run-tests-windows.yml +++ b/.azure-pipelines/steps/run-tests-windows.yml @@ -47,7 +47,7 @@ steps: git submodule sync git submodule update --init --recursive Write-Host "Running Command: pipenv run pytest -n 4 --junitxml=junit/test-results.xml --timeout 300 tests/" - $env:TEMP = "R:\Temp" + $env:TEMP = "R:\" pipenv run pytest -ra -n 4 --timeout=300 --junit-xml=junit/test-results.xml tests/ failOnStderr: false displayName: Run integration tests diff --git a/.azure-pipelines/steps/run-vendor-scripts.yml b/.azure-pipelines/steps/run-vendor-scripts.yml index 070cc455e4..df0f766d60 100644 --- 
a/.azure-pipelines/steps/run-vendor-scripts.yml +++ b/.azure-pipelines/steps/run-vendor-scripts.yml @@ -17,9 +17,6 @@ steps: - script: | echo '##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]'$(python.version) - env: - PYTHONWARNINGS: 'ignore:DEPRECATION' - env: PYTHON_VERSION: $(python.version) From 8a7d3b5385ec7935f67cd816a70de50fa60b4760 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Mon, 6 Apr 2020 23:54:32 -0400 Subject: [PATCH 28/49] Update ACL scripts Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/steps/run-tests-windows.yml | 6 +++--- .azure-pipelines/steps/run-vendor-scripts.yml | 13 +++++-------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml index d0257f9236..188f06614c 100644 --- a/.azure-pipelines/steps/run-tests-windows.yml +++ b/.azure-pipelines/steps/run-tests-windows.yml @@ -10,13 +10,13 @@ steps: displayName: Setup RAMDisk - powershell: | - mkdir R:\Temp - $acl = Get-Acl "R:\Temp" + mkdir R:\virtualenvs + $acl = Get-Acl "R:\" $rule = New-Object System.Security.AccessControl.FileSystemAccessRule( "Everyone", "FullControl", "ContainerInherit,ObjectInherit", "None", "Allow" ) $acl.AddAccessRule($rule) - Set-Acl "R:\Temp" $acl + Set-Acl "R:\" $acl displayName: Set RAMDisk Permissions - powershell: | diff --git a/.azure-pipelines/steps/run-vendor-scripts.yml b/.azure-pipelines/steps/run-vendor-scripts.yml index df0f766d60..d4580672ee 100644 --- a/.azure-pipelines/steps/run-vendor-scripts.yml +++ b/.azure-pipelines/steps/run-vendor-scripts.yml @@ -9,27 +9,24 @@ steps: addToPath: true displayName: Use Python $(python.version) - - bash: | + python -m pip install --upgrade --upgrade-strategy=eager pip requests certifi wheel setuptools PYTHON_PATH=$(python -c 'import sys; print(sys.executable)') + CERTIFI_CONTENT=$(python -m certifi) echo "##vso[task.setvariable variable=PY_EXE]$PYTHON_PATH" + echo "##vso[task.setvariable variable=GIT_SSL_CAINFO]$CERTIFI_CONTENT" displayName: Set Python Path -- script: | - echo '##vso[task.setvariable variable=PIPENV_DEFAULT_PYTHON_VERSION]'$(python.version) - env: - PYTHON_VERSION: $(python.version) - - template: install-dependencies.yml - script: | - python -m pip install --upgrade invoke requests parver bs4 vistir towncrier pip setuptools wheel --upgrade-strategy=eager + python -m pip install --upgrade invoke parver bs4 vistir towncrier --upgrade-strategy=eager python -m invoke vendoring.update displayName: Run Vendor Scripts env: PY_EXE: $(PY_EXE) GIT_SSL_CAINFO: $(GIT_SSL_CAINFO) LANG: $(LANG) - PIPENV_DEFAULT_PYTHON_VERSION: '${{ parameters.python_version }}' + PIPENV_DEFAULT_PYTHON_VERSION: $(python.version) PYTHONWARNINGS: ignore:DEPRECATION PIPENV_NOSPIN: '1' From 511a144e262a734d96b7e828c9b153829e4448c4 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Tue, 7 Apr 2020 11:02:51 -0400 Subject: [PATCH 29/49] Update test artifacts Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tests/pypi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pypi b/tests/pypi index ecbbb8775b..7a3ae1c84b 160000 --- a/tests/pypi +++ b/tests/pypi @@ -1 +1 @@ -Subproject commit ecbbb8775b87be07d32afd70909750815b39ae60 +Subproject commit 7a3ae1c84b3b9f40cedf909f73be778d214b86ef From 6a12408ead93be92c7215b303a58ccc1c045727f Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Tue, 7 Apr 2020 13:47:22 -0400 Subject: [PATCH 30/49] Add new 
pendulum versions Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tests/pypi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pypi b/tests/pypi index 7a3ae1c84b..f6a8de64e3 160000 --- a/tests/pypi +++ b/tests/pypi @@ -1 +1 @@ -Subproject commit 7a3ae1c84b3b9f40cedf909f73be778d214b86ef +Subproject commit f6a8de64e39748939959b0ed466f94bb03443d51 From 1d151cd5e282433d674f43265f50ef1b2ab84dcc Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Tue, 7 Apr 2020 14:21:43 -0400 Subject: [PATCH 31/49] Fix broken exception on windows Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/exceptions.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pipenv/exceptions.py b/pipenv/exceptions.py index 6e0032ad97..919442f1d3 100644 --- a/pipenv/exceptions.py +++ b/pipenv/exceptions.py @@ -233,7 +233,7 @@ def __init__(self, filename="Pipfile.lock", extra=None, **kwargs): class DeployException(PipenvUsageError): def __init__(self, message=None, **kwargs): if not message: - message = crayons.normal("Aborting deploy", bold=True) + message = str(crayons.normal("Aborting deploy", bold=True)) extra = kwargs.pop("extra", []) PipenvUsageError.__init__(self, message=message, extra=extra, **kwargs) @@ -256,7 +256,9 @@ def __init__(self, option_name="system", message=None, ctx=None, **kwargs): ), ] if message is None: - message = crayons.blue("See also: {0}".format(crayons.white("--deploy flag."))) + message = str( + crayons.blue("See also: {0}".format(crayons.white("--deploy flag."))) + ) super(SystemUsageError, self).__init__(option_name, message=message, ctx=ctx, extra=extra, **kwargs) @@ -310,7 +312,9 @@ def __init__(self, message=None, **kwargs): # so replacement or parsing requires this step extra = ANSI_REMOVAL_RE.sub("", "{0}".format(extra)) if "KeyboardInterrupt" in extra: - extra = crayons.red("Virtualenv creation interrupted by user", bold=True) + extra = str( + crayons.red("Virtualenv creation interrupted by user", bold=True) + ) self.extra = extra = [extra] VirtualenvException.__init__(self, message, extra=extra) From 8b0f5addd2298fdbf3c9e80b66a68233c34722e8 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Tue, 7 Apr 2020 17:47:46 -0400 Subject: [PATCH 32/49] Don't use pytest-timeout on windows - Ditch timeout runner and update test modules Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/steps/run-tests-windows.yml | 4 ++-- tests/pypi | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml index 188f06614c..b50917c099 100644 --- a/.azure-pipelines/steps/run-tests-windows.yml +++ b/.azure-pipelines/steps/run-tests-windows.yml @@ -46,9 +46,9 @@ steps: - powershell: | git submodule sync git submodule update --init --recursive - Write-Host "Running Command: pipenv run pytest -n 4 --junitxml=junit/test-results.xml --timeout 300 tests/" + Write-Host "Running Command: pipenv run pytest -n 4 --junitxml=junit/test-results.xml tests/" $env:TEMP = "R:\" - pipenv run pytest -ra -n 4 --timeout=300 --junit-xml=junit/test-results.xml tests/ + pipenv run pytest -ra -n 4 --junit-xml=junit/test-results.xml tests/ failOnStderr: false displayName: Run integration tests env: diff --git a/tests/pypi b/tests/pypi index f6a8de64e3..96445ddead 160000 --- a/tests/pypi +++ b/tests/pypi @@ -1 +1 @@ -Subproject commit f6a8de64e39748939959b0ed466f94bb03443d51 +Subproject commit 
96445ddead5de0b2563d80a0ca1b0b275df68b69 From 4c68e2092f57e13d46e4e5d718880c676340687e Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Wed, 8 Apr 2020 00:58:57 -0400 Subject: [PATCH 33/49] Don't rely on old requests versions in tests' Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tests/integration/test_pipenv.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_pipenv.py b/tests/integration/test_pipenv.py index 5e172f8b24..a8c2b5e2cf 100644 --- a/tests/integration/test_pipenv.py +++ b/tests/integration/test_pipenv.py @@ -33,11 +33,11 @@ def test_deploy_works(PipenvInstance): with open(p.pipfile_path, 'w') as f: contents = """ [packages] -requests = "==2.14.0" -flask = "==0.12.2" +requests = "==2.19.1" +flask = "==1.1.2" [dev-packages] -pytest = "==3.1.1" +pytest = "==4.6.9" """.strip() f.write(contents) c = p.pipenv('install --verbose') @@ -50,7 +50,7 @@ def test_deploy_works(PipenvInstance): with open(p.pipfile_path, 'w') as f: contents = """ [packages] -requests = "==2.14.0" +requests = "==2.19.1" """.strip() f.write(contents) From 9942755d40fd9c094feb46c040e21269de5a3f8a Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Wed, 8 Apr 2020 10:31:44 -0400 Subject: [PATCH 34/49] Update test dependencies Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tests/pypi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pypi b/tests/pypi index 96445ddead..6faddf97c2 160000 --- a/tests/pypi +++ b/tests/pypi @@ -1 +1 @@ -Subproject commit 96445ddead5de0b2563d80a0ca1b0b275df68b69 +Subproject commit 6faddf97c2a0220870da0a1409a196667b06c9cc From e9767374039dd71600041437391fe463b4e6dab1 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Wed, 8 Apr 2020 20:06:54 -0400 Subject: [PATCH 35/49] Include data-requires-python in test pypi server - Fix test plugin for pypi runner Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- tests/pytest-pypi/pytest_pypi/app.py | 19 ++++++++++++++----- .../pytest_pypi/templates/package.html | 4 ++-- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/tests/pytest-pypi/pytest_pypi/app.py b/tests/pytest-pypi/pytest_pypi/app.py index 5484eeb782..5b1e693ee2 100644 --- a/tests/pytest-pypi/pytest_pypi/app.py +++ b/tests/pytest-pypi/pytest_pypi/app.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function +import collections import contextlib import io import json @@ -8,12 +9,15 @@ from tarfile import is_tarfile from zipfile import is_zipfile +import distlib.wheel import requests from six.moves import xmlrpc_client from flask import Flask, redirect, abort, render_template, send_file, jsonify +ReleaseTuple = collections.namedtuple("ReleaseTuple", ["path", "requires_python"]) + app = Flask(__name__) session = requests.Session() @@ -49,7 +53,7 @@ def __init__(self, name): @property def json(self): - for path in self._package_dirs: + for path, _ in self._package_dirs: try: with open(os.path.join(path, 'api.json')) as f: return json.load(f) @@ -59,7 +63,7 @@ def json(self): releases = response["releases"] files = { pkg for pkg_dir in self._package_dirs - for pkg in os.listdir(pkg_dir) + for pkg in os.listdir(pkg_dir.path) } for release in list(releases.keys()): values = ( @@ -81,8 +85,13 @@ def __repr__(self): def add_release(self, path_to_binary): path_to_binary = os.path.abspath(path_to_binary) path, release = os.path.split(path_to_binary) - self.releases[release] = path_to_binary - 
self._package_dirs.add(path) + requires_python = "" + if path_to_binary.endswith(".whl"): + pkg = distlib.wheel.Wheel(path_to_binary) + md_dict = pkg.metadata.todict() + requires_python = md_dict.get("requires_python", "") + self.releases[release] = ReleaseTuple(path_to_binary, requires_python) + self._package_dirs.add(ReleaseTuple(path, requires_python)) class Artifact(object): @@ -194,7 +203,7 @@ def serve_package(package, release): package = packages[package] if release in package.releases: - return send_file(package.releases[release]) + return send_file(package.releases[release].path) abort(404) diff --git a/tests/pytest-pypi/pytest_pypi/templates/package.html b/tests/pytest-pypi/pytest_pypi/templates/package.html index 26ba9eca16..3d3645177f 100644 --- a/tests/pytest-pypi/pytest_pypi/templates/package.html +++ b/tests/pytest-pypi/pytest_pypi/templates/package.html @@ -6,8 +6,8 @@ </head> <body> <h1>Links for {{ package.name }}</h1> - {% for release in package.releases %} - <a href="/{{ package.name }}/{{ release }}">{{ release }}</a> + {% for release, value in package.releases.items() %} + <a href="/{{ package.name }}/{{ release }}"{%- if value.requires_python %} data-requires-python="{{ value.requires_python }}"{% endif %}>{{ release }}</a> <br> {% endfor %} </body> From 85f10b9588adbce15d1319604d48282e34dfcc54 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Thu, 9 Apr 2020 15:30:34 -0400 Subject: [PATCH 36/49] Add comment on resolver functionality Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pipenv/utils.py b/pipenv/utils.py index 98754f6971..13fba13331 100644 --- a/pipenv/utils.py +++ b/pipenv/utils.py @@ -455,6 +455,10 @@ def get_metadata( ) index_lookup.update(req_idx) markers_lookup.update(markers_idx) + # Add dependencies of any file (e.g. wheels/tarballs), source, or local + # directories into the initial constraint pool to be resolved with the + # rest of the dependencies, while adding the files/vcs deps/paths themselves + # to the lockfile directly constraint_update, lockfile_update = cls.get_deps_from_req( req, resolver=transient_resolver ) From 4bbcefb675513f434350678978c535111c125fc1 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Thu, 9 Apr 2020 15:31:22 -0400 Subject: [PATCH 37/49] Update marker resolution Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/patched/piptools/resolver.py | 20 +++++++--- .../vendoring/patches/patched/piptools.patch | 39 +++++++++++++------ 2 files changed, 41 insertions(+), 18 deletions(-) diff --git a/pipenv/patched/piptools/resolver.py b/pipenv/patched/piptools/resolver.py index 8fd8a57f68..7e856fe645 100644 --- a/pipenv/patched/piptools/resolver.py +++ b/pipenv/patched/piptools/resolver.py @@ -6,6 +6,9 @@ from functools import partial from itertools import chain, count +from pipenv.vendor.requirementslib.models.markers import normalize_marker_str +from packaging.markers import Marker + from . 
import click from ._compat import install_req_from_line from .cache import DependencyCache @@ -64,12 +67,17 @@ def combine_install_requirements(ireqs): # NOTE we may be losing some info on dropped reqs here combined_ireq.req.specifier &= ireq.req.specifier combined_ireq.constraint &= ireq.constraint - if not combined_ireq.markers: - combined_ireq.markers = ireq.markers - else: - _markers = combined_ireq.markers._markers - if not isinstance(_markers[0], (tuple, list)): - combined_ireq.markers._markers = [_markers, 'and', ireq.markers._markers] + if ireq.markers and not combined_ireq.markers: + combined_ireq.markers = copy.deepcopy(ireq.markers) + elif ireq.markers and combined_ireq.markers: + _markers = [] # type: List[Marker] + for marker in [ireq.markers, combined_ireq.markers]: + if isinstance(marker, str): + _markers.append(Marker(marker)) + else: + _markers.append(marker) + marker_str = " and ".join([normalize_marker_str(m) for m in _markers if m]) + combined_ireq.markers = Marker(marker_str) # Return a sorted, de-duped tuple of extras combined_ireq.extras = tuple( sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras))) diff --git a/tasks/vendoring/patches/patched/piptools.patch b/tasks/vendoring/patches/patched/piptools.patch index 4cbeb3dcbe..45eb963b0d 100644 --- a/tasks/vendoring/patches/patched/piptools.patch +++ b/tasks/vendoring/patches/patched/piptools.patch @@ -535,10 +535,20 @@ index acbd680..13378ae 100644 def allow_all_wheels(self): """ diff --git a/pipenv/patched/piptools/resolver.py b/pipenv/patched/piptools/resolver.py -index fc53f18..8fd8a57 100644 +index fc53f18..7e856fe 100644 --- a/pipenv/patched/piptools/resolver.py +++ b/pipenv/patched/piptools/resolver.py -@@ -34,6 +34,7 @@ class RequirementSummary(object): +@@ -6,6 +6,9 @@ import os + from functools import partial + from itertools import chain, count + ++from pipenv.vendor.requirementslib.models.markers import normalize_marker_str ++from packaging.markers import Marker ++ + from . 
import click + from ._compat import install_req_from_line + from .cache import DependencyCache +@@ -34,6 +37,7 @@ class RequirementSummary(object): self.req = ireq.req self.key = key_from_req(ireq.req) self.extras = str(sorted(ireq.extras)) @@ -546,20 +556,25 @@ index fc53f18..8fd8a57 100644 self.specifier = str(ireq.specifier) def __eq__(self, other): -@@ -63,6 +64,12 @@ def combine_install_requirements(ireqs): +@@ -63,6 +67,17 @@ def combine_install_requirements(ireqs): # NOTE we may be losing some info on dropped reqs here combined_ireq.req.specifier &= ireq.req.specifier combined_ireq.constraint &= ireq.constraint -+ if not combined_ireq.markers: -+ combined_ireq.markers = ireq.markers -+ else: -+ _markers = combined_ireq.markers._markers -+ if not isinstance(_markers[0], (tuple, list)): -+ combined_ireq.markers._markers = [_markers, 'and', ireq.markers._markers] ++ if ireq.markers and not combined_ireq.markers: ++ combined_ireq.markers = copy.deepcopy(ireq.markers) ++ elif ireq.markers and combined_ireq.markers: ++ _markers = [] # type: List[Marker] ++ for marker in [ireq.markers, combined_ireq.markers]: ++ if isinstance(marker, str): ++ _markers.append(Marker(marker)) ++ else: ++ _markers.append(marker) ++ marker_str = " and ".join([normalize_marker_str(m) for m in _markers if m]) ++ combined_ireq.markers = Marker(marker_str) # Return a sorted, de-duped tuple of extras combined_ireq.extras = tuple( sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras))) -@@ -335,10 +342,19 @@ class Resolver(object): +@@ -335,10 +350,19 @@ class Resolver(object): Editable requirements will never be looked up, as they may have changed at any time. """ @@ -580,7 +595,7 @@ index fc53f18..8fd8a57 100644 elif not is_pinned_requirement(ireq): raise TypeError( "Expected pinned or editable requirement, got {}".format(ireq) -@@ -356,7 +372,7 @@ class Resolver(object): +@@ -356,7 +380,7 @@ class Resolver(object): fg="yellow", ) dependencies = self.repository.get_dependencies(ireq) @@ -589,7 +604,7 @@ index fc53f18..8fd8a57 100644 # Example: ['Werkzeug>=0.9', 'Jinja2>=2.4'] dependency_strings = self.dependency_cache[ireq] -@@ -372,7 +388,8 @@ class Resolver(object): +@@ -372,7 +396,8 @@ class Resolver(object): ) def reverse_dependencies(self, ireqs): From 5e68bcab8c0785fd2eb74956b2bb82e5161a481c Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Thu, 9 Apr 2020 15:42:48 -0400 Subject: [PATCH 38/49] Allow github tests to finish even if one fails, tweak windows tests Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- .azure-pipelines/steps/run-tests-windows.yml | 5 ++--- .github/workflows/ci.yaml | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml index b50917c099..275beeebf3 100644 --- a/.azure-pipelines/steps/run-tests-windows.yml +++ b/.azure-pipelines/steps/run-tests-windows.yml @@ -43,15 +43,14 @@ steps: parameters: python_version: ${{ parameters.python_version }} - - powershell: | + - shell: | git submodule sync git submodule update --init --recursive - Write-Host "Running Command: pipenv run pytest -n 4 --junitxml=junit/test-results.xml tests/" - $env:TEMP = "R:\" pipenv run pytest -ra -n 4 --junit-xml=junit/test-results.xml tests/ failOnStderr: false displayName: Run integration tests env: + TEMP: 'R:\' PYTHONWARNINGS: 'ignore:DEPRECATION' PIPENV_NOSPIN: 1 GIT_SSH_COMMAND: ssh -o StrictHostKeyChecking=accept-new -o CheckHostIP=no diff --git a/.github/workflows/ci.yaml 
b/.github/workflows/ci.yaml index 2b8ca39a42..8070b0fccc 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -10,6 +10,7 @@ jobs: matrix: python-version: [2.7, 3.6, 3.7, 3.8] os: [macOS-latest, ubuntu-latest, windows-latest] + fail-fast: false steps: - uses: actions/checkout@v1 @@ -50,4 +51,4 @@ jobs: PYTHONIOENCODING: 'utf-8' GIT_SSH_COMMAND: ssh -o StrictHostKeyChecking=accept-new -o CheckHostIP=no run: | - pipenv run pytest -ra -n 4 --timeout 300 tests + pipenv run pytest -ra -n 4 tests From 4a656b3e5bddbb15b3fecae00e8e5c3b0b7a2f79 Mon Sep 17 00:00:00 2001 From: Dan Ryan <dan.ryan@canonical.com> Date: Thu, 9 Apr 2020 18:58:17 -0400 Subject: [PATCH 39/49] Rewrite egg-links on failure to use expanded paths - only fix egg links in the project environment Signed-off-by: Dan Ryan <dan.ryan@canonical.com> --- pipenv/core.py | 31 +++++++++++++++++++++---------- pipenv/environment.py | 22 ++++++++++++++++++++++ 2 files changed, 43 insertions(+), 10 deletions(-) diff --git a/pipenv/core.py b/pipenv/core.py index b54f190676..7f4b913492 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -675,7 +675,17 @@ def _cleanup_procs(procs, failed_deps_queue, retry=True): click.echo(crayons.blue(c.out.strip() or c.err.strip())) # The Installation failed… if failed: - if not retry: + if "does not match installed location" in c.err: + project.environment.expand_egg_links() + click.echo("{0}".format( + crayons.yellow( + "Failed initial installation: Failed to overwrite existing " + "package, likely due to path aliasing. Expanding and trying " + "again!" + ) + )) + dep = c.dep.copy() + elif not retry: # The Installation failed… # We echo both c.out and c.err because pip returns error details on out. err = c.err.strip().splitlines() if c.err else [] @@ -683,16 +693,17 @@ def _cleanup_procs(procs, failed_deps_queue, retry=True): err_lines = [line for message in [out, err] for line in message] # Return the subprocess' return code. raise exceptions.InstallError(c.dep.name, extra=err_lines) + else: + # Alert the user. + dep = c.dep.copy() + click.echo( + "{0} {1}! Will try again.".format( + crayons.red("An error occurred while installing"), + crayons.green(dep.as_line()), + ), err=True + ) # Save the Failed Dependency for later. - dep = c.dep.copy() failed_deps_queue.put(dep) - # Alert the user. - click.echo( - "{0} {1}! 

From 520e9be5b0a8d621cf4a08e58675b98c0363ca80 Mon Sep 17 00:00:00 2001
From: Dan Ryan <dan.ryan@canonical.com>
Date: Thu, 9 Apr 2020 18:58:40 -0400
Subject: [PATCH 40/49] Adjust virtualenv installation order

- Fix Azure Pipelines script

Signed-off-by: Dan Ryan <dan.ryan@canonical.com>
---
 .azure-pipelines/steps/run-tests-windows.yml | 2 +-
 .github/workflows/ci.yaml                    | 4 ++--
 pipenv/core.py                               | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.azure-pipelines/steps/run-tests-windows.yml b/.azure-pipelines/steps/run-tests-windows.yml
index 275beeebf3..34c4f772ac 100644
--- a/.azure-pipelines/steps/run-tests-windows.yml
+++ b/.azure-pipelines/steps/run-tests-windows.yml
@@ -43,7 +43,7 @@ steps:
     parameters:
       python_version: ${{ parameters.python_version }}
 
-  - shell: |
+  - script: |
      git submodule sync
      git submodule update --init --recursive
      pipenv run pytest -ra -n 4 --junit-xml=junit/test-results.xml tests/
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 8070b0fccc..b9f641a82b 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -27,7 +27,7 @@ jobs:
 
       - name: Install latest pip, setuptools, wheel
         run: |
-          python -m pip install --upgrade pip setuptools wheel virtualenv --upgrade-strategy=eager
+          python -m pip install --upgrade pip setuptools wheel --upgrade-strategy=eager
       - name: Install dependencies
         env:
           PIPENV_DEFAULT_PYTHON_VERSION: ${{ matrix.python-version }}
@@ -37,8 +37,8 @@ jobs:
         run: |
           git submodule sync
           git submodule update --init --recursive
-          python -m pip install -e . --upgrade
           python -m pip install "virtualenv<20"
+          python -m pip install -e . --upgrade
           pipenv install --deploy --dev --python=${{ steps.python-path.outputs.path }}
       - name: Run tests
         env:
diff --git a/pipenv/core.py b/pipenv/core.py
index 7f4b913492..fffbfb6b3c 100644
--- a/pipenv/core.py
+++ b/pipenv/core.py
@@ -768,7 +768,7 @@ def batch_install(deps_list, procs, failed_deps_queue,
                 pypi_mirror=pypi_mirror,
                 trusted_hosts=trusted_hosts,
                 extra_indexes=extra_indexes,
-                use_pep517=True,
+                use_pep517=not failed,
             )
             c.dep = dep
             # if dep.is_vcs or dep.editable:

From 8cf5ee5b2b58d18f0cdf975e04a1fe23041cdf69 Mon Sep 17 00:00:00 2001
From: Pradyun Gedam <pradyunsg@gmail.com>
Date: Fri, 10 Apr 2020 18:23:45 +0530
Subject: [PATCH 41/49] GA: Don't run on non-master branches

---
 .github/workflows/ci.yaml | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index b9f641a82b..259cea45f0 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -1,6 +1,10 @@
 name: pipenv_ci
 
-on: [push, pull_request]
+on:
+  push:
+    branches:
+      - master
+  pull_request:
 
 jobs:
   build:

From ab3e80a7d5bcb39b9779b69c2c990c683a303b17 Mon Sep 17 00:00:00 2001
From: Pradyun Gedam <pradyunsg@gmail.com>
Date: Fri, 10 Apr 2020 18:44:22 +0530
Subject: [PATCH 42/49] AP: Remove Linux job

---
 azure-pipelines.yml | 23 -----------------------
 1 file changed, 23 deletions(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index c7943d4a6e..8827786248 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -24,29 +24,6 @@ variables:
   - group: CI
 
 jobs:
-- job: TestLinux
-  pool:
-    vmImage: 'Ubuntu-latest'
-  strategy:
-    matrix:
-      Python27:
-        python.version: '2.7'
-        python.architecture: x64
-      Python36:
-        python.version: '3.6'
-        python.architecture: x64
-      Python37:
-        python.version: '3.7'
-        python.architecture: x64
-      Python38:
-        python.version: '3.8'
-        python.architecture: x64
-  maxParallel: 8
-  steps:
-  - template: .azure-pipelines/steps/run-tests.yml
-    parameters:
-      vmImage: 'Ubuntu-latest'
-
 - job: TestVendoring
   pool:
     vmImage: 'Ubuntu-latest'

From 8807630db3d86a63e8fa1e5ffb7c96538d0e7afd Mon Sep 17 00:00:00 2001
From: Pradyun Gedam <pradyunsg@gmail.com>
Date: Fri, 10 Apr 2020 18:44:29 +0530
Subject: [PATCH 43/49] AP: Re-add Linux job

Done for a cleaner diff when moving this block.

---
 azure-pipelines.yml | 23 +++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 8827786248..9876654992 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -46,6 +46,29 @@ jobs:
     parameters:
       vmImage: 'Ubuntu-latest'
 
+- job: TestLinux
+  pool:
+    vmImage: 'Ubuntu-latest'
+  strategy:
+    matrix:
+      "2.7":
+        python.version: '2.7'
+        python.architecture: x64
+      "3.6":
+        python.version: '3.6'
+        python.architecture: x64
+      "3.7":
+        python.version: '3.7'
+        python.architecture: x64
+      "3.8":
+        python.version: '3.8'
+        python.architecture: x64
+  maxParallel: 8
+  steps:
+  - template: .azure-pipelines/steps/run-tests.yml
+    parameters:
+      vmImage: 'Ubuntu-latest'
+
 - job: TestWindows
   timeoutInMinutes: 0
   pool:

From d668af0d27b8499debb725e4505442b9b14b4852 Mon Sep 17 00:00:00 2001
From: Pradyun Gedam <pradyunsg@gmail.com>
Date: Fri, 10 Apr 2020 17:17:09 +0530
Subject: [PATCH 44/49] GA: Nicer naming!

---
 .github/workflows/ci.yaml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 259cea45f0..09be266d26 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -1,4 +1,4 @@
-name: pipenv_ci
+name: CI
 
 on:
   push:
@@ -8,13 +8,13 @@ on:
 
 jobs:
   build:
-    name: pipenv CI python ${{ matrix.python-version }} on ${{matrix.os}}
-    runs-on: ${{ matrix.os }}
+    name: ${{matrix.os}} / ${{ matrix.python-version }}
+    runs-on: ${{ matrix.os }}-latest
     strategy:
+      fail-fast: false
       matrix:
         python-version: [2.7, 3.6, 3.7, 3.8]
-        os: [macOS-latest, ubuntu-latest, windows-latest]
-        fail-fast: false
+        os: [MacOS, Ubuntu, Windows]
 
     steps:
       - uses: actions/checkout@v1

From d75cc6e8b3b5b40e21bc34b67fed3fef061d515f Mon Sep 17 00:00:00 2001
From: Pradyun Gedam <pradyunsg@gmail.com>
Date: Fri, 10 Apr 2020 18:34:00 +0530
Subject: [PATCH 45/49] AP: Nicer names!

This also requires renaming the pipeline in Azure Pipelines' web UI.

---
 azure-pipelines.yml | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 9876654992..021b4200ba 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,4 +1,4 @@
-name: Pipenv Build Rules
+name: CI
 trigger:
   batch: true
   branches:
@@ -25,6 +25,7 @@ variables:
 
 jobs:
 - job: TestVendoring
+  displayName: Vendoring
   pool:
     vmImage: 'Ubuntu-latest'
   variables:
@@ -36,6 +37,7 @@ jobs:
       vmImage: 'Ubuntu-latest'
 
 - job: TestPackaging
+  displayName: Packaging
   pool:
     vmImage: 'Ubuntu-latest'
   variables:
@@ -47,6 +49,7 @@ jobs:
       vmImage: 'Ubuntu-latest'
 
 - job: TestLinux
+  displayName: Linux /
   pool:
     vmImage: 'Ubuntu-latest'
   strategy:
@@ -70,21 +73,22 @@ jobs:
       vmImage: 'Ubuntu-latest'
 
 - job: TestWindows
+  displayName: Windows /
   timeoutInMinutes: 0
   pool:
     vmImage: windows-latest
   strategy:
     matrix:
-      Python27:
+      "2.7":
         python.version: '2.7'
         python.architecture: x64
-      Python36:
+      "3.6":
         python.version: '3.6'
         python.architecture: x64
-      Python37:
+      "3.7":
         python.version: '3.7'
         python.architecture: x64
-      Python38:
+      "3.8":
         python.version: '3.8'
         python.architecture: x64
   maxParallel: 8
@@ -94,20 +98,21 @@ jobs:
       vmImage: windows-latest
 
 - job: TestMacOS
+  displayName: MacOS /
   pool:
     vmImage: macOS-latest
   strategy:
     matrix:
-      Python27:
+      "2.7":
         python.version: '2.7'
         python.architecture: x64
-      Python36:
+      "3.6":
         python.version: '3.6'
         python.architecture: x64
-      Python37:
+      "3.7":
         python.version: '3.7'
         python.architecture: x64
-      Python38:
+      "3.8":
         python.version: '3.8'
         python.architecture: x64
   maxParallel: 8

From 2eb3d5b7ced8e367236f79fe135d1538ff7179fe Mon Sep 17 00:00:00 2001
From: Dan Ryan <dan.ryan@canonical.com>
Date: Fri, 10 Apr 2020 12:08:29 -0400
Subject: [PATCH 46/49] Fix missing normalization import

Signed-off-by: Dan Ryan <dan.ryan@canonical.com>
---
 pipenv/environment.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pipenv/environment.py b/pipenv/environment.py
index 6b267810f0..65e7188da4 100644
--- a/pipenv/environment.py
+++ b/pipenv/environment.py
@@ -476,7 +476,7 @@ def expand_egg_links(self):
         prefixes = [
             vistir.compat.Path(prefix)
             for prefix in self.base_paths["libdirs"].split(os.pathsep)
-            if _normalized(prefix).startswith(_normalized(self.prefix.as_posix()))
+            if vistir.path.is_in_path(prefix, self.prefix.as_posix())
         ]
         for loc in prefixes:
             if not loc.exists():
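
The one-line fix above swaps a raw string prefix test for
vistir.path.is_in_path, which compares whole path components: a plain
startswith() check would treat a sibling such as /env-copy as living inside
/env. A rough standard-library equivalent (illustrative only, not vistir's
actual implementation):

    import os

    def is_in_path(path, parent):
        # Normalize both paths, then require either equality or a match
        # up to a path separator so "/env-copy" is not a child of "/env".
        path = os.path.normcase(os.path.realpath(path))
        parent = os.path.normcase(os.path.realpath(parent))
        return path == parent or path.startswith(parent + os.sep)
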

From 88fe8504822d30488b6e93708f156d1addebd833 Mon Sep 17 00:00:00 2001
From: Dan Ryan <dan.ryan@canonical.com>
Date: Fri, 10 Apr 2020 17:45:13 -0400
Subject: [PATCH 47/49] Fix monkeypatch warnings on Python 2.7

Signed-off-by: Dan Ryan <dan.ryan@canonical.com>
---
 tests/unit/test_core.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/unit/test_core.py b/tests/unit/test_core.py
index 663cd46d3f..8eca202cfe 100644
--- a/tests/unit/test_core.py
+++ b/tests/unit/test_core.py
@@ -31,8 +31,8 @@ def test_load_dot_env_from_environment_variable_location(monkeypatch, capsys):
         with open(dotenv_path, 'w') as f:
             f.write('{}={}'.format(key, val))
 
-        m.setenv("PIPENV_DOTENV_LOCATION", dotenv_path)
-        m.setattr("pipenv.environments.PIPENV_DOTENV_LOCATION", dotenv_path)
+        m.setenv("PIPENV_DOTENV_LOCATION", str(dotenv_path))
+        m.setattr("pipenv.environments.PIPENV_DOTENV_LOCATION", str(dotenv_path))
         load_dot_env()
 
     assert os.environ[key] == val
@@ -49,8 +49,8 @@ def test_doesnt_load_dot_env_if_disabled(monkeypatch, capsys):
         with open(dotenv_path, 'w') as f:
             f.write('{}={}'.format(key, val))
 
-        m.setenv("PIPENV_DOTENV_LOCATION", dotenv_path)
-        m.setattr("pipenv.environments.PIPENV_DOTENV_LOCATION", dotenv_path)
+        m.setenv("PIPENV_DOTENV_LOCATION", str(dotenv_path))
+        m.setattr("pipenv.environments.PIPENV_DOTENV_LOCATION", str(dotenv_path))
         m.setattr("pipenv.environments.PIPENV_DONT_LOAD_ENV", True)
         load_dot_env()
     assert key not in os.environ
@@ -67,8 +67,8 @@ def test_load_dot_env_warns_if_file_doesnt_exist(monkeypatch, capsys):
             is_console = False
         m.setattr(click._winconsole, "_is_console", lambda x: is_console)
         dotenv_path = os.path.join(tempdir.name, 'does-not-exist.env')
-        m.setenv("PIPENV_DOTENV_LOCATION", dotenv_path)
-        m.setattr("pipenv.environments.PIPENV_DOTENV_LOCATION", dotenv_path)
+        m.setenv("PIPENV_DOTENV_LOCATION", str(dotenv_path))
+        m.setattr("pipenv.environments.PIPENV_DOTENV_LOCATION", str(dotenv_path))
         load_dot_env()
     output, err = capsys.readouterr()
     assert 'Warning' in err

From b493c7784eda91a8d6cf539140713c28bda840db Mon Sep 17 00:00:00 2001
From: Dan Ryan <dan.ryan@canonical.com>
Date: Fri, 10 Apr 2020 17:48:56 -0400
Subject: [PATCH 48/49] Only include first two requires_python markers

Signed-off-by: Dan Ryan <dan.ryan@canonical.com>
---
 tests/pytest-pypi/pytest_pypi/app.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/pytest-pypi/pytest_pypi/app.py b/tests/pytest-pypi/pytest_pypi/app.py
index 5b1e693ee2..6f829819ba 100644
--- a/tests/pytest-pypi/pytest_pypi/app.py
+++ b/tests/pytest-pypi/pytest_pypi/app.py
@@ -90,6 +90,8 @@ def add_release(self, path_to_binary):
         pkg = distlib.wheel.Wheel(path_to_binary)
         md_dict = pkg.metadata.todict()
         requires_python = md_dict.get("requires_python", "")
+        if requires_python.count(".") > 1:
+            requires_python, _, _ = requires_python.rpartition(".")
         self.releases[release] = ReleaseTuple(path_to_binary, requires_python)
         self._package_dirs.add(ReleaseTuple(path, requires_python))
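
The two lines added in PATCH 48 trim the Requires-Python value in the test
index down to major.minor precision ("markers" in the subject refers to the
version components). str.rpartition splits on the last dot and keeps the head,
which suits the simple single-specifier values the fixture wheels appear to
carry; a compound specifier such as ">=2.7,!=3.0.*" would need real parsing:

    requires_python = ">=3.5.3"
    if requires_python.count(".") > 1:
        # ">=3.5.3".rpartition(".") -> (">=3.5", ".", "3"); keep the head.
        requires_python, _, _ = requires_python.rpartition(".")
    assert requires_python == ">=3.5"
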

From 8eddee63d8c9f929bd130ce31f6561055f244148 Mon Sep 17 00:00:00 2001
From: Dan Ryan <dan.ryan@canonical.com>
Date: Fri, 10 Apr 2020 18:21:28 -0400
Subject: [PATCH 49/49] Fix incorrect marker in test

Signed-off-by: Dan Ryan <dan.ryan@canonical.com>
---
 tests/integration/test_install_markers.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/tests/integration/test_install_markers.py b/tests/integration/test_install_markers.py
index 80532ddf22..c755395239 100644
--- a/tests/integration/test_install_markers.py
+++ b/tests/integration/test_install_markers.py
@@ -148,7 +148,11 @@ def test_resolver_unique_markers(PipenvInstance):
         yarl = p.lockfile['default']['yarl']
         assert 'markers' in yarl
         # Two possible marker sets are ok here
-        assert yarl['markers'] in ["python_version in '3.4, 3.5, 3.6'", "python_version >= '3.4'"]
+        assert yarl['markers'] in [
+            "python_version in '3.4, 3.5, 3.6'",
+            "python_version >= '3.4'",
+            "python_version >= '3.5'",  # yarl 1.3.0 requires python 3.5.3
+        ]
 
 
 @flaky