From 552d1274eacbd04c61f46f639bd967d294f25e6a Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Sun, 17 Feb 2019 13:39:04 -0500 Subject: [PATCH] Merge with updates Signed-off-by: Dan Ryan --- .gitignore | 5 + pipenv/environment.py | 17 +- pipenv/environments.py | 32 +- pipenv/utils.py | 665 +++-- pipenv/vendor/cached_property.py | 2 +- pipenv/vendor/certifi/__init__.py | 4 +- pipenv/vendor/certifi/cacert.pem | 242 ++ pipenv/vendor/certifi/core.py | 17 - pipenv/vendor/colorama/LICENSE.txt | 1 - pipenv/vendor/colorama/__init__.py | 3 +- pipenv/vendor/colorama/ansitowin32.py | 43 +- pipenv/vendor/colorama/initialise.py | 2 - pipenv/vendor/colorama/win32.py | 18 +- pipenv/vendor/colorama/winterm.py | 11 +- pipenv/vendor/dotenv/compat.py | 5 + pipenv/vendor/dotenv/environ.py | 54 + pipenv/vendor/dotenv/main.py | 212 +- pipenv/vendor/dotenv/version.py | 2 +- pipenv/vendor/idna/__init__.py | 0 pipenv/vendor/idna/codec.py | 0 pipenv/vendor/idna/compat.py | 0 pipenv/vendor/idna/core.py | 5 +- pipenv/vendor/idna/idnadata.py | 122 +- pipenv/vendor/idna/intranges.py | 0 pipenv/vendor/idna/package_data.py | 2 +- pipenv/vendor/idna/uts46data.py | 660 ++--- pipenv/vendor/packaging/__about__.py | 14 +- pipenv/vendor/packaging/__init__.py | 20 +- pipenv/vendor/packaging/_compat.py | 7 +- pipenv/vendor/packaging/_structures.py | 2 - pipenv/vendor/packaging/markers.py | 89 +- pipenv/vendor/packaging/requirements.py | 34 +- pipenv/vendor/packaging/specifiers.py | 67 +- pipenv/vendor/packaging/utils.py | 8 +- pipenv/vendor/packaging/version.py | 49 +- pipenv/vendor/passa/cli/options.py | 4 +- pipenv/vendor/pep517/LICENSE | 21 + pipenv/vendor/pep517/__init__.py | 4 + pipenv/vendor/pep517/_in_process.py | 207 ++ pipenv/vendor/pep517/build.py | 108 + pipenv/vendor/pep517/check.py | 202 ++ pipenv/vendor/pep517/colorlog.py | 115 + pipenv/vendor/pep517/compat.py | 23 + pipenv/vendor/pep517/envbuild.py | 158 ++ pipenv/vendor/pep517/wrappers.py | 163 ++ pipenv/vendor/pipdeptree.py | 2 +- 
pipenv/vendor/pyparsing.py | 2313 +++++++++++------ pipenv/vendor/pythonfinder/__init__.py | 4 +- pipenv/vendor/pythonfinder/__main__.py | 7 +- pipenv/vendor/pythonfinder/cli.py | 22 +- pipenv/vendor/pythonfinder/environment.py | 12 +- pipenv/vendor/pythonfinder/exceptions.py | 2 +- pipenv/vendor/pythonfinder/models/mixins.py | 319 ++- pipenv/vendor/pythonfinder/models/path.py | 457 ++-- pipenv/vendor/pythonfinder/models/python.py | 516 ++-- pipenv/vendor/pythonfinder/models/windows.py | 118 +- pipenv/vendor/pythonfinder/pythonfinder.py | 197 +- pipenv/vendor/pythonfinder/utils.py | 89 +- pipenv/vendor/pytoml/__init__.py | 4 + pipenv/vendor/pytoml/core.py | 13 + pipenv/vendor/pytoml/parser.py | 341 +++ pipenv/vendor/pytoml/test.py | 30 + pipenv/vendor/pytoml/utils.py | 67 + pipenv/vendor/pytoml/writer.py | 106 + pipenv/vendor/requests/__version__.py | 4 +- pipenv/vendor/requests/models.py | 2 +- pipenv/vendor/requirementslib/__init__.py | 3 +- .../vendor/requirementslib/models/lockfile.py | 2 +- .../vendor/requirementslib/models/pipfile.py | 63 +- .../vendor/requirementslib/models/project.py | 2 +- .../requirementslib/models/requirements.py | 2205 +++++++++++++--- .../requirementslib/models/setup_info.py | 856 ++++-- pipenv/vendor/requirementslib/models/utils.py | 375 ++- pipenv/vendor/requirementslib/models/vcs.py | 40 +- pipenv/vendor/requirementslib/utils.py | 156 +- pipenv/vendor/shellingham/__init__.py | 2 +- pipenv/vendor/shellingham/nt.py | 13 +- pipenv/vendor/shellingham/posix.py | 2 +- pipenv/vendor/shellingham/posix/_default.py | 27 + pipenv/vendor/shellingham/posix/_proc.py | 34 +- pipenv/vendor/shellingham/posix/_ps.py | 4 +- pipenv/vendor/shellingham/posix/linux.py | 35 + pipenv/vendor/shellingham/posix/ps.py | 11 +- pipenv/vendor/six.LICENSE | 2 +- pipenv/vendor/six.py | 65 +- pipenv/vendor/tomlkit/__init__.py | 2 +- pipenv/vendor/tomlkit/container.py | 15 + pipenv/vendor/tomlkit/items.py | 5 +- pipenv/vendor/tomlkit/source.py | 4 - 
pipenv/vendor/urllib3/__init__.py | 2 +- pipenv/vendor/urllib3/response.py | 8 +- pipenv/vendor/urllib3/util/ssl_.py | 2 + pipenv/vendor/vendor.txt | 35 +- pipenv/vendor/vistir/__init__.py | 13 +- pipenv/vendor/vistir/compat.py | 114 +- pipenv/vendor/vistir/contextmanagers.py | 58 +- pipenv/vendor/vistir/misc.py | 200 +- pipenv/vendor/vistir/path.py | 32 +- pipenv/vendor/vistir/spin.py | 170 +- 99 files changed, 9531 insertions(+), 3070 deletions(-) create mode 100644 pipenv/vendor/dotenv/environ.py mode change 100755 => 100644 pipenv/vendor/idna/__init__.py mode change 100755 => 100644 pipenv/vendor/idna/codec.py mode change 100755 => 100644 pipenv/vendor/idna/compat.py mode change 100755 => 100644 pipenv/vendor/idna/core.py mode change 100755 => 100644 pipenv/vendor/idna/idnadata.py mode change 100755 => 100644 pipenv/vendor/idna/intranges.py mode change 100755 => 100644 pipenv/vendor/idna/package_data.py mode change 100755 => 100644 pipenv/vendor/idna/uts46data.py create mode 100644 pipenv/vendor/pep517/LICENSE create mode 100644 pipenv/vendor/pep517/__init__.py create mode 100644 pipenv/vendor/pep517/_in_process.py create mode 100644 pipenv/vendor/pep517/build.py create mode 100644 pipenv/vendor/pep517/check.py create mode 100644 pipenv/vendor/pep517/colorlog.py create mode 100644 pipenv/vendor/pep517/compat.py create mode 100644 pipenv/vendor/pep517/envbuild.py create mode 100644 pipenv/vendor/pep517/wrappers.py create mode 100644 pipenv/vendor/pytoml/__init__.py create mode 100644 pipenv/vendor/pytoml/core.py create mode 100644 pipenv/vendor/pytoml/parser.py create mode 100644 pipenv/vendor/pytoml/test.py create mode 100644 pipenv/vendor/pytoml/utils.py create mode 100644 pipenv/vendor/pytoml/writer.py create mode 100644 pipenv/vendor/shellingham/posix/_default.py create mode 100644 pipenv/vendor/shellingham/posix/linux.py diff --git a/.gitignore b/.gitignore index 766ffe3a68..44abed16ae 100644 --- a/.gitignore +++ b/.gitignore @@ -153,3 +153,8 @@ venv.bak/ # 
Custom rules (everything added below won't be overriden by 'Generate .gitignore File' if you use 'Update' option) .vs/slnx.sqlite + +# mypy/typing section +typeshed/ +.dmypy.json +mypyhtml/ diff --git a/pipenv/environment.py b/pipenv/environment.py index e1b37cf2df..8521501f5e 100644 --- a/pipenv/environment.py +++ b/pipenv/environment.py @@ -163,7 +163,7 @@ def base_paths(self): paths["libdir"] = purelib paths["purelib"] = purelib paths["platlib"] = platlib - paths['PYTHONPATH'] = lib_dirs + paths['PYTHONPATH'] = os.pathsep.join(["", ".", lib_dirs]) paths["libdirs"] = lib_dirs return paths @@ -526,6 +526,7 @@ def activated(self, include_extras=True, extra_dists=None): vendor_dir = parent_path.joinpath("vendor").as_posix() patched_dir = parent_path.joinpath("patched").as_posix() parent_path = parent_path.as_posix() + self.add_dist("pip") prefix = self.prefix.as_posix() with vistir.contextmanagers.temp_environ(), vistir.contextmanagers.temp_path(): os.environ["PATH"] = os.pathsep.join([ @@ -535,12 +536,24 @@ def activated(self, include_extras=True, extra_dists=None): ]) os.environ["PYTHONIOENCODING"] = vistir.compat.fs_str("utf-8") os.environ["PYTHONDONTWRITEBYTECODE"] = vistir.compat.fs_str("1") - os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"] + from .environments import PIPENV_USE_SYSTEM if self.is_venv: + os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"] os.environ["VIRTUAL_ENV"] = vistir.compat.fs_str(prefix) + else: + if not PIPENV_USE_SYSTEM and not os.environ.get("VIRTUAL_ENV"): + os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"] + os.environ.pop("PYTHONHOME", None) sys.path = self.sys_path sys.prefix = self.sys_prefix site.addsitedir(self.base_paths["purelib"]) + pip = self.safe_import("pip") + pip_vendor = self.safe_import("pip._vendor") + pep517_dir = os.path.join(os.path.dirname(pip_vendor.__file__), "pep517") + site.addsitedir(pep517_dir) + os.environ["PYTHONPATH"] = os.pathsep.join([ + os.environ.get("PYTHONPATH", 
self.base_paths["PYTHONPATH"]), pep517_dir + ]) if include_extras: site.addsitedir(parent_path) sys.path.extend([parent_path, patched_dir, vendor_dir]) diff --git a/pipenv/environments.py b/pipenv/environments.py index 445e3b63f4..f451062160 100644 --- a/pipenv/environments.py +++ b/pipenv/environments.py @@ -280,13 +280,37 @@ def is_quiet(threshold=-1): def is_in_virtualenv(): - pipenv_active = os.environ.get("PIPENV_ACTIVE") - virtual_env = os.environ.get("VIRTUAL_ENV") - return (PIPENV_USE_SYSTEM or virtual_env) and not ( - pipenv_active or PIPENV_IGNORE_VIRTUALENVS + """ + Check virtualenv membership dynamically + + :return: True or false depending on whether we are in a regular virtualenv or not + :rtype: bool + """ + + pipenv_active = os.environ.get("PIPENV_ACTIVE", False) + virtual_env = None + use_system = False + ignore_virtualenvs = bool(os.environ.get("PIPENV_IGNORE_VIRTUALENVS", False)) + + if not pipenv_active and not ignore_virtualenvs: + virtual_env = os.environ.get("VIRTUAL_ENV") + use_system = bool(virtual_env) + return (use_system or virtual_env) and not ( + pipenv_active or ignore_virtualenvs ) PIPENV_SPINNER_FAIL_TEXT = fix_utf8(u"✘ {0}") if not PIPENV_HIDE_EMOJIS else ("{0}") PIPENV_SPINNER_OK_TEXT = fix_utf8(u"✔ {0}") if not PIPENV_HIDE_EMOJIS else ("{0}") + + +def is_type_checking(): + try: + from typing import TYPE_CHECKING + except ImportError: + return False + return TYPE_CHECKING + + +MYPY_RUNNING = is_type_checking() diff --git a/pipenv/utils.py b/pipenv/utils.py index 5467300ca3..a44ae1662d 100644 --- a/pipenv/utils.py +++ b/pipenv/utils.py @@ -23,8 +23,7 @@ six.add_move(six.MovedAttribute("Set", "collections", "collections.abc")) # noqa from six.moves import Mapping, Sequence, Set from six.moves.urllib.parse import urlparse -from urllib3 import util as urllib3_util -from vistir.compat import ResourceWarning +from vistir.compat import ResourceWarning, lru_cache from vistir.misc import fs_str import crayons @@ -33,6 +32,7 @@ from . 
import environments from .exceptions import PipenvUsageError from .pep508checker import lookup +from .vendor.urllib3 import util as urllib3_util logging.basicConfig(level=logging.ERROR) @@ -44,6 +44,12 @@ requests_session = None +if environments.MYPY_RUNNING: + from typing import Tuple, Dict, Any, List, Union, Optional + from .vendor.requirementslib.models.requirements import Requirement, Line + from .project import Project + + def _get_requests_session(): """Load requests lazily.""" global requests_session @@ -228,30 +234,16 @@ def prepare_pip_source_args(sources, pip_args=None): return pip_args -def get_resolver_metadata(deps, index_lookup, markers_lookup, project, sources): - from .vendor.requirementslib.models.requirements import Requirement - constraints = [] - for dep in deps: - if not dep: - continue - url = None - indexes, trusted_hosts, remainder = parse_indexes(dep) - if indexes: - url = indexes[0] - dep = " ".join(remainder) - req = Requirement.from_line(dep) - constraints.append(req.constraint_line) - if url: - source = first( - s for s in sources if s.get("url") and url.startswith(s["url"])) - if source: - index_lookup[req.name] = source.get("name") - # strip the marker and re-add it later after resolution - # but we will need a fallback in case resolution fails - # eg pypiwin32 - if req.markers: - markers_lookup[req.name] = req.markers.replace('"', "'") - return constraints +@lru_cache() +def get_pipenv_sitedir(): + # type: () -> Optional[str] + import pkg_resources + site_dir = next( + iter(d for d in pkg_resources.working_set if d.key.lower() == "pipenv"), None + ) + if site_dir is not None: + return site_dir.location + return None class Resolver(object): @@ -267,7 +259,7 @@ def __init__(self, constraints, req_dir, project, sources, clear=False, pre=Fals self.resolved_tree = set() self.hashes = {} self.clear = clear - self.pre = pre + self.pre = bool(pre) self.results = None self._pip_args = None self._constraints = None @@ -286,7 +278,9 @@ def 
__repr__(self): "sources={self.sources})>".format(self=self) ) - def _get_pip_command(self): + @staticmethod + @lru_cache() + def _get_pip_command(): from pip_shims.shims import Command class PipCommand(Command): @@ -297,6 +291,129 @@ class PipCommand(Command): from pipenv.patched.piptools.scripts.compile import get_pip_command return get_pip_command() + @classmethod + def get_metadata( + cls, + deps, # type: List[str] + index_lookup, # type: Dict[str, str] + markers_lookup, # type: Dict[str, str] + project, # type: Project + sources # type: Dict[str, str] + ): + # type: (...) -> Tuple[Set[str], Dict[str, Dict[str, Union[str, bool, List[str]]]], Dict[str, str], Dict[str, str]] + constraints = set() # type: Set[str] + skipped = dict() # type: Dict[str, Dict[str, Union[str, bool, List[str]]]] + if index_lookup is None: + index_lookup = {} + if markers_lookup is None: + markers_lookup = {} + for dep in deps: + if not dep: + continue + req, req_idx, markers_idx = cls.parse_line( + dep, index_lookup=index_lookup, markers_lookup=markers_lookup, project=project + ) + index_lookup.update(req_idx) + markers_lookup.update(markers_idx) + constraint_update, lockfile_update = cls.get_deps_from_req(req) + constraints |= constraint_update + skipped.update(lockfile_update) + return constraints, skipped, index_lookup, markers_lookup + + @classmethod + def parse_line( + cls, + line, # type: str + index_lookup=None, # type: Dict[str, str] + markers_lookup=None, # type: Dict[str, str] + project=None # type: Optional[Project] + ): + # type: (...) 
-> Tuple[Requirement, Dict[str, str], Dict[str, str]] + from .vendor.requirementslib.models.requirements import Requirement + if index_lookup is None: + index_lookup = {} + if markers_lookup is None: + markers_lookup = {} + if project is None: + from .project import Project + project = Project() + url = None + indexes, trusted_hosts, remainder = parse_indexes(line) + if indexes: + url = indexes[0] + line = " ".join(remainder) + req = Requirement.from_line(line) + if url: + index_lookup[req.normalized_name] = project.get_source( + url=url, refresh=True).get("name") + # strip the marker and re-add it later after resolution + # but we will need a fallback in case resolution fails + # eg pypiwin32 + if req.markers: + markers_lookup[req.normalized_name] = req.markers.replace('"', "'") + return req, index_lookup, markers_lookup + + @classmethod + def get_deps_from_line(cls, line): + # type: (str) -> Tuple[Set[str], Dict[str, Dict[str, Union[str, bool, List[str]]]]] + req, _, _ = cls.parse_line(line) + return cls.get_deps_from_req(req) + + @classmethod + def get_deps_from_req(cls, req): + # type: (Requirement) -> Tuple[Set[str], Dict[str, Dict[str, Union[str, bool, List[str]]]]] + from requirementslib.models.utils import _requirement_to_str_lowercase_name + constraints = set() # type: Set[str] + locked_deps = dict() # type: Dict[str, Dict[str, Union[str, bool, List[str]]]] + if req.is_file_or_url or req.is_vcs and not req.is_wheel: + # for local packages with setup.py files and potential direct url deps: + if req.is_vcs: + req_list, lockfile = get_vcs_deps(reqs=[req]) + req = next(iter(req for req in req_list if req is not None), req_list) + entry = lockfile[pep423_name(req.normalized_name)] + else: + _, entry = req.pipfile_entry + parsed_line = req.req.parsed_line # type: Line + setup_info = None # type: Any + name = req.normalized_name + setup_info = req.req.setup_info + locked_deps[pep423_name(name)] = entry + requirements = [v for v in getattr(setup_info, "requires", 
{}).values()] + for r in requirements: + if getattr(r, "url", None) and not getattr(r, "editable", False): + if r is not None: + if not r.url: + continue + line = _requirement_to_str_lowercase_name(r) + new_req, _, _ = cls.parse_line(line) + new_constraints, new_lock = cls.get_deps_from_req(new_req) + locked_deps.update(new_lock) + constraints |= new_constraints + else: + if r is not None: + line = _requirement_to_str_lowercase_name(r) + constraints.add(line) + # ensure the top level entry remains as provided + # note that we shouldn't pin versions for editable vcs deps + if (not req.is_vcs or (req.is_vcs and not req.editable)): + if req.specifiers: + locked_deps[name]["version"] = req.specifiers + elif parsed_line.setup_info and parsed_line.setup_info.version: + locked_deps[name]["version"] = "=={}".format( + parsed_line.setup_info.version + ) + # if not req.is_vcs: + locked_deps.update({name: entry}) + if req.is_vcs and req.editable: + constraints.add(req.constraint_line) + if req.is_file_or_url and req.req.is_local and req.editable and ( + req.req.setup_path is not None and os.path.exists(req.req.setup_path)): + constraints.add(req.constraint_line) + else: + constraints.add(req.constraint_line) + return constraints, locked_deps + return constraints, locked_deps + @property def pip_command(self): if self._pip_command is None: @@ -418,6 +535,58 @@ def resolve(self): self.resolved_tree.update(results) return self.resolved_tree + @classmethod + def prepend_hash_types(cls, checksums): + cleaned_checksums = [] + for checksum in checksums: + if not checksum: + continue + if not checksum.startswith("sha256:"): + checksum = "sha256:{0}".format(checksum) + cleaned_checksums.append(checksum) + return cleaned_checksums + + def collect_hashes(self, ireq): + collected_hashes = [] + if ireq in self.hashes: + collected_hashes += list(self.hashes.get(ireq, [])) + if self._should_include_hash(ireq): + try: + hash_map = self.get_hash(ireq) + collected_hashes += list(hash_map) + 
except (ValueError, KeyError, IndexError, ConnectionError): + pass + elif any( + "python.org" in source["url"] or "pypi.org" in source["url"] + for source in self.sources + ): + pkg_url = "https://pypi.org/pypi/{0}/json".format(ireq.name) + session = _get_requests_session() + try: + # Grab the hashes from the new warehouse API. + r = session.get(pkg_url, timeout=10) + api_releases = r.json()["releases"] + cleaned_releases = {} + for api_version, api_info in api_releases.items(): + api_version = clean_pkg_version(api_version) + cleaned_releases[api_version] = api_info + version = "" + if ireq.specifier: + spec = next(iter(s for s in list(ireq.specifier._specs)), None) + if spec: + version = spec.version + for release in cleaned_releases[version]: + collected_hashes.append(release["digests"]["sha256"]) + collected_hashes = self.prepend_hash_types(collected_hashes) + except (ValueError, KeyError, ConnectionError): + if environments.is_verbose(): + click_echo( + "{0}: Error generating hash for {1}".format( + crayons.red("Warning", bold=True), ireq.name + ), err=True + ) + return collected_hashes + @staticmethod def _should_include_hash(ireq): from pipenv.vendor.vistir.compat import Path, to_native_string @@ -442,31 +611,43 @@ def _should_include_hash(ireq): return False return True + def get_hash(self, ireq, ireq_hashes=None): + """ + Retrieve hashes for a specific ``InstallRequirement`` instance. + + :param ireq: An ``InstallRequirement`` to retrieve hashes for + :type ireq: :class:`~pip_shims.InstallRequirement` + :return: A set of hashes. + :rtype: Set + """ + + # We _ALWAYS MUST PRIORITIZE_ the inclusion of hashes from local sources + # PLEASE *DO NOT MODIFY THIS* TO CHECK WHETHER AN IREQ ALREADY HAS A HASH + # RESOLVED. The resolver will pull hashes from PyPI and only from PyPI. + # The entire purpose of this approach is to include missing hashes. 
+ # This fixes a race condition in resolution for missing dependency caches + # see pypa/pipenv#3289 + if self._should_include_hash(ireq) and ( + not ireq_hashes or ireq.link.scheme == "file" + ): + if not ireq_hashes: + ireq_hashes = set() + new_hashes = self.resolver.repository._hash_cache.get_hash(ireq.link) + ireq_hashes = add_to_set(ireq_hashes, new_hashes) + else: + ireq_hashes = set(ireq_hashes) + # The _ONLY CASE_ where we flat out set the value is if it isn't present + # It's a set, so otherwise we *always* need to do a union update + if ireq not in self.hashes: + return ireq_hashes + else: + return self.hashes[ireq] | ireq_hashes + def resolve_hashes(self): if self.results is not None: resolved_hashes = self.resolver.resolve_hashes(self.results) for ireq, ireq_hashes in resolved_hashes.items(): - # We _ALWAYS MUST PRIORITIZE_ the inclusion of hashes from local sources - # PLEASE *DO NOT MODIFY THIS* TO CHECK WHETHER AN IREQ ALREADY HAS A HASH - # RESOLVED. The resolver will pull hashes from PyPI and only from PyPI. - # The entire purpose of this approach is to include missing hashes. 
- # This fixes a race condition in resolution for missing dependency caches - # see pypa/pipenv#3289 - if self._should_include_hash(ireq) and ( - not ireq_hashes or ireq.link.scheme == "file" - ): - if not ireq_hashes: - ireq_hashes = set() - new_hashes = self.resolver.repository._hash_cache.get_hash(ireq.link) - add_to_set(ireq_hashes, new_hashes) - else: - ireq_hashes = set(ireq_hashes) - # The _ONLY CASE_ where we flat out set the value is if it isn't present - # It's a set, so otherwise we *always* need to do a union update - if ireq not in self.hashes: - self.hashes[ireq] = ireq_hashes - else: - self.hashes[ireq] |= ireq_hashes + self.hashes[ireq] = self.get_hash(ireq, ireq_hashes=ireq_hashes) return self.hashes @@ -487,23 +668,87 @@ def actually_resolve_deps( req_dir=None, ): from pipenv.vendor.vistir.path import create_tracked_tempdir + from pipenv.vendor.requirementslib.models.requirements import Requirement if not req_dir: req_dir = create_tracked_tempdir(suffix="-requirements", prefix="pipenv-") warning_list = [] with warnings.catch_warnings(record=True) as warning_list: - constraints = get_resolver_metadata( + constraints, skipped, index_lookup, markers_lookup = Resolver.get_metadata( deps, index_lookup, markers_lookup, project, sources, ) resolver = Resolver(constraints, req_dir, project, sources, clear=clear, pre=pre) resolved_tree = resolver.resolve() hashes = resolver.resolve_hashes() - + reqs = [(Requirement.from_ireq(ireq), ireq) for ireq in resolved_tree] + results = {} + for req, ireq in reqs: + if (req.vcs and req.editable and not req.is_direct_url): + continue + collected_hashes = resolver.collect_hashes(ireq) + if collected_hashes: + req = req.add_hashes(collected_hashes) + elif resolver._should_include_hash(ireq): + existing_hashes = hashes.get(ireq, set()) + discovered_hashes = existing_hashes | resolver.get_hash(ireq) + if discovered_hashes: + req = req.add_hashes(discovered_hashes) + resolver.hashes[ireq] = discovered_hashes + if 
req.specifiers: + version = str(req.get_version()) + else: + version = None + index = index_lookup.get(req.normalized_name) + markers = markers_lookup.get(req.normalized_name) + req.index = index + name, pf_entry = req.pipfile_entry + name = pep423_name(req.name) + entry = {} + if isinstance(pf_entry, six.string_types): + entry["version"] = pf_entry.lstrip("=") + else: + entry.update(pf_entry) + if version is not None: + entry["version"] = version + if req.line_instance.is_direct_url: + entry["file"] = req.req.uri + if collected_hashes: + entry["hashes"] = sorted(set(collected_hashes)) + entry["name"] = name + if index: # and index != next(iter(project.sources), {}).get("name"): + entry.update({"index": index}) + if markers: + entry.update({"markers": markers}) + entry = translate_markers(entry) + if name in results: + results[name].update(entry) + else: + results[name] = entry + for k in list(skipped.keys()): + req = Requirement.from_pipfile(k, skipped[k]) + ref = None + if req.is_vcs: + ref = req.commit_hash + ireq = req.as_ireq() + entry = skipped[k].copy() + entry["name"] = req.name + ref = ref if ref is not None else entry.get("ref") + if ref: + entry["ref"] = ref + if resolver._should_include_hash(ireq): + collected_hashes = resolver.collect_hashes(ireq) + if collected_hashes: + entry["hashes"] = sorted(set(collected_hashes)) + if k in results: + results[k].update(entry) + else: + results[k] = entry + results = list(results.values()) for warning in warning_list: _show_warning(warning.message, warning.category, warning.filename, warning.lineno, warning.line) - return (resolved_tree, hashes, markers_lookup, resolver) + return (results, hashes, markers_lookup, resolver, skipped) @contextlib.contextmanager @@ -560,7 +805,7 @@ def resolve(cmd, sp): return c -def get_locked_dep(dep, pipfile_section, prefer_pipfile=False): +def get_locked_dep(dep, pipfile_section, prefer_pipfile=True): # the prefer pipfile flag is not used yet, but we are introducing # it now for 
development purposes # TODO: Is this implementation clear? How can it be improved? @@ -570,8 +815,11 @@ def get_locked_dep(dep, pipfile_section, prefer_pipfile=False): "pipfile_entry": None } if isinstance(dep, Mapping) and dep.get("name", ""): - name_options = [dep["name"], pep423_name(dep["name"])] - name = next(iter(k for k in name_options if k in pipfile_section), None) + dep_name = pep423_name(dep["name"]) + name = next(iter( + k for k in pipfile_section.keys() + if pep423_name(k) == dep_name + ), None) entry = pipfile_section[name] if name else None if entry: @@ -581,24 +829,33 @@ def get_locked_dep(dep, pipfile_section, prefer_pipfile=False): version = entry.get("version", "") if entry else "" else: version = entry if entry else "" - lockfile_version = lockfile_entry.get("version", "") + lockfile_name, lockfile_dict = lockfile_entry.copy().popitem() + lockfile_version = lockfile_dict.get("version", "") # Keep pins from the lockfile if prefer_pipfile and lockfile_version != version and version.startswith("=="): - lockfile_version = version + lockfile_dict["version"] = version + lockfile_entry[lockfile_name] = lockfile_dict return lockfile_entry def prepare_lockfile(results, pipfile, lockfile): - from .vendor.requirementslib.utils import is_vcs + # from .vendor.requirementslib.utils import is_vcs for dep in results: + if not dep: + continue # Merge in any relevant information from the pipfile entry, including # markers, normalized names, URL info, etc that we may have dropped during lock - if not is_vcs(dep): - lockfile_entry = get_locked_dep(dep, pipfile) - name = next(iter(k for k in lockfile_entry.keys())) - current_entry = lockfile.get(name) - if not current_entry or not is_vcs(current_entry): - lockfile.update(lockfile_entry) + # if not is_vcs(dep): + lockfile_entry = get_locked_dep(dep, pipfile) + name = next(iter(k for k in lockfile_entry.keys())) + current_entry = lockfile.get(name) + if current_entry: + if not isinstance(current_entry, Mapping): + 
lockfile[name] = lockfile_entry[name] + else: + lockfile[name].update(lockfile_entry[name]) + else: + lockfile[name] = lockfile_entry[name] return lockfile @@ -612,42 +869,59 @@ def venv_resolve_deps( pypi_mirror=None, dev=False, pipfile=None, - lockfile=None, - keep_outdated=False + lockfile=None ): + """ + Resolve dependencies for a pipenv project, acts as a portal to the target environment. + + Regardless of whether a virtual environment is present or not, this will spawn + a subproces which is isolated to the target environment and which will perform + dependency resolution. This function reads the output of that call and mutates + the provided lockfile accordingly, returning nothing. + + :param List[:class:`~requirementslib.Requirement`] deps: A list of dependencies to resolve. + :param Callable which: [description] + :param project: The pipenv Project instance to use during resolution + :param Optional[bool] pre: Whether to resolve pre-release candidates, defaults to False + :param Optional[bool] clear: Whether to clear the cache during resolution, defaults to False + :param Optional[bool] allow_global: Whether to use *sys.executable* as the python binary, defaults to False + :param Optional[str] pypi_mirror: A URL to substitute any time *pypi.org* is encountered, defaults to None + :param Optional[bool] dev: Whether to target *dev-packages* or not, defaults to False + :param pipfile: A Pipfile section to operate on, defaults to None + :type pipfile: Optional[Dict[str, Union[str, Dict[str, bool, List[str]]]]] + :param Dict[str, Any] lockfile: A project lockfile to mutate, defaults to None + :raises RuntimeError: Raised on resolution failure + :return: Nothing + :rtype: None + """ + from .vendor.vistir.misc import fs_str - from .vendor.vistir.compat import Path, to_native_string, JSONDecodeError + from .vendor.vistir.compat import Path, JSONDecodeError from .vendor.vistir.path import create_tracked_tempdir from . 
import resolver import json - vcs_deps = [] - vcs_lockfile = {} results = [] - pipfile_section = "dev_packages" if dev else "packages" + pipfile_section = "dev-packages" if dev else "packages" lockfile_section = "develop" if dev else "default" - vcs_section = "vcs_{0}".format(pipfile_section) - vcs_deps = getattr(project, vcs_section, {}) - if not deps and not vcs_deps: - return {} + if not deps: + if not project.pipfile_exists: + return None + # This is a requirementslib pipfile instance which provides `Requirement` instances + # rather than simply locked dependencies in a lockfile format + deps = convert_deps_to_pip( + project.parsed_pipfile.get(pipfile_section, {}), project=project, + r=False, include_index=True + ) + if not deps: + return None if not pipfile: - pipfile = getattr(project, pipfile_section, None) + pipfile = getattr(project, pipfile_section, {}) if not lockfile: lockfile = project._lockfile req_dir = create_tracked_tempdir(prefix="pipenv", suffix="requirements") - if vcs_deps: - with create_spinner(text=fs_str("Pinning VCS Packages...")) as sp: - vcs_reqs, vcs_lockfile = get_vcs_deps( - project, - which=which, - clear=clear, - pre=pre, - allow_global=allow_global, - dev=dev, - ) - vcs_deps = [req.as_line() for req in vcs_reqs if req.editable] - lockfile[lockfile_section].update(vcs_lockfile) + constraints = set(deps) cmd = [ which("python", allow_global=allow_global), Path(resolver.__file__.rstrip("co")).as_posix() @@ -658,53 +932,31 @@ def venv_resolve_deps( cmd.append("--clear") if allow_global: cmd.append("--system") - if dev: - cmd.append("--dev") with temp_environ(): - os.environ = {fs_str(k): fs_str(val) for k, val in os.environ.items()} - os.environ["PIPENV_PACKAGES"] = str("\n".join(deps)) + os.environ.update({fs_str(k): fs_str(val) for k, val in os.environ.items()}) + os.environ["PIPENV_PACKAGES"] = str("\n".join(constraints)) if pypi_mirror: os.environ["PIPENV_PYPI_MIRROR"] = str(pypi_mirror) os.environ["PIPENV_VERBOSITY"] = 
str(environments.PIPENV_VERBOSITY) os.environ["PIPENV_REQ_DIR"] = fs_str(req_dir) - if keep_outdated: - os.environ["PIPENV_KEEP_OUTDATED"] = fs_str("1") os.environ["PIP_NO_INPUT"] = fs_str("1") + os.environ["PIPENV_SITE_DIR"] = get_pipenv_sitedir() with create_spinner(text=fs_str("Locking...")) as sp: c = resolve(cmd, sp) - results = c.out - if vcs_deps: - with temp_environ(): - os.environ["PIPENV_PACKAGES"] = str("\n".join(vcs_deps)) - sp.text = to_native_string("Locking VCS Dependencies...") - vcs_c = resolve(cmd, sp) - vcs_results, vcs_err = vcs_c.out, vcs_c.err - else: - vcs_results, vcs_err = "", "" + results = c.out.strip() sp.green.ok(environments.PIPENV_SPINNER_OK_TEXT.format("Success!")) - outputs = [results, vcs_results] if environments.is_verbose(): - for output in outputs: - click_echo(output.split("RESULTS:")[0], err=True) + click_echo(results.split("RESULTS:")[1], err=True) try: results = json.loads(results.split("RESULTS:")[1].strip()) - if vcs_results: - # For vcs dependencies, treat the initial pass at locking (i.e. 
checkout) - # as the pipfile entry because it gets us an actual ref to use - vcs_results = json.loads(vcs_results.split("RESULTS:")[1].strip()) - vcs_lockfile = prepare_lockfile(vcs_results, vcs_lockfile.copy(), vcs_lockfile) - else: - vcs_results = [] except (IndexError, JSONDecodeError): - for out, err in [(c.out, c.err), (vcs_results, vcs_err)]: - click_echo(out.strip(), err=True) - click_echo(err.strip(), err=True) + click_echo(c.out.strip(), err=True) + click_echo(c.err.strip(), err=True) raise RuntimeError("There was a problem with locking.") - lockfile[lockfile_section] = prepare_lockfile(results, pipfile, lockfile[lockfile_section]) - for k, v in vcs_lockfile.items(): - if k in getattr(project, vcs_section, {}) or k not in lockfile[lockfile_section]: - lockfile[lockfile_section][k].update(v) + if lockfile_section not in lockfile: + lockfile[lockfile_section] = {} + prepare_lockfile(results, pipfile, lockfile[lockfile_section]) def resolve_deps( @@ -721,9 +973,6 @@ def resolve_deps( """Given a list of dependencies, return a resolved list of dependencies, using pip-tools -- and their hashes, using the warehouse API / pip. 
""" - from .vendor.requests.exceptions import ConnectionError - from .vendor.requirementslib.models.requirements import Requirement - index_lookup = {} markers_lookup = {} python_path = which("python", allow_global=allow_global) @@ -731,8 +980,9 @@ def resolve_deps( os.environ["PIP_SRC"] = project.virtualenv_src_location backup_python_path = sys.executable results = [] + resolver = None if not deps: - return results + return results, None # First (proper) attempt: req_dir = req_dir if req_dir else os.environ.get("req_dir", None) if not req_dir: @@ -740,7 +990,7 @@ def resolve_deps( req_dir = create_tracked_tempdir(prefix="pipenv-", suffix="-requirements") with HackedPythonVersion(python_version=python, python_path=python_path): try: - resolved_tree, hashes, markers_lookup, resolver = actually_resolve_deps( + results, hashes, markers_lookup, resolver, skipped = actually_resolve_deps( deps, index_lookup, markers_lookup, @@ -752,9 +1002,9 @@ def resolve_deps( ) except RuntimeError: # Don't exit here, like usual. - resolved_tree = None + results = None # Second (last-resort) attempt: - if resolved_tree is None: + if results is None: with HackedPythonVersion( python_version=".".join([str(s) for s in sys.version_info[:3]]), python_path=backup_python_path, @@ -762,7 +1012,7 @@ def resolve_deps( try: # Attempt to resolve again, with different Python version information, # particularly for particularly particular packages. 
- resolved_tree, hashes, markers_lookup, resolver = actually_resolve_deps( + results, hashes, markers_lookup, resolver, skipped = actually_resolve_deps( deps, index_lookup, markers_lookup, @@ -774,64 +1024,7 @@ def resolve_deps( ) except RuntimeError: sys.exit(1) - for result in resolved_tree: - if not result.editable: - req = Requirement.from_ireq(result) - name = pep423_name(req.name) - version = str(req.get_version()) - index = index_lookup.get(result.name) - req.index = index - collected_hashes = [] - if result in hashes: - collected_hashes = list(hashes.get(result)) - elif any( - "python.org" in source["url"] or "pypi.org" in source["url"] - for source in sources - ): - pkg_url = "https://pypi.org/pypi/{0}/json".format(name) - session = _get_requests_session() - try: - # Grab the hashes from the new warehouse API. - r = session.get(pkg_url, timeout=10) - api_releases = r.json()["releases"] - cleaned_releases = {} - for api_version, api_info in api_releases.items(): - api_version = clean_pkg_version(api_version) - cleaned_releases[api_version] = api_info - for release in cleaned_releases[version]: - collected_hashes.append(release["digests"]["sha256"]) - collected_hashes = ["sha256:" + s for s in collected_hashes] - except (ValueError, KeyError, ConnectionError): - if environments.is_verbose(): - click_echo( - "{0}: Error generating hash for {1}".format( - crayons.red("Warning", bold=True), name - ), err=True - ) - # # Collect un-collectable hashes (should work with devpi). 
- # try: - # collected_hashes = collected_hashes + list( - # list(resolver.resolve_hashes([result]).items())[0][1] - # ) - # except (ValueError, KeyError, ConnectionError, IndexError): - # if verbose: - # print('Error generating hash for {}'.format(name)) - req.hashes = sorted(set(collected_hashes)) - name, _entry = req.pipfile_entry - entry = {} - if isinstance(_entry, six.string_types): - entry["version"] = _entry.lstrip("=") - else: - entry.update(_entry) - entry["version"] = version - entry["name"] = name - # if index: - # d.update({"index": index}) - if markers_lookup.get(result.name): - entry.update({"markers": markers_lookup.get(result.name)}) - entry = translate_markers(entry) - results.append(entry) - return (results, resolver) + return results, resolver def is_star(val): @@ -850,7 +1043,9 @@ def convert_deps_to_pip(deps, project=None, r=True, include_index=True): dependencies = [] for dep_name, dep in deps.items(): - indexes = project.pipfile_sources if hasattr(project, "pipfile_sources") else [] + if project: + project.clear_pipfile_cache() + indexes = getattr(project, "pipfile_sources", []) if project is not None else [] new_dep = Requirement.from_pipfile(dep_name, dep) if new_dep.index: include_index = True @@ -1310,37 +1505,53 @@ def safe_expandvars(value): def get_vcs_deps( - project, - which=None, - clear=False, - pre=False, - allow_global=False, + project=None, dev=False, pypi_mirror=None, + packages=None, + reqs=None ): from .vendor.requirementslib.models.requirements import Requirement section = "vcs_dev_packages" if dev else "vcs_packages" - reqs = [] + if reqs is None: + reqs = [] lockfile = {} - try: - packages = getattr(project, section) - except AttributeError: - return [], [] - for pkg_name, pkg_pipfile in packages.items(): - requirement = Requirement.from_pipfile(pkg_name, pkg_pipfile) + if not reqs: + if not project and not packages: + raise ValueError( + "Must supply either a project or a pipfile section to lock vcs dependencies." 
+ ) + if not packages: + try: + packages = getattr(project, section) + except AttributeError: + return [], [] + reqs = [Requirement.from_pipfile(name, entry) for name, entry in packages.items()] + result = [] + for requirement in reqs: name = requirement.normalized_name commit_hash = None if requirement.is_vcs: try: - with locked_repository(requirement) as repo: + with temp_path(), locked_repository(requirement) as repo: + from pipenv.vendor.requirementslib.models.requirements import Requirement + # from distutils.sysconfig import get_python_lib + # sys.path = [repo.checkout_directory, "", ".", get_python_lib(plat_specific=0)] commit_hash = repo.get_commit_hash() + name = requirement.normalized_name + version = requirement._specifiers = "=={0}".format(requirement.req.setup_info.version) lockfile[name] = requirement.pipfile_entry[1] lockfile[name]['ref'] = commit_hash - reqs.append(requirement) + result.append(requirement) + version = requirement.specifiers + if not version and requirement.specifiers: + version = requirement.specifiers + if version: + lockfile[name]['version'] = version except OSError: continue - return reqs, lockfile + return result, lockfile def translate_markers(pipfile_entry): @@ -1383,25 +1594,38 @@ def translate_markers(pipfile_entry): def clean_resolved_dep(dep, is_top_level=False, pipfile_entry=None): + from .vendor.requirementslib.utils import is_vcs name = pep423_name(dep["name"]) + lockfile = {} # We use this to determine if there are any markers on top level packages # So we can make sure those win out during resolution if the packages reoccur - lockfile = {"version": "=={0}".format(dep["version"])} - for key in ["hashes", "index", "extras"]: + if "version" in dep: + version = "{0}".format(dep["version"]) + if not version.startswith("=="): + version = "=={0}".format(version) + lockfile["version"] = version + if is_vcs(dep): + ref = dep.get("ref", None) + if ref is not None: + lockfile["ref"] = ref + vcs_type = next(iter(k for k in 
dep.keys() if k in VCS_LIST), None) + if vcs_type: + lockfile[vcs_type] = dep[vcs_type] + if "subdirectory" in dep: + lockfile["subdirectory"] = dep["subdirectory"] + for key in ["hashes", "index", "extras", "editable"]: if key in dep: lockfile[key] = dep[key] # In case we lock a uri or a file when the user supplied a path # remove the uri or file keys from the entry and keep the path - if pipfile_entry and any(k in pipfile_entry for k in ["file", "path"]): - fs_key = next((k for k in ["path", "file"] if k in pipfile_entry), None) - lockfile_key = next((k for k in ["uri", "file", "path"] if k in lockfile), None) - if fs_key != lockfile_key: - try: - del lockfile[lockfile_key] - except KeyError: - # pass when there is no lock file, usually because it's the first time - pass - lockfile[fs_key] = pipfile_entry[fs_key] + fs_key = next(iter(k for k in ["path", "file"] if k in dep), None) + pipfile_fs_key = None + if pipfile_entry: + pipfile_fs_key = next(iter(k for k in ["path", "file"] if k in pipfile_entry), None) + if fs_key and pipfile_fs_key and fs_key != pipfile_fs_key: + lockfile[pipfile_fs_key] = pipfile_entry[pipfile_fs_key] + elif fs_key is not None: + lockfile[fs_key] = dep[fs_key] # If a package is **PRESENT** in the pipfile but has no markers, make sure we # **NEVER** include markers in the lockfile @@ -1541,3 +1765,18 @@ def add_to_set(original_set, element): original_set |= set(element) else: original_set.add(element) + return original_set + + +def is_url_equal(url, other_url): + # type: (str, str) -> bool + """Compare two urls by scheme, host, and path, ignoring auth""" + if not isinstance(url, six.string_types): + raise TypeError("Expected string for url, received {0!r}".format(url)) + if not isinstance(other_url, six.string_types): + raise TypeError("Expected string for url, received {0!r}".format(other_url)) + parsed_url = urllib3_util.parse_url(url) + parsed_other_url = urllib3_util.parse_url(other_url) + unparsed = parsed_url._replace(auth=None, 
query=None, fragment=None).url + unparsed_other = parsed_other_url._replace(auth=None, query=None, fragment=None).url + return unparsed == unparsed_other diff --git a/pipenv/vendor/cached_property.py b/pipenv/vendor/cached_property.py index a06be97a19..125f619588 100644 --- a/pipenv/vendor/cached_property.py +++ b/pipenv/vendor/cached_property.py @@ -2,7 +2,7 @@ __author__ = "Daniel Greenfeld" __email__ = "pydanny@gmail.com" -__version__ = "1.4.3" +__version__ = "1.5.1" __license__ = "BSD" from time import time diff --git a/pipenv/vendor/certifi/__init__.py b/pipenv/vendor/certifi/__init__.py index 50f2e1301f..ef71f3af34 100644 --- a/pipenv/vendor/certifi/__init__.py +++ b/pipenv/vendor/certifi/__init__.py @@ -1,3 +1,3 @@ -from .core import where, old_where +from .core import where -__version__ = "2018.10.15" +__version__ = "2018.11.29" diff --git a/pipenv/vendor/certifi/cacert.pem b/pipenv/vendor/certifi/cacert.pem index e75d85b38a..db68797e24 100644 --- a/pipenv/vendor/certifi/cacert.pem +++ b/pipenv/vendor/certifi/cacert.pem @@ -4268,3 +4268,245 @@ rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV 57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 -----END CERTIFICATE----- + +# Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 146587175971765017618439757810265552097 +# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85 +# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8 +# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH +MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM +QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy 
+MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl +cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM +f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX +mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7 +zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P +fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc +vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4 +Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp +zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO +Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW +k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+ +DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF +lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW +Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1 +d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z +XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR +gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3 +d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv +J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg +DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM ++SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy +F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9 +SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws +E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 146587176055767053814479386953112547951 +# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b +# SHA1 Fingerprint: 
d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d +# SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH +MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM +QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy +MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl +cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv +CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg +GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu +XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd +re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu +PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1 +mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K +8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj +x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR +nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0 +kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok +twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp +8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT +vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT +z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA +pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb +pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB +R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R +RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk +0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC +5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF 
+izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn +yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 146587176140553309517047991083707763997 +# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25 +# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5 +# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5 +-----BEGIN CERTIFICATE----- +MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout +736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A +DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk +fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA +njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 146587176229350439916519468929765261721 +# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26 +# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb +# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd +-----BEGIN CERTIFICATE----- +MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU 
+MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu +hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l +xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0 +CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx +sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Global G2 Root O=UniTrust +# Subject: CN=UCA Global G2 Root O=UniTrust +# Label: "UCA Global G2 Root" +# Serial: 124779693093741543919145257850076631279 +# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8 +# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a +# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9 +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH +bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x +CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds +b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr +b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9 +kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm +VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R +VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc +C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj +tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY +D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv +j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl 
+NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6 +iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP +O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV +ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj +L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl +1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU +b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV +PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj +y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb +EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg +DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI ++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy +YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX +UB+K+wb1whnw0A== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Extended Validation Root O=UniTrust +# Subject: CN=UCA Extended Validation Root O=UniTrust +# Label: "UCA Extended Validation Root" +# Serial: 106100277556486529736699587978573607008 +# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 +# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a +# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF +eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx +MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV +BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog +D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS 
+sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop +O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk +sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi +c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj +VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz +KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ +TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G +sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs +1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD +fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN +l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ +VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 +c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp +4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s +t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj +2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO +vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C +xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx +cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM +fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax +-----END CERTIFICATE----- + +# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Label: "Certigna Root CA" +# Serial: 269714418870597844693661054334862075617 +# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 +# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 +# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 +-----BEGIN CERTIFICATE----- 
+MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw +WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw +MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x +MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD +VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX +BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO +ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M +CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu +I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm +TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh +C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf +ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz +IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT +Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k +JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 +hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB +GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov +L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo +dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr +aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq +hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L +6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG +HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 +0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB +lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi +o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 +gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v 
+faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 +Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh +jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw +3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- diff --git a/pipenv/vendor/certifi/core.py b/pipenv/vendor/certifi/core.py index eab9d1d178..2d02ea44c4 100644 --- a/pipenv/vendor/certifi/core.py +++ b/pipenv/vendor/certifi/core.py @@ -8,14 +8,6 @@ This module returns the installation location of cacert.pem. """ import os -import warnings - - -class DeprecatedBundleWarning(DeprecationWarning): - """ - The weak security bundle is being deprecated. Please bother your service - provider to get them to stop using cross-signed roots. - """ def where(): @@ -24,14 +16,5 @@ def where(): return os.path.join(f, 'cacert.pem') -def old_where(): - warnings.warn( - "The weak security bundle has been removed. certifi.old_where() is now an alias " - "of certifi.where(). Please update your code to use certifi.where() instead. " - "certifi.old_where() will be removed in 2018.", - DeprecatedBundleWarning - ) - return where() - if __name__ == '__main__': print(where()) diff --git a/pipenv/vendor/colorama/LICENSE.txt b/pipenv/vendor/colorama/LICENSE.txt index 5f567799f3..3105888ec1 100644 --- a/pipenv/vendor/colorama/LICENSE.txt +++ b/pipenv/vendor/colorama/LICENSE.txt @@ -25,4 +25,3 @@ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- diff --git a/pipenv/vendor/colorama/__init__.py b/pipenv/vendor/colorama/__init__.py index f4d9ce2108..2a3bf47142 100644 --- a/pipenv/vendor/colorama/__init__.py +++ b/pipenv/vendor/colorama/__init__.py @@ -3,5 +3,4 @@ from .ansi import Fore, Back, Style, Cursor from .ansitowin32 import AnsiToWin32 -__version__ = '0.3.9' - +__version__ = '0.4.1' diff --git a/pipenv/vendor/colorama/ansitowin32.py b/pipenv/vendor/colorama/ansitowin32.py index 1d6e6059c7..359c92be50 100644 --- a/pipenv/vendor/colorama/ansitowin32.py +++ b/pipenv/vendor/colorama/ansitowin32.py @@ -13,14 +13,6 @@ winterm = WinTerm() -def is_stream_closed(stream): - return not hasattr(stream, 'closed') or stream.closed - - -def is_a_tty(stream): - return hasattr(stream, 'isatty') and stream.isatty() - - class StreamWrapper(object): ''' Wraps a stream (such as stdout), acting as a transparent proxy for all @@ -36,9 +28,38 @@ def __init__(self, wrapped, converter): def __getattr__(self, name): return getattr(self.__wrapped, name) + def __enter__(self, *args, **kwargs): + # special method lookup bypasses __getattr__/__getattribute__, see + # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit + # thus, contextlib magic methods are not proxied via __getattr__ + return self.__wrapped.__enter__(*args, **kwargs) + + def __exit__(self, *args, **kwargs): + return self.__wrapped.__exit__(*args, **kwargs) + def write(self, text): self.__convertor.write(text) + def isatty(self): + stream = self.__wrapped + if 'PYCHARM_HOSTED' in os.environ: + if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__): + return True + try: + stream_isatty = stream.isatty + except AttributeError: + return False + else: + return stream_isatty() + + @property + def closed(self): + stream = self.__wrapped + try: + return stream.closed + except AttributeError: + return True + class AnsiToWin32(object): ''' @@ -68,12 +89,12 @@ def __init__(self, wrapped, convert=None, strip=None, 
autoreset=False): # should we strip ANSI sequences from our output? if strip is None: - strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped)) + strip = conversion_supported or (not self.stream.closed and not self.stream.isatty()) self.strip = strip # should we should convert ANSI sequences into win32 calls? if convert is None: - convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped) + convert = conversion_supported and not self.stream.closed and self.stream.isatty() self.convert = convert # dict of ansi codes to win32 functions and parameters @@ -149,7 +170,7 @@ def write(self, text): def reset_all(self): if self.convert: self.call_win32('m', (0,)) - elif not self.strip and not is_stream_closed(self.wrapped): + elif not self.strip and not self.stream.closed: self.wrapped.write(Style.RESET_ALL) diff --git a/pipenv/vendor/colorama/initialise.py b/pipenv/vendor/colorama/initialise.py index 834962a35f..430d066872 100644 --- a/pipenv/vendor/colorama/initialise.py +++ b/pipenv/vendor/colorama/initialise.py @@ -78,5 +78,3 @@ def wrap_stream(stream, convert, strip, autoreset, wrap): if wrapper.should_wrap(): stream = wrapper.stream return stream - - diff --git a/pipenv/vendor/colorama/win32.py b/pipenv/vendor/colorama/win32.py index 8262e350a6..c2d8360336 100644 --- a/pipenv/vendor/colorama/win32.py +++ b/pipenv/vendor/colorama/win32.py @@ -89,11 +89,6 @@ def __str__(self): ] _SetConsoleTitleW.restype = wintypes.BOOL - handles = { - STDOUT: _GetStdHandle(STDOUT), - STDERR: _GetStdHandle(STDERR), - } - def _winapi_test(handle): csbi = CONSOLE_SCREEN_BUFFER_INFO() success = _GetConsoleScreenBufferInfo( @@ -101,17 +96,18 @@ def _winapi_test(handle): return bool(success) def winapi_test(): - return any(_winapi_test(h) for h in handles.values()) + return any(_winapi_test(h) for h in + (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) def GetConsoleScreenBufferInfo(stream_id=STDOUT): - handle = handles[stream_id] 
+ handle = _GetStdHandle(stream_id) csbi = CONSOLE_SCREEN_BUFFER_INFO() success = _GetConsoleScreenBufferInfo( handle, byref(csbi)) return csbi def SetConsoleTextAttribute(stream_id, attrs): - handle = handles[stream_id] + handle = _GetStdHandle(stream_id) return _SetConsoleTextAttribute(handle, attrs) def SetConsoleCursorPosition(stream_id, position, adjust=True): @@ -129,11 +125,11 @@ def SetConsoleCursorPosition(stream_id, position, adjust=True): adjusted_position.Y += sr.Top adjusted_position.X += sr.Left # Resume normal processing - handle = handles[stream_id] + handle = _GetStdHandle(stream_id) return _SetConsoleCursorPosition(handle, adjusted_position) def FillConsoleOutputCharacter(stream_id, char, length, start): - handle = handles[stream_id] + handle = _GetStdHandle(stream_id) char = c_char(char.encode()) length = wintypes.DWORD(length) num_written = wintypes.DWORD(0) @@ -144,7 +140,7 @@ def FillConsoleOutputCharacter(stream_id, char, length, start): def FillConsoleOutputAttribute(stream_id, attr, length, start): ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' - handle = handles[stream_id] + handle = _GetStdHandle(stream_id) attribute = wintypes.WORD(attr) length = wintypes.DWORD(length) num_written = wintypes.DWORD(0) diff --git a/pipenv/vendor/colorama/winterm.py b/pipenv/vendor/colorama/winterm.py index 60309d3c07..0fdb4ec4e9 100644 --- a/pipenv/vendor/colorama/winterm.py +++ b/pipenv/vendor/colorama/winterm.py @@ -44,6 +44,7 @@ def set_attrs(self, value): def reset_all(self, on_stderr=None): self.set_attrs(self._default) self.set_console(attrs=self._default) + self._light = 0 def fore(self, fore=None, light=False, on_stderr=False): if fore is None: @@ -122,12 +123,15 @@ def erase_screen(self, mode=0, on_stderr=False): if mode == 0: from_coord = csbi.dwCursorPosition cells_to_erase = cells_in_screen - cells_before_cursor - if mode == 1: + elif mode == 1: from_coord = win32.COORD(0, 0) 
cells_to_erase = cells_before_cursor elif mode == 2: from_coord = win32.COORD(0, 0) cells_to_erase = cells_in_screen + else: + # invalid mode + return # fill the entire screen with blanks win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) # now set the buffer's attributes accordingly @@ -147,12 +151,15 @@ def erase_line(self, mode=0, on_stderr=False): if mode == 0: from_coord = csbi.dwCursorPosition cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X - if mode == 1: + elif mode == 1: from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) cells_to_erase = csbi.dwCursorPosition.X elif mode == 2: from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) cells_to_erase = csbi.dwSize.X + else: + # invalid mode + return # fill the entire screen with blanks win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) # now set the buffer's attributes accordingly diff --git a/pipenv/vendor/dotenv/compat.py b/pipenv/vendor/dotenv/compat.py index c4a481e6d0..f6baa3617b 100644 --- a/pipenv/vendor/dotenv/compat.py +++ b/pipenv/vendor/dotenv/compat.py @@ -1,4 +1,9 @@ +import sys try: from StringIO import StringIO # noqa except ImportError: from io import StringIO # noqa + +PY2 = sys.version_info[0] == 2 +WIN = sys.platform.startswith('win') +text_type = unicode if PY2 else str # noqa diff --git a/pipenv/vendor/dotenv/environ.py b/pipenv/vendor/dotenv/environ.py new file mode 100644 index 0000000000..ad3571656f --- /dev/null +++ b/pipenv/vendor/dotenv/environ.py @@ -0,0 +1,54 @@ +import os + + +class UndefinedValueError(Exception): + pass + + +class Undefined(object): + """Class to represent undefined type. """ + pass + + +# Reference instance to represent undefined values +undefined = Undefined() + + +def _cast_boolean(value): + """ + Helper to convert config values to boolean as ConfigParser do. 
+ """ + _BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True, + '0': False, 'no': False, 'false': False, 'off': False, '': False} + value = str(value) + if value.lower() not in _BOOLEANS: + raise ValueError('Not a boolean: %s' % value) + + return _BOOLEANS[value.lower()] + + +def getenv(option, default=undefined, cast=undefined): + """ + Return the value for option or default if defined. + """ + + # We can't avoid __contains__ because value may be empty. + if option in os.environ: + value = os.environ[option] + else: + if isinstance(default, Undefined): + raise UndefinedValueError('{} not found. Declare it as envvar or define a default value.'.format(option)) + + value = default + + if isinstance(cast, Undefined): + return value + + if cast is bool: + value = _cast_boolean(value) + elif cast is list: + value = [x for x in value.split(',') if x] + else: + value = cast(value) + + return value diff --git a/pipenv/vendor/dotenv/main.py b/pipenv/vendor/dotenv/main.py index 6ba28bbbc3..98b22ec0d6 100644 --- a/pipenv/vendor/dotenv/main.py +++ b/pipenv/vendor/dotenv/main.py @@ -2,47 +2,90 @@ from __future__ import absolute_import, print_function, unicode_literals import codecs -import fileinput import io import os import re +import shutil import sys -from subprocess import Popen, PIPE, STDOUT +from subprocess import Popen +import tempfile import warnings -from collections import OrderedDict +from collections import OrderedDict, namedtuple +from contextlib import contextmanager -from .compat import StringIO +from .compat import StringIO, PY2, WIN, text_type -__escape_decoder = codecs.getdecoder('unicode_escape') -__posix_variable = re.compile('\$\{[^\}]*\}') +__posix_variable = re.compile(r'\$\{[^\}]*\}') +_binding = re.compile( + r""" + ( + \s* # leading whitespace + (?:export{0}+)? # export -def decode_escaped(escaped): - return __escape_decoder(escaped)[0] + ( '[^']+' # single-quoted key + | [^=\#\s]+ # or unquoted key + )? 
+ (?: + (?:{0}*={0}*) # equal sign -def parse_line(line): - line = line.strip() + ( '(?:\\'|[^'])*' # single-quoted value + | "(?:\\"|[^"])*" # or double-quoted value + | [^\#\r\n]* # or unquoted value + ) + )? - # Ignore lines with `#` or which doesn't have `=` in it. - if not line or line.startswith('#') or '=' not in line: - return None, None + \s* # trailing whitespace + (?:\#[^\r\n]*)? # comment + (?:\r|\n|\r\n)? # newline + ) + """.format(r'[^\S\r\n]'), + re.MULTILINE | re.VERBOSE, +) - k, v = line.split('=', 1) +_escape_sequence = re.compile(r"\\[\\'\"abfnrtv]") - if k.startswith('export '): - (_, _, k) = k.partition('export ') - # Remove any leading and trailing spaces in key, value - k, v = k.strip(), v.strip() +Binding = namedtuple('Binding', 'key value original') - if v: - v = v.encode('unicode-escape').decode('ascii') - quoted = v[0] == v[-1] in ['"', "'"] - if quoted: - v = decode_escaped(v[1:-1]) - return k, v +def decode_escapes(string): + def decode_match(match): + return codecs.decode(match.group(0), 'unicode-escape') + + return _escape_sequence.sub(decode_match, string) + + +def is_surrounded_by(string, char): + return ( + len(string) > 1 + and string[0] == string[-1] == char + ) + + +def parse_binding(string, position): + match = _binding.match(string, position) + (matched, key, value) = match.groups() + if key is None or value is None: + key = None + value = None + else: + value_quoted = is_surrounded_by(value, "'") or is_surrounded_by(value, '"') + if value_quoted: + value = decode_escapes(value[1:-1]) + else: + value = value.strip() + return (Binding(key=key, value=value, original=matched), match.end()) + + +def parse_stream(stream): + string = stream.read() + position = 0 + length = len(string) + while position < length: + (binding, position) = parse_binding(string, position) + yield binding class DotEnv(): @@ -52,19 +95,17 @@ def __init__(self, dotenv_path, verbose=False): self._dict = None self.verbose = verbose + @contextmanager def 
_get_stream(self): - self._is_file = False if isinstance(self.dotenv_path, StringIO): - return self.dotenv_path - - if os.path.exists(self.dotenv_path): - self._is_file = True - return io.open(self.dotenv_path) - - if self.verbose: - warnings.warn("File doesn't exist {}".format(self.dotenv_path)) - - return StringIO('') + yield self.dotenv_path + elif os.path.isfile(self.dotenv_path): + with io.open(self.dotenv_path) as stream: + yield stream + else: + if self.verbose: + warnings.warn("File doesn't exist {}".format(self.dotenv_path)) + yield StringIO('') def dict(self): """Return dotenv as dict""" @@ -76,17 +117,10 @@ def dict(self): return self._dict def parse(self): - f = self._get_stream() - - for line in f: - key, value = parse_line(line) - if not key: - continue - - yield key, value - - if self._is_file: - f.close() + with self._get_stream() as stream: + for mapping in parse_stream(stream): + if mapping.key is not None and mapping.value is not None: + yield mapping.key, mapping.value def set_as_environment_variables(self, override=False): """ @@ -95,13 +129,12 @@ def set_as_environment_variables(self, override=False): for k, v in self.dict().items(): if k in os.environ and not override: continue - # With Python 2 on Windows, ensuree environment variables are - # system strings to avoid "TypeError: environment can only contain - # strings" in Python's subprocess module. - if sys.version_info.major < 3 and sys.platform == 'win32': - from pipenv.utils import fs_str - k = fs_str(k) - v = fs_str(v) + # With Python2 on Windows, force environment variables to str to avoid + # "TypeError: environment can only contain strings" in Python's subprocess.py. 
+ if PY2 and WIN: + if isinstance(k, text_type) or isinstance(v, text_type): + k = k.encode('ascii') + v = v.encode('ascii') os.environ[k] = v return True @@ -127,6 +160,20 @@ def get_key(dotenv_path, key_to_get): return DotEnv(dotenv_path, verbose=True).get(key_to_get) +@contextmanager +def rewrite(path): + try: + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as dest: + with io.open(path) as source: + yield (source, dest) + except BaseException: + if os.path.isfile(dest.name): + os.unlink(dest.name) + raise + else: + shutil.move(dest.name, path) + + def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"): """ Adds or Updates a key/value to the given .env @@ -142,20 +189,19 @@ def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"): if " " in value_to_set: quote_mode = "always" - line_template = '{}="{}"' if quote_mode == "always" else '{}={}' + line_template = '{}="{}"\n' if quote_mode == "always" else '{}={}\n' line_out = line_template.format(key_to_set, value_to_set) - replaced = False - for line in fileinput.input(dotenv_path, inplace=True): - k, v = parse_line(line) - if k == key_to_set: - replaced = True - line = line_out - print(line, end='') - - if not replaced: - with io.open(dotenv_path, "a") as f: - f.write("{}\n".format(line_out)) + with rewrite(dotenv_path) as (source, dest): + replaced = False + for mapping in parse_stream(source): + if mapping.key == key_to_set: + dest.write(line_out) + replaced = True + else: + dest.write(mapping.original) + if not replaced: + dest.write(line_out) return True, key_to_set, value_to_set @@ -167,18 +213,17 @@ def unset_key(dotenv_path, key_to_unset, quote_mode="always"): If the .env path given doesn't exist, fails If the given key doesn't exist in the .env, fails """ - removed = False - if not os.path.exists(dotenv_path): warnings.warn("can't delete from %s - it doesn't exist." 
% dotenv_path) return None, key_to_unset - for line in fileinput.input(dotenv_path, inplace=True): - k, v = parse_line(line) - if k == key_to_unset: - removed = True - line = '' - print(line, end='') + removed = False + with rewrite(dotenv_path) as (source, dest): + for mapping in parse_stream(source): + if mapping.key == key_to_unset: + removed = True + else: + dest.write(mapping.original) if not removed: warnings.warn("key %s not removed from %s - key doesn't exist." % (key_to_unset, dotenv_path)) @@ -194,7 +239,7 @@ def _replacement(name): first search in environ, if not found, then look into the dotenv variables """ - ret = os.getenv(name, values.get(name, "")) + ret = os.getenv(name, new_values.get(name, "")) return ret def _re_sub_callback(match_object): @@ -204,10 +249,12 @@ def _re_sub_callback(match_object): """ return _replacement(match_object.group()[2:-1]) + new_values = {} + for k, v in values.items(): - values[k] = __posix_variable.sub(_re_sub_callback, v) + new_values[k] = __posix_variable.sub(_re_sub_callback, v) - return values + return new_values def _walk_to_root(path): @@ -248,7 +295,7 @@ def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False): for dirname in _walk_to_root(path): check_path = os.path.join(dirname, filename) - if os.path.exists(check_path): + if os.path.isfile(check_path): return check_path if raise_error_if_not_found: @@ -292,19 +339,10 @@ def run_command(command, env): cmd_env.update(env) p = Popen(command, - stdin=PIPE, - stdout=PIPE, - stderr=STDOUT, universal_newlines=True, bufsize=0, shell=False, env=cmd_env) - try: - out, _ = p.communicate() - print(out) - except Exception: - warnings.warn('An error occured, running the command:') - out, _ = p.communicate() - warnings.warn(out) + _, _ = p.communicate() return p.returncode diff --git a/pipenv/vendor/dotenv/version.py b/pipenv/vendor/dotenv/version.py index d69d16e980..1f4c4d43b2 100644 --- a/pipenv/vendor/dotenv/version.py +++ 
b/pipenv/vendor/dotenv/version.py @@ -1 +1 @@ -__version__ = "0.9.1" +__version__ = "0.10.1" diff --git a/pipenv/vendor/idna/__init__.py b/pipenv/vendor/idna/__init__.py old mode 100755 new mode 100644 diff --git a/pipenv/vendor/idna/codec.py b/pipenv/vendor/idna/codec.py old mode 100755 new mode 100644 diff --git a/pipenv/vendor/idna/compat.py b/pipenv/vendor/idna/compat.py old mode 100755 new mode 100644 diff --git a/pipenv/vendor/idna/core.py b/pipenv/vendor/idna/core.py old mode 100755 new mode 100644 index 090c2c18d5..104624ad2d --- a/pipenv/vendor/idna/core.py +++ b/pipenv/vendor/idna/core.py @@ -267,10 +267,7 @@ def alabel(label): try: label = label.encode('ascii') - try: - ulabel(label) - except IDNAError: - raise IDNAError('The label {0} is not a valid A-label'.format(label)) + ulabel(label) if not valid_label_length(label): raise IDNAError('Label too long') return label diff --git a/pipenv/vendor/idna/idnadata.py b/pipenv/vendor/idna/idnadata.py old mode 100755 new mode 100644 index 17974e2337..a80c959d2a --- a/pipenv/vendor/idna/idnadata.py +++ b/pipenv/vendor/idna/idnadata.py @@ -1,6 +1,6 @@ # This file is automatically generated by tools/idna-data -__version__ = "10.0.0" +__version__ = "11.0.0" scripts = { 'Greek': ( 0x37000000374, @@ -49,7 +49,7 @@ 0x30210000302a, 0x30380000303c, 0x340000004db6, - 0x4e0000009feb, + 0x4e0000009ff0, 0xf9000000fa6e, 0xfa700000fada, 0x200000002a6d7, @@ -62,7 +62,7 @@ 'Hebrew': ( 0x591000005c8, 0x5d0000005eb, - 0x5f0000005f5, + 0x5ef000005f5, 0xfb1d0000fb37, 0xfb380000fb3d, 0xfb3e0000fb3f, @@ -248,6 +248,7 @@ 0x6fb: 68, 0x6fc: 68, 0x6ff: 68, + 0x70f: 84, 0x710: 82, 0x712: 68, 0x713: 68, @@ -522,6 +523,7 @@ 0x1875: 68, 0x1876: 68, 0x1877: 68, + 0x1878: 68, 0x1880: 85, 0x1881: 85, 0x1882: 85, @@ -690,6 +692,70 @@ 0x10bad: 68, 0x10bae: 68, 0x10baf: 85, + 0x10d00: 76, + 0x10d01: 68, + 0x10d02: 68, + 0x10d03: 68, + 0x10d04: 68, + 0x10d05: 68, + 0x10d06: 68, + 0x10d07: 68, + 0x10d08: 68, + 0x10d09: 68, + 0x10d0a: 68, + 0x10d0b: 
68, + 0x10d0c: 68, + 0x10d0d: 68, + 0x10d0e: 68, + 0x10d0f: 68, + 0x10d10: 68, + 0x10d11: 68, + 0x10d12: 68, + 0x10d13: 68, + 0x10d14: 68, + 0x10d15: 68, + 0x10d16: 68, + 0x10d17: 68, + 0x10d18: 68, + 0x10d19: 68, + 0x10d1a: 68, + 0x10d1b: 68, + 0x10d1c: 68, + 0x10d1d: 68, + 0x10d1e: 68, + 0x10d1f: 68, + 0x10d20: 68, + 0x10d21: 68, + 0x10d22: 82, + 0x10d23: 68, + 0x10f30: 68, + 0x10f31: 68, + 0x10f32: 68, + 0x10f33: 82, + 0x10f34: 68, + 0x10f35: 68, + 0x10f36: 68, + 0x10f37: 68, + 0x10f38: 68, + 0x10f39: 68, + 0x10f3a: 68, + 0x10f3b: 68, + 0x10f3c: 68, + 0x10f3d: 68, + 0x10f3e: 68, + 0x10f3f: 68, + 0x10f40: 68, + 0x10f41: 68, + 0x10f42: 68, + 0x10f43: 68, + 0x10f44: 68, + 0x10f45: 85, + 0x10f51: 68, + 0x10f52: 68, + 0x10f53: 68, + 0x10f54: 82, + 0x110bd: 85, + 0x110cd: 85, 0x1e900: 68, 0x1e901: 68, 0x1e902: 68, @@ -1034,14 +1100,15 @@ 0x52d0000052e, 0x52f00000530, 0x5590000055a, - 0x56100000587, + 0x56000000587, + 0x58800000589, 0x591000005be, 0x5bf000005c0, 0x5c1000005c3, 0x5c4000005c6, 0x5c7000005c8, 0x5d0000005eb, - 0x5f0000005f3, + 0x5ef000005f3, 0x6100000061b, 0x62000000640, 0x64100000660, @@ -1054,12 +1121,13 @@ 0x7100000074b, 0x74d000007b2, 0x7c0000007f6, + 0x7fd000007fe, 0x8000000082e, 0x8400000085c, 0x8600000086b, 0x8a0000008b5, 0x8b6000008be, - 0x8d4000008e2, + 0x8d3000008e2, 0x8e300000958, 0x96000000964, 0x96600000970, @@ -1077,6 +1145,7 @@ 0x9e0000009e4, 0x9e6000009f2, 0x9fc000009fd, + 0x9fe000009ff, 0xa0100000a04, 0xa0500000a0b, 0xa0f00000a11, @@ -1136,8 +1205,7 @@ 0xbd000000bd1, 0xbd700000bd8, 0xbe600000bf0, - 0xc0000000c04, - 0xc0500000c0d, + 0xc0000000c0d, 0xc0e00000c11, 0xc1200000c29, 0xc2a00000c3a, @@ -1276,7 +1344,7 @@ 0x17dc000017de, 0x17e0000017ea, 0x18100000181a, - 0x182000001878, + 0x182000001879, 0x1880000018ab, 0x18b0000018f6, 0x19000000191f, @@ -1544,11 +1612,11 @@ 0x309d0000309f, 0x30a1000030fb, 0x30fc000030ff, - 0x31050000312f, + 0x310500003130, 0x31a0000031bb, 0x31f000003200, 0x340000004db6, - 0x4e0000009feb, + 0x4e0000009ff0, 
0xa0000000a48d, 0xa4d00000a4fe, 0xa5000000a60d, @@ -1655,8 +1723,10 @@ 0xa7a50000a7a6, 0xa7a70000a7a8, 0xa7a90000a7aa, + 0xa7af0000a7b0, 0xa7b50000a7b6, 0xa7b70000a7b8, + 0xa7b90000a7ba, 0xa7f70000a7f8, 0xa7fa0000a828, 0xa8400000a874, @@ -1664,8 +1734,7 @@ 0xa8d00000a8da, 0xa8e00000a8f8, 0xa8fb0000a8fc, - 0xa8fd0000a8fe, - 0xa9000000a92e, + 0xa8fd0000a92e, 0xa9300000a954, 0xa9800000a9c1, 0xa9cf0000a9da, @@ -1743,7 +1812,7 @@ 0x10a0500010a07, 0x10a0c00010a14, 0x10a1500010a18, - 0x10a1900010a34, + 0x10a1900010a36, 0x10a3800010a3b, 0x10a3f00010a40, 0x10a6000010a7d, @@ -1756,6 +1825,11 @@ 0x10b8000010b92, 0x10c0000010c49, 0x10cc000010cf3, + 0x10d0000010d28, + 0x10d3000010d3a, + 0x10f0000010f1d, + 0x10f2700010f28, + 0x10f3000010f51, 0x1100000011047, 0x1106600011070, 0x1107f000110bb, @@ -1763,10 +1837,11 @@ 0x110f0000110fa, 0x1110000011135, 0x1113600011140, + 0x1114400011147, 0x1115000011174, 0x1117600011177, 0x11180000111c5, - 0x111ca000111cd, + 0x111c9000111cd, 0x111d0000111db, 0x111dc000111dd, 0x1120000011212, @@ -1786,7 +1861,7 @@ 0x1132a00011331, 0x1133200011334, 0x113350001133a, - 0x1133c00011345, + 0x1133b00011345, 0x1134700011349, 0x1134b0001134e, 0x1135000011351, @@ -1796,6 +1871,7 @@ 0x1137000011375, 0x114000001144b, 0x114500001145a, + 0x1145e0001145f, 0x11480000114c6, 0x114c7000114c8, 0x114d0000114da, @@ -1807,15 +1883,17 @@ 0x116500001165a, 0x11680000116b8, 0x116c0000116ca, - 0x117000001171a, + 0x117000001171b, 0x1171d0001172c, 0x117300001173a, + 0x118000001183b, 0x118c0000118ea, 0x118ff00011900, 0x11a0000011a3f, 0x11a4700011a48, 0x11a5000011a84, 0x11a8600011a9a, + 0x11a9d00011a9e, 0x11ac000011af9, 0x11c0000011c09, 0x11c0a00011c37, @@ -1831,6 +1909,13 @@ 0x11d3c00011d3e, 0x11d3f00011d48, 0x11d5000011d5a, + 0x11d6000011d66, + 0x11d6700011d69, + 0x11d6a00011d8f, + 0x11d9000011d92, + 0x11d9300011d99, + 0x11da000011daa, + 0x11ee000011ef7, 0x120000001239a, 0x1248000012544, 0x130000001342f, @@ -1845,11 +1930,12 @@ 0x16b5000016b5a, 0x16b6300016b78, 0x16b7d00016b90, 
+ 0x16e6000016e80, 0x16f0000016f45, 0x16f5000016f7f, 0x16f8f00016fa0, 0x16fe000016fe2, - 0x17000000187ed, + 0x17000000187f2, 0x1880000018af3, 0x1b0000001b11f, 0x1b1700001b2fc, diff --git a/pipenv/vendor/idna/intranges.py b/pipenv/vendor/idna/intranges.py old mode 100755 new mode 100644 diff --git a/pipenv/vendor/idna/package_data.py b/pipenv/vendor/idna/package_data.py old mode 100755 new mode 100644 index 39c192bae6..257e898939 --- a/pipenv/vendor/idna/package_data.py +++ b/pipenv/vendor/idna/package_data.py @@ -1,2 +1,2 @@ -__version__ = '2.7' +__version__ = '2.8' diff --git a/pipenv/vendor/idna/uts46data.py b/pipenv/vendor/idna/uts46data.py old mode 100755 new mode 100644 index 79731cb9e7..a68ed4c0ec --- a/pipenv/vendor/idna/uts46data.py +++ b/pipenv/vendor/idna/uts46data.py @@ -4,7 +4,7 @@ """IDNA Mapping Table from UTS46.""" -__version__ = "10.0.0" +__version__ = "11.0.0" def _seg_0(): return [ (0x0, '3'), @@ -1029,11 +1029,8 @@ def _seg_9(): (0x556, 'M', u'ֆ'), (0x557, 'X'), (0x559, 'V'), - (0x560, 'X'), - (0x561, 'V'), (0x587, 'M', u'եւ'), - (0x588, 'X'), - (0x589, 'V'), + (0x588, 'V'), (0x58B, 'X'), (0x58D, 'V'), (0x590, 'X'), @@ -1041,15 +1038,15 @@ def _seg_9(): (0x5C8, 'X'), (0x5D0, 'V'), (0x5EB, 'X'), - (0x5F0, 'V'), + (0x5EF, 'V'), (0x5F5, 'X'), + (0x606, 'V'), + (0x61C, 'X'), + (0x61E, 'V'), ] def _seg_10(): return [ - (0x606, 'V'), - (0x61C, 'X'), - (0x61E, 'V'), (0x675, 'M', u'اٴ'), (0x676, 'M', u'وٴ'), (0x677, 'M', u'ۇٴ'), @@ -1064,7 +1061,7 @@ def _seg_10(): (0x7B2, 'X'), (0x7C0, 'V'), (0x7FB, 'X'), - (0x800, 'V'), + (0x7FD, 'V'), (0x82E, 'X'), (0x830, 'V'), (0x83F, 'X'), @@ -1078,7 +1075,7 @@ def _seg_10(): (0x8B5, 'X'), (0x8B6, 'V'), (0x8BE, 'X'), - (0x8D4, 'V'), + (0x8D3, 'V'), (0x8E2, 'X'), (0x8E3, 'V'), (0x958, 'M', u'क़'), @@ -1118,7 +1115,7 @@ def _seg_10(): (0x9E0, 'V'), (0x9E4, 'X'), (0x9E6, 'V'), - (0x9FE, 'X'), + (0x9FF, 'X'), (0xA01, 'V'), (0xA04, 'X'), (0xA05, 'V'), @@ -1147,19 +1144,19 @@ def _seg_10(): (0xA4E, 'X'), (0xA51, 'V'), 
(0xA52, 'X'), + (0xA59, 'M', u'ਖ਼'), + (0xA5A, 'M', u'ਗ਼'), + (0xA5B, 'M', u'ਜ਼'), ] def _seg_11(): return [ - (0xA59, 'M', u'ਖ਼'), - (0xA5A, 'M', u'ਗ਼'), - (0xA5B, 'M', u'ਜ਼'), (0xA5C, 'V'), (0xA5D, 'X'), (0xA5E, 'M', u'ਫ਼'), (0xA5F, 'X'), (0xA66, 'V'), - (0xA76, 'X'), + (0xA77, 'X'), (0xA81, 'V'), (0xA84, 'X'), (0xA85, 'V'), @@ -1250,16 +1247,14 @@ def _seg_11(): (0xBE6, 'V'), (0xBFB, 'X'), (0xC00, 'V'), - (0xC04, 'X'), - ] - -def _seg_12(): - return [ - (0xC05, 'V'), (0xC0D, 'X'), (0xC0E, 'V'), (0xC11, 'X'), (0xC12, 'V'), + ] + +def _seg_12(): + return [ (0xC29, 'X'), (0xC2A, 'V'), (0xC3A, 'X'), @@ -1278,8 +1273,6 @@ def _seg_12(): (0xC66, 'V'), (0xC70, 'X'), (0xC78, 'V'), - (0xC84, 'X'), - (0xC85, 'V'), (0xC8D, 'X'), (0xC8E, 'V'), (0xC91, 'X'), @@ -1355,10 +1348,6 @@ def _seg_12(): (0xE83, 'X'), (0xE84, 'V'), (0xE85, 'X'), - ] - -def _seg_13(): - return [ (0xE87, 'V'), (0xE89, 'X'), (0xE8A, 'V'), @@ -1366,6 +1355,10 @@ def _seg_13(): (0xE8D, 'V'), (0xE8E, 'X'), (0xE94, 'V'), + ] + +def _seg_13(): + return [ (0xE98, 'X'), (0xE99, 'V'), (0xEA0, 'X'), @@ -1459,10 +1452,6 @@ def _seg_13(): (0x124E, 'X'), (0x1250, 'V'), (0x1257, 'X'), - ] - -def _seg_14(): - return [ (0x1258, 'V'), (0x1259, 'X'), (0x125A, 'V'), @@ -1470,6 +1459,10 @@ def _seg_14(): (0x1260, 'V'), (0x1289, 'X'), (0x128A, 'V'), + ] + +def _seg_14(): + return [ (0x128E, 'X'), (0x1290, 'V'), (0x12B1, 'X'), @@ -1538,7 +1531,7 @@ def _seg_14(): (0x1810, 'V'), (0x181A, 'X'), (0x1820, 'V'), - (0x1878, 'X'), + (0x1879, 'X'), (0x1880, 'V'), (0x18AB, 'X'), (0x18B0, 'V'), @@ -1563,10 +1556,6 @@ def _seg_14(): (0x19DB, 'X'), (0x19DE, 'V'), (0x1A1C, 'X'), - ] - -def _seg_15(): - return [ (0x1A1E, 'V'), (0x1A5F, 'X'), (0x1A60, 'V'), @@ -1574,6 +1563,10 @@ def _seg_15(): (0x1A7F, 'V'), (0x1A8A, 'X'), (0x1A90, 'V'), + ] + +def _seg_15(): + return [ (0x1A9A, 'X'), (0x1AA0, 'V'), (0x1AAE, 'X'), @@ -1667,10 +1660,6 @@ def _seg_15(): (0x1D68, 'M', u'ρ'), (0x1D69, 'M', u'φ'), (0x1D6A, 'M', u'χ'), - ] - -def _seg_16(): - 
return [ (0x1D6B, 'V'), (0x1D78, 'M', u'н'), (0x1D79, 'V'), @@ -1678,6 +1667,10 @@ def _seg_16(): (0x1D9C, 'M', u'c'), (0x1D9D, 'M', u'ɕ'), (0x1D9E, 'M', u'ð'), + ] + +def _seg_16(): + return [ (0x1D9F, 'M', u'ɜ'), (0x1DA0, 'M', u'f'), (0x1DA1, 'M', u'ɟ'), @@ -1771,10 +1764,6 @@ def _seg_16(): (0x1E36, 'M', u'ḷ'), (0x1E37, 'V'), (0x1E38, 'M', u'ḹ'), - ] - -def _seg_17(): - return [ (0x1E39, 'V'), (0x1E3A, 'M', u'ḻ'), (0x1E3B, 'V'), @@ -1782,6 +1771,10 @@ def _seg_17(): (0x1E3D, 'V'), (0x1E3E, 'M', u'ḿ'), (0x1E3F, 'V'), + ] + +def _seg_17(): + return [ (0x1E40, 'M', u'ṁ'), (0x1E41, 'V'), (0x1E42, 'M', u'ṃ'), @@ -1875,10 +1868,6 @@ def _seg_17(): (0x1E9F, 'V'), (0x1EA0, 'M', u'ạ'), (0x1EA1, 'V'), - ] - -def _seg_18(): - return [ (0x1EA2, 'M', u'ả'), (0x1EA3, 'V'), (0x1EA4, 'M', u'ấ'), @@ -1886,6 +1875,10 @@ def _seg_18(): (0x1EA6, 'M', u'ầ'), (0x1EA7, 'V'), (0x1EA8, 'M', u'ẩ'), + ] + +def _seg_18(): + return [ (0x1EA9, 'V'), (0x1EAA, 'M', u'ẫ'), (0x1EAB, 'V'), @@ -1979,10 +1972,6 @@ def _seg_18(): (0x1F0B, 'M', u'ἃ'), (0x1F0C, 'M', u'ἄ'), (0x1F0D, 'M', u'ἅ'), - ] - -def _seg_19(): - return [ (0x1F0E, 'M', u'ἆ'), (0x1F0F, 'M', u'ἇ'), (0x1F10, 'V'), @@ -1990,6 +1979,10 @@ def _seg_19(): (0x1F18, 'M', u'ἐ'), (0x1F19, 'M', u'ἑ'), (0x1F1A, 'M', u'ἒ'), + ] + +def _seg_19(): + return [ (0x1F1B, 'M', u'ἓ'), (0x1F1C, 'M', u'ἔ'), (0x1F1D, 'M', u'ἕ'), @@ -2083,10 +2076,6 @@ def _seg_19(): (0x1F9A, 'M', u'ἢι'), (0x1F9B, 'M', u'ἣι'), (0x1F9C, 'M', u'ἤι'), - ] - -def _seg_20(): - return [ (0x1F9D, 'M', u'ἥι'), (0x1F9E, 'M', u'ἦι'), (0x1F9F, 'M', u'ἧι'), @@ -2094,6 +2083,10 @@ def _seg_20(): (0x1FA1, 'M', u'ὡι'), (0x1FA2, 'M', u'ὢι'), (0x1FA3, 'M', u'ὣι'), + ] + +def _seg_20(): + return [ (0x1FA4, 'M', u'ὤι'), (0x1FA5, 'M', u'ὥι'), (0x1FA6, 'M', u'ὦι'), @@ -2187,10 +2180,6 @@ def _seg_20(): (0x2024, 'X'), (0x2027, 'V'), (0x2028, 'X'), - ] - -def _seg_21(): - return [ (0x202F, '3', u' '), (0x2030, 'V'), (0x2033, 'M', u'′′'), @@ -2198,6 +2187,10 @@ def _seg_21(): (0x2035, 'V'), 
(0x2036, 'M', u'‵‵'), (0x2037, 'M', u'‵‵‵'), + ] + +def _seg_21(): + return [ (0x2038, 'V'), (0x203C, '3', u'!!'), (0x203D, 'V'), @@ -2291,10 +2284,6 @@ def _seg_21(): (0x2120, 'M', u'sm'), (0x2121, 'M', u'tel'), (0x2122, 'M', u'tm'), - ] - -def _seg_22(): - return [ (0x2123, 'V'), (0x2124, 'M', u'z'), (0x2125, 'V'), @@ -2302,6 +2291,10 @@ def _seg_22(): (0x2127, 'V'), (0x2128, 'M', u'z'), (0x2129, 'V'), + ] + +def _seg_22(): + return [ (0x212A, 'M', u'k'), (0x212B, 'M', u'å'), (0x212C, 'M', u'b'), @@ -2395,10 +2388,6 @@ def _seg_22(): (0x226E, '3'), (0x2270, 'V'), (0x2329, 'M', u'〈'), - ] - -def _seg_23(): - return [ (0x232A, 'M', u'〉'), (0x232B, 'V'), (0x2427, 'X'), @@ -2406,6 +2395,10 @@ def _seg_23(): (0x244B, 'X'), (0x2460, 'M', u'1'), (0x2461, 'M', u'2'), + ] + +def _seg_23(): + return [ (0x2462, 'M', u'3'), (0x2463, 'M', u'4'), (0x2464, 'M', u'5'), @@ -2499,10 +2492,6 @@ def _seg_23(): (0x24CF, 'M', u'z'), (0x24D0, 'M', u'a'), (0x24D1, 'M', u'b'), - ] - -def _seg_24(): - return [ (0x24D2, 'M', u'c'), (0x24D3, 'M', u'd'), (0x24D4, 'M', u'e'), @@ -2510,6 +2499,10 @@ def _seg_24(): (0x24D6, 'M', u'g'), (0x24D7, 'M', u'h'), (0x24D8, 'M', u'i'), + ] + +def _seg_24(): + return [ (0x24D9, 'M', u'j'), (0x24DA, 'M', u'k'), (0x24DB, 'M', u'l'), @@ -2541,13 +2534,9 @@ def _seg_24(): (0x2B76, 'V'), (0x2B96, 'X'), (0x2B98, 'V'), - (0x2BBA, 'X'), - (0x2BBD, 'V'), (0x2BC9, 'X'), (0x2BCA, 'V'), - (0x2BD3, 'X'), - (0x2BEC, 'V'), - (0x2BF0, 'X'), + (0x2BFF, 'X'), (0x2C00, 'M', u'ⰰ'), (0x2C01, 'M', u'ⰱ'), (0x2C02, 'M', u'ⰲ'), @@ -2603,10 +2592,6 @@ def _seg_24(): (0x2C62, 'M', u'ɫ'), (0x2C63, 'M', u'ᵽ'), (0x2C64, 'M', u'ɽ'), - ] - -def _seg_25(): - return [ (0x2C65, 'V'), (0x2C67, 'M', u'ⱨ'), (0x2C68, 'V'), @@ -2618,6 +2603,10 @@ def _seg_25(): (0x2C6E, 'M', u'ɱ'), (0x2C6F, 'M', u'ɐ'), (0x2C70, 'M', u'ɒ'), + ] + +def _seg_25(): + return [ (0x2C71, 'V'), (0x2C72, 'M', u'ⱳ'), (0x2C73, 'V'), @@ -2707,10 +2696,6 @@ def _seg_25(): (0x2CCD, 'V'), (0x2CCE, 'M', u'ⳏ'), (0x2CCF, 'V'), 
- ] - -def _seg_26(): - return [ (0x2CD0, 'M', u'ⳑ'), (0x2CD1, 'V'), (0x2CD2, 'M', u'ⳓ'), @@ -2722,6 +2707,10 @@ def _seg_26(): (0x2CD8, 'M', u'ⳙ'), (0x2CD9, 'V'), (0x2CDA, 'M', u'ⳛ'), + ] + +def _seg_26(): + return [ (0x2CDB, 'V'), (0x2CDC, 'M', u'ⳝ'), (0x2CDD, 'V'), @@ -2768,7 +2757,7 @@ def _seg_26(): (0x2DD8, 'V'), (0x2DDF, 'X'), (0x2DE0, 'V'), - (0x2E4A, 'X'), + (0x2E4F, 'X'), (0x2E80, 'V'), (0x2E9A, 'X'), (0x2E9B, 'V'), @@ -2811,10 +2800,6 @@ def _seg_26(): (0x2F20, 'M', u'士'), (0x2F21, 'M', u'夂'), (0x2F22, 'M', u'夊'), - ] - -def _seg_27(): - return [ (0x2F23, 'M', u'夕'), (0x2F24, 'M', u'大'), (0x2F25, 'M', u'女'), @@ -2826,6 +2811,10 @@ def _seg_27(): (0x2F2B, 'M', u'尸'), (0x2F2C, 'M', u'屮'), (0x2F2D, 'M', u'山'), + ] + +def _seg_27(): + return [ (0x2F2E, 'M', u'巛'), (0x2F2F, 'M', u'工'), (0x2F30, 'M', u'己'), @@ -2915,10 +2904,6 @@ def _seg_27(): (0x2F84, 'M', u'至'), (0x2F85, 'M', u'臼'), (0x2F86, 'M', u'舌'), - ] - -def _seg_28(): - return [ (0x2F87, 'M', u'舛'), (0x2F88, 'M', u'舟'), (0x2F89, 'M', u'艮'), @@ -2930,6 +2915,10 @@ def _seg_28(): (0x2F8F, 'M', u'行'), (0x2F90, 'M', u'衣'), (0x2F91, 'M', u'襾'), + ] + +def _seg_28(): + return [ (0x2F92, 'M', u'見'), (0x2F93, 'M', u'角'), (0x2F94, 'M', u'言'), @@ -3019,13 +3008,9 @@ def _seg_28(): (0x309F, 'M', u'より'), (0x30A0, 'V'), (0x30FF, 'M', u'コト'), - ] - -def _seg_29(): - return [ (0x3100, 'X'), (0x3105, 'V'), - (0x312F, 'X'), + (0x3130, 'X'), (0x3131, 'M', u'ᄀ'), (0x3132, 'M', u'ᄁ'), (0x3133, 'M', u'ᆪ'), @@ -3034,6 +3019,10 @@ def _seg_29(): (0x3136, 'M', u'ᆭ'), (0x3137, 'M', u'ᄃ'), (0x3138, 'M', u'ᄄ'), + ] + +def _seg_29(): + return [ (0x3139, 'M', u'ᄅ'), (0x313A, 'M', u'ᆰ'), (0x313B, 'M', u'ᆱ'), @@ -3123,10 +3112,6 @@ def _seg_29(): (0x318F, 'X'), (0x3190, 'V'), (0x3192, 'M', u'一'), - ] - -def _seg_30(): - return [ (0x3193, 'M', u'二'), (0x3194, 'M', u'三'), (0x3195, 'M', u'四'), @@ -3138,6 +3123,10 @@ def _seg_30(): (0x319B, 'M', u'丙'), (0x319C, 'M', u'丁'), (0x319D, 'M', u'天'), + ] + +def _seg_30(): + return [ (0x319E, 
'M', u'地'), (0x319F, 'M', u'人'), (0x31A0, 'V'), @@ -3227,10 +3216,6 @@ def _seg_30(): (0x3256, 'M', u'26'), (0x3257, 'M', u'27'), (0x3258, 'M', u'28'), - ] - -def _seg_31(): - return [ (0x3259, 'M', u'29'), (0x325A, 'M', u'30'), (0x325B, 'M', u'31'), @@ -3242,6 +3227,10 @@ def _seg_31(): (0x3261, 'M', u'ᄂ'), (0x3262, 'M', u'ᄃ'), (0x3263, 'M', u'ᄅ'), + ] + +def _seg_31(): + return [ (0x3264, 'M', u'ᄆ'), (0x3265, 'M', u'ᄇ'), (0x3266, 'M', u'ᄉ'), @@ -3331,10 +3320,6 @@ def _seg_31(): (0x32BA, 'M', u'45'), (0x32BB, 'M', u'46'), (0x32BC, 'M', u'47'), - ] - -def _seg_32(): - return [ (0x32BD, 'M', u'48'), (0x32BE, 'M', u'49'), (0x32BF, 'M', u'50'), @@ -3346,6 +3331,10 @@ def _seg_32(): (0x32C5, 'M', u'6月'), (0x32C6, 'M', u'7月'), (0x32C7, 'M', u'8月'), + ] + +def _seg_32(): + return [ (0x32C8, 'M', u'9月'), (0x32C9, 'M', u'10月'), (0x32CA, 'M', u'11月'), @@ -3435,10 +3424,6 @@ def _seg_32(): (0x331E, 'M', u'コーポ'), (0x331F, 'M', u'サイクル'), (0x3320, 'M', u'サンチーム'), - ] - -def _seg_33(): - return [ (0x3321, 'M', u'シリング'), (0x3322, 'M', u'センチ'), (0x3323, 'M', u'セント'), @@ -3450,6 +3435,10 @@ def _seg_33(): (0x3329, 'M', u'ノット'), (0x332A, 'M', u'ハイツ'), (0x332B, 'M', u'パーセント'), + ] + +def _seg_33(): + return [ (0x332C, 'M', u'パーツ'), (0x332D, 'M', u'バーレル'), (0x332E, 'M', u'ピアストル'), @@ -3539,10 +3528,6 @@ def _seg_33(): (0x3382, 'M', u'μa'), (0x3383, 'M', u'ma'), (0x3384, 'M', u'ka'), - ] - -def _seg_34(): - return [ (0x3385, 'M', u'kb'), (0x3386, 'M', u'mb'), (0x3387, 'M', u'gb'), @@ -3554,6 +3539,10 @@ def _seg_34(): (0x338D, 'M', u'μg'), (0x338E, 'M', u'mg'), (0x338F, 'M', u'kg'), + ] + +def _seg_34(): + return [ (0x3390, 'M', u'hz'), (0x3391, 'M', u'khz'), (0x3392, 'M', u'mhz'), @@ -3643,10 +3632,6 @@ def _seg_34(): (0x33E6, 'M', u'7日'), (0x33E7, 'M', u'8日'), (0x33E8, 'M', u'9日'), - ] - -def _seg_35(): - return [ (0x33E9, 'M', u'10日'), (0x33EA, 'M', u'11日'), (0x33EB, 'M', u'12日'), @@ -3658,6 +3643,10 @@ def _seg_35(): (0x33F1, 'M', u'18日'), (0x33F2, 'M', u'19日'), (0x33F3, 'M', 
u'20日'), + ] + +def _seg_35(): + return [ (0x33F4, 'M', u'21日'), (0x33F5, 'M', u'22日'), (0x33F6, 'M', u'23日'), @@ -3673,7 +3662,7 @@ def _seg_35(): (0x3400, 'V'), (0x4DB6, 'X'), (0x4DC0, 'V'), - (0x9FEB, 'X'), + (0x9FF0, 'X'), (0xA000, 'V'), (0xA48D, 'X'), (0xA490, 'V'), @@ -3747,10 +3736,6 @@ def _seg_35(): (0xA692, 'M', u'ꚓ'), (0xA693, 'V'), (0xA694, 'M', u'ꚕ'), - ] - -def _seg_36(): - return [ (0xA695, 'V'), (0xA696, 'M', u'ꚗ'), (0xA697, 'V'), @@ -3762,6 +3747,10 @@ def _seg_36(): (0xA69D, 'M', u'ь'), (0xA69E, 'V'), (0xA6F8, 'X'), + ] + +def _seg_36(): + return [ (0xA700, 'V'), (0xA722, 'M', u'ꜣ'), (0xA723, 'V'), @@ -3851,10 +3840,6 @@ def _seg_36(): (0xA780, 'M', u'ꞁ'), (0xA781, 'V'), (0xA782, 'M', u'ꞃ'), - ] - -def _seg_37(): - return [ (0xA783, 'V'), (0xA784, 'M', u'ꞅ'), (0xA785, 'V'), @@ -3866,6 +3851,10 @@ def _seg_37(): (0xA78E, 'V'), (0xA790, 'M', u'ꞑ'), (0xA791, 'V'), + ] + +def _seg_37(): + return [ (0xA792, 'M', u'ꞓ'), (0xA793, 'V'), (0xA796, 'M', u'ꞗ'), @@ -3893,7 +3882,7 @@ def _seg_37(): (0xA7AC, 'M', u'ɡ'), (0xA7AD, 'M', u'ɬ'), (0xA7AE, 'M', u'ɪ'), - (0xA7AF, 'X'), + (0xA7AF, 'V'), (0xA7B0, 'M', u'ʞ'), (0xA7B1, 'M', u'ʇ'), (0xA7B2, 'M', u'ʝ'), @@ -3903,6 +3892,8 @@ def _seg_37(): (0xA7B6, 'M', u'ꞷ'), (0xA7B7, 'V'), (0xA7B8, 'X'), + (0xA7B9, 'V'), + (0xA7BA, 'X'), (0xA7F7, 'V'), (0xA7F8, 'M', u'ħ'), (0xA7F9, 'M', u'œ'), @@ -3917,8 +3908,6 @@ def _seg_37(): (0xA8CE, 'V'), (0xA8DA, 'X'), (0xA8E0, 'V'), - (0xA8FE, 'X'), - (0xA900, 'V'), (0xA954, 'X'), (0xA95F, 'V'), (0xA97D, 'X'), @@ -3955,10 +3944,6 @@ def _seg_37(): (0xAB5F, 'M', u'ꭒ'), (0xAB60, 'V'), (0xAB66, 'X'), - ] - -def _seg_38(): - return [ (0xAB70, 'M', u'Ꭰ'), (0xAB71, 'M', u'Ꭱ'), (0xAB72, 'M', u'Ꭲ'), @@ -3970,6 +3955,10 @@ def _seg_38(): (0xAB78, 'M', u'Ꭸ'), (0xAB79, 'M', u'Ꭹ'), (0xAB7A, 'M', u'Ꭺ'), + ] + +def _seg_38(): + return [ (0xAB7B, 'M', u'Ꭻ'), (0xAB7C, 'M', u'Ꭼ'), (0xAB7D, 'M', u'Ꭽ'), @@ -4059,10 +4048,6 @@ def _seg_38(): (0xF907, 'M', u'龜'), (0xF909, 'M', u'契'), (0xF90A, 'M', 
u'金'), - ] - -def _seg_39(): - return [ (0xF90B, 'M', u'喇'), (0xF90C, 'M', u'奈'), (0xF90D, 'M', u'懶'), @@ -4074,6 +4059,10 @@ def _seg_39(): (0xF913, 'M', u'邏'), (0xF914, 'M', u'樂'), (0xF915, 'M', u'洛'), + ] + +def _seg_39(): + return [ (0xF916, 'M', u'烙'), (0xF917, 'M', u'珞'), (0xF918, 'M', u'落'), @@ -4163,10 +4152,6 @@ def _seg_39(): (0xF96C, 'M', u'塞'), (0xF96D, 'M', u'省'), (0xF96E, 'M', u'葉'), - ] - -def _seg_40(): - return [ (0xF96F, 'M', u'說'), (0xF970, 'M', u'殺'), (0xF971, 'M', u'辰'), @@ -4178,6 +4163,10 @@ def _seg_40(): (0xF977, 'M', u'亮'), (0xF978, 'M', u'兩'), (0xF979, 'M', u'凉'), + ] + +def _seg_40(): + return [ (0xF97A, 'M', u'梁'), (0xF97B, 'M', u'糧'), (0xF97C, 'M', u'良'), @@ -4267,10 +4256,6 @@ def _seg_40(): (0xF9D0, 'M', u'類'), (0xF9D1, 'M', u'六'), (0xF9D2, 'M', u'戮'), - ] - -def _seg_41(): - return [ (0xF9D3, 'M', u'陸'), (0xF9D4, 'M', u'倫'), (0xF9D5, 'M', u'崙'), @@ -4282,6 +4267,10 @@ def _seg_41(): (0xF9DB, 'M', u'率'), (0xF9DC, 'M', u'隆'), (0xF9DD, 'M', u'利'), + ] + +def _seg_41(): + return [ (0xF9DE, 'M', u'吏'), (0xF9DF, 'M', u'履'), (0xF9E0, 'M', u'易'), @@ -4371,10 +4360,6 @@ def _seg_41(): (0xFA39, 'M', u'塀'), (0xFA3A, 'M', u'墨'), (0xFA3B, 'M', u'層'), - ] - -def _seg_42(): - return [ (0xFA3C, 'M', u'屮'), (0xFA3D, 'M', u'悔'), (0xFA3E, 'M', u'慨'), @@ -4386,6 +4371,10 @@ def _seg_42(): (0xFA44, 'M', u'梅'), (0xFA45, 'M', u'海'), (0xFA46, 'M', u'渚'), + ] + +def _seg_42(): + return [ (0xFA47, 'M', u'漢'), (0xFA48, 'M', u'煮'), (0xFA49, 'M', u'爫'), @@ -4475,10 +4464,6 @@ def _seg_42(): (0xFA9F, 'M', u'犯'), (0xFAA0, 'M', u'猪'), (0xFAA1, 'M', u'瑱'), - ] - -def _seg_43(): - return [ (0xFAA2, 'M', u'甆'), (0xFAA3, 'M', u'画'), (0xFAA4, 'M', u'瘝'), @@ -4490,6 +4475,10 @@ def _seg_43(): (0xFAAA, 'M', u'着'), (0xFAAB, 'M', u'磌'), (0xFAAC, 'M', u'窱'), + ] + +def _seg_43(): + return [ (0xFAAD, 'M', u'節'), (0xFAAE, 'M', u'类'), (0xFAAF, 'M', u'絛'), @@ -4579,10 +4568,6 @@ def _seg_43(): (0xFB38, 'M', u'טּ'), (0xFB39, 'M', u'יּ'), (0xFB3A, 'M', u'ךּ'), - ] - -def 
_seg_44(): - return [ (0xFB3B, 'M', u'כּ'), (0xFB3C, 'M', u'לּ'), (0xFB3D, 'X'), @@ -4594,6 +4579,10 @@ def _seg_44(): (0xFB43, 'M', u'ףּ'), (0xFB44, 'M', u'פּ'), (0xFB45, 'X'), + ] + +def _seg_44(): + return [ (0xFB46, 'M', u'צּ'), (0xFB47, 'M', u'קּ'), (0xFB48, 'M', u'רּ'), @@ -4683,10 +4672,6 @@ def _seg_44(): (0xFC19, 'M', u'خج'), (0xFC1A, 'M', u'خح'), (0xFC1B, 'M', u'خم'), - ] - -def _seg_45(): - return [ (0xFC1C, 'M', u'سج'), (0xFC1D, 'M', u'سح'), (0xFC1E, 'M', u'سخ'), @@ -4698,6 +4683,10 @@ def _seg_45(): (0xFC24, 'M', u'ضخ'), (0xFC25, 'M', u'ضم'), (0xFC26, 'M', u'طح'), + ] + +def _seg_45(): + return [ (0xFC27, 'M', u'طم'), (0xFC28, 'M', u'ظم'), (0xFC29, 'M', u'عج'), @@ -4787,10 +4776,6 @@ def _seg_45(): (0xFC7D, 'M', u'في'), (0xFC7E, 'M', u'قى'), (0xFC7F, 'M', u'قي'), - ] - -def _seg_46(): - return [ (0xFC80, 'M', u'كا'), (0xFC81, 'M', u'كل'), (0xFC82, 'M', u'كم'), @@ -4802,6 +4787,10 @@ def _seg_46(): (0xFC88, 'M', u'ما'), (0xFC89, 'M', u'مم'), (0xFC8A, 'M', u'نر'), + ] + +def _seg_46(): + return [ (0xFC8B, 'M', u'نز'), (0xFC8C, 'M', u'نم'), (0xFC8D, 'M', u'نن'), @@ -4891,10 +4880,6 @@ def _seg_46(): (0xFCE1, 'M', u'بم'), (0xFCE2, 'M', u'به'), (0xFCE3, 'M', u'تم'), - ] - -def _seg_47(): - return [ (0xFCE4, 'M', u'ته'), (0xFCE5, 'M', u'ثم'), (0xFCE6, 'M', u'ثه'), @@ -4906,6 +4891,10 @@ def _seg_47(): (0xFCEC, 'M', u'كم'), (0xFCED, 'M', u'لم'), (0xFCEE, 'M', u'نم'), + ] + +def _seg_47(): + return [ (0xFCEF, 'M', u'نه'), (0xFCF0, 'M', u'يم'), (0xFCF1, 'M', u'يه'), @@ -4995,10 +4984,6 @@ def _seg_47(): (0xFD57, 'M', u'تمخ'), (0xFD58, 'M', u'جمح'), (0xFD5A, 'M', u'حمي'), - ] - -def _seg_48(): - return [ (0xFD5B, 'M', u'حمى'), (0xFD5C, 'M', u'سحج'), (0xFD5D, 'M', u'سجح'), @@ -5010,6 +4995,10 @@ def _seg_48(): (0xFD66, 'M', u'صمم'), (0xFD67, 'M', u'شحم'), (0xFD69, 'M', u'شجي'), + ] + +def _seg_48(): + return [ (0xFD6A, 'M', u'شمخ'), (0xFD6C, 'M', u'شمم'), (0xFD6E, 'M', u'ضحى'), @@ -5099,10 +5088,6 @@ def _seg_48(): (0xFDF3, 'M', u'اكبر'), (0xFDF4, 'M', u'محمد'), 
(0xFDF5, 'M', u'صلعم'), - ] - -def _seg_49(): - return [ (0xFDF6, 'M', u'رسول'), (0xFDF7, 'M', u'عليه'), (0xFDF8, 'M', u'وسلم'), @@ -5114,6 +5099,10 @@ def _seg_49(): (0xFDFE, 'X'), (0xFE00, 'I'), (0xFE10, '3', u','), + ] + +def _seg_49(): + return [ (0xFE11, 'M', u'、'), (0xFE12, 'X'), (0xFE13, '3', u':'), @@ -5203,10 +5192,6 @@ def _seg_49(): (0xFE8F, 'M', u'ب'), (0xFE93, 'M', u'ة'), (0xFE95, 'M', u'ت'), - ] - -def _seg_50(): - return [ (0xFE99, 'M', u'ث'), (0xFE9D, 'M', u'ج'), (0xFEA1, 'M', u'ح'), @@ -5218,6 +5203,10 @@ def _seg_50(): (0xFEB1, 'M', u'س'), (0xFEB5, 'M', u'ش'), (0xFEB9, 'M', u'ص'), + ] + +def _seg_50(): + return [ (0xFEBD, 'M', u'ض'), (0xFEC1, 'M', u'ط'), (0xFEC5, 'M', u'ظ'), @@ -5307,10 +5296,6 @@ def _seg_50(): (0xFF41, 'M', u'a'), (0xFF42, 'M', u'b'), (0xFF43, 'M', u'c'), - ] - -def _seg_51(): - return [ (0xFF44, 'M', u'd'), (0xFF45, 'M', u'e'), (0xFF46, 'M', u'f'), @@ -5322,6 +5307,10 @@ def _seg_51(): (0xFF4C, 'M', u'l'), (0xFF4D, 'M', u'm'), (0xFF4E, 'M', u'n'), + ] + +def _seg_51(): + return [ (0xFF4F, 'M', u'o'), (0xFF50, 'M', u'p'), (0xFF51, 'M', u'q'), @@ -5411,10 +5400,6 @@ def _seg_51(): (0xFFA5, 'M', u'ᆬ'), (0xFFA6, 'M', u'ᆭ'), (0xFFA7, 'M', u'ᄃ'), - ] - -def _seg_52(): - return [ (0xFFA8, 'M', u'ᄄ'), (0xFFA9, 'M', u'ᄅ'), (0xFFAA, 'M', u'ᆰ'), @@ -5426,6 +5411,10 @@ def _seg_52(): (0xFFB0, 'M', u'ᄚ'), (0xFFB1, 'M', u'ᄆ'), (0xFFB2, 'M', u'ᄇ'), + ] + +def _seg_52(): + return [ (0xFFB3, 'M', u'ᄈ'), (0xFFB4, 'M', u'ᄡ'), (0xFFB5, 'M', u'ᄉ'), @@ -5515,10 +5504,6 @@ def _seg_52(): (0x10300, 'V'), (0x10324, 'X'), (0x1032D, 'V'), - ] - -def _seg_53(): - return [ (0x1034B, 'X'), (0x10350, 'V'), (0x1037B, 'X'), @@ -5530,6 +5515,10 @@ def _seg_53(): (0x103D6, 'X'), (0x10400, 'M', u'𐐨'), (0x10401, 'M', u'𐐩'), + ] + +def _seg_53(): + return [ (0x10402, 'M', u'𐐪'), (0x10403, 'M', u'𐐫'), (0x10404, 'M', u'𐐬'), @@ -5619,10 +5608,6 @@ def _seg_53(): (0x10570, 'X'), (0x10600, 'V'), (0x10737, 'X'), - ] - -def _seg_54(): - return [ (0x10740, 'V'), (0x10756, 
'X'), (0x10760, 'V'), @@ -5634,6 +5619,10 @@ def _seg_54(): (0x1080A, 'V'), (0x10836, 'X'), (0x10837, 'V'), + ] + +def _seg_54(): + return [ (0x10839, 'X'), (0x1083C, 'V'), (0x1083D, 'X'), @@ -5666,11 +5655,11 @@ def _seg_54(): (0x10A15, 'V'), (0x10A18, 'X'), (0x10A19, 'V'), - (0x10A34, 'X'), + (0x10A36, 'X'), (0x10A38, 'V'), (0x10A3B, 'X'), (0x10A3F, 'V'), - (0x10A48, 'X'), + (0x10A49, 'X'), (0x10A50, 'V'), (0x10A59, 'X'), (0x10A60, 'V'), @@ -5723,10 +5712,6 @@ def _seg_54(): (0x10C9B, 'M', u'𐳛'), (0x10C9C, 'M', u'𐳜'), (0x10C9D, 'M', u'𐳝'), - ] - -def _seg_55(): - return [ (0x10C9E, 'M', u'𐳞'), (0x10C9F, 'M', u'𐳟'), (0x10CA0, 'M', u'𐳠'), @@ -5738,6 +5723,10 @@ def _seg_55(): (0x10CA6, 'M', u'𐳦'), (0x10CA7, 'M', u'𐳧'), (0x10CA8, 'M', u'𐳨'), + ] + +def _seg_55(): + return [ (0x10CA9, 'M', u'𐳩'), (0x10CAA, 'M', u'𐳪'), (0x10CAB, 'M', u'𐳫'), @@ -5752,9 +5741,15 @@ def _seg_55(): (0x10CC0, 'V'), (0x10CF3, 'X'), (0x10CFA, 'V'), - (0x10D00, 'X'), + (0x10D28, 'X'), + (0x10D30, 'V'), + (0x10D3A, 'X'), (0x10E60, 'V'), (0x10E7F, 'X'), + (0x10F00, 'V'), + (0x10F28, 'X'), + (0x10F30, 'V'), + (0x10F5A, 'X'), (0x11000, 'V'), (0x1104E, 'X'), (0x11052, 'V'), @@ -5770,7 +5765,7 @@ def _seg_55(): (0x11100, 'V'), (0x11135, 'X'), (0x11136, 'V'), - (0x11144, 'X'), + (0x11147, 'X'), (0x11150, 'V'), (0x11177, 'X'), (0x11180, 'V'), @@ -5811,7 +5806,7 @@ def _seg_55(): (0x11334, 'X'), (0x11335, 'V'), (0x1133A, 'X'), - (0x1133C, 'V'), + (0x1133B, 'V'), (0x11345, 'X'), (0x11347, 'V'), (0x11349, 'X'), @@ -5827,16 +5822,16 @@ def _seg_55(): (0x1136D, 'X'), (0x11370, 'V'), (0x11375, 'X'), - ] - -def _seg_56(): - return [ (0x11400, 'V'), (0x1145A, 'X'), (0x1145B, 'V'), (0x1145C, 'X'), (0x1145D, 'V'), - (0x1145E, 'X'), + ] + +def _seg_56(): + return [ + (0x1145F, 'X'), (0x11480, 'V'), (0x114C8, 'X'), (0x114D0, 'V'), @@ -5856,11 +5851,13 @@ def _seg_56(): (0x116C0, 'V'), (0x116CA, 'X'), (0x11700, 'V'), - (0x1171A, 'X'), + (0x1171B, 'X'), (0x1171D, 'V'), (0x1172C, 'X'), (0x11730, 'V'), (0x11740, 
'X'), + (0x11800, 'V'), + (0x1183C, 'X'), (0x118A0, 'M', u'𑣀'), (0x118A1, 'M', u'𑣁'), (0x118A2, 'M', u'𑣂'), @@ -5902,8 +5899,6 @@ def _seg_56(): (0x11A50, 'V'), (0x11A84, 'X'), (0x11A86, 'V'), - (0x11A9D, 'X'), - (0x11A9E, 'V'), (0x11AA3, 'X'), (0x11AC0, 'V'), (0x11AF9, 'X'), @@ -5931,14 +5926,28 @@ def _seg_56(): (0x11D3B, 'X'), (0x11D3C, 'V'), (0x11D3E, 'X'), - ] - -def _seg_57(): - return [ (0x11D3F, 'V'), (0x11D48, 'X'), (0x11D50, 'V'), (0x11D5A, 'X'), + (0x11D60, 'V'), + ] + +def _seg_57(): + return [ + (0x11D66, 'X'), + (0x11D67, 'V'), + (0x11D69, 'X'), + (0x11D6A, 'V'), + (0x11D8F, 'X'), + (0x11D90, 'V'), + (0x11D92, 'X'), + (0x11D93, 'V'), + (0x11D99, 'X'), + (0x11DA0, 'V'), + (0x11DAA, 'X'), + (0x11EE0, 'V'), + (0x11EF9, 'X'), (0x12000, 'V'), (0x1239A, 'X'), (0x12400, 'V'), @@ -5973,6 +5982,8 @@ def _seg_57(): (0x16B78, 'X'), (0x16B7D, 'V'), (0x16B90, 'X'), + (0x16E60, 'V'), + (0x16E9B, 'X'), (0x16F00, 'V'), (0x16F45, 'X'), (0x16F50, 'V'), @@ -5982,7 +5993,7 @@ def _seg_57(): (0x16FE0, 'V'), (0x16FE2, 'X'), (0x17000, 'V'), - (0x187ED, 'X'), + (0x187F2, 'X'), (0x18800, 'V'), (0x18AF3, 'X'), (0x1B000, 'V'), @@ -6024,21 +6035,23 @@ def _seg_57(): (0x1D1C1, 'V'), (0x1D1E9, 'X'), (0x1D200, 'V'), + ] + +def _seg_58(): + return [ (0x1D246, 'X'), + (0x1D2E0, 'V'), + (0x1D2F4, 'X'), (0x1D300, 'V'), (0x1D357, 'X'), (0x1D360, 'V'), - (0x1D372, 'X'), + (0x1D379, 'X'), (0x1D400, 'M', u'a'), (0x1D401, 'M', u'b'), (0x1D402, 'M', u'c'), (0x1D403, 'M', u'd'), (0x1D404, 'M', u'e'), (0x1D405, 'M', u'f'), - ] - -def _seg_58(): - return [ (0x1D406, 'M', u'g'), (0x1D407, 'M', u'h'), (0x1D408, 'M', u'i'), @@ -6126,6 +6139,10 @@ def _seg_58(): (0x1D45A, 'M', u'm'), (0x1D45B, 'M', u'n'), (0x1D45C, 'M', u'o'), + ] + +def _seg_59(): + return [ (0x1D45D, 'M', u'p'), (0x1D45E, 'M', u'q'), (0x1D45F, 'M', u'r'), @@ -6139,10 +6156,6 @@ def _seg_58(): (0x1D467, 'M', u'z'), (0x1D468, 'M', u'a'), (0x1D469, 'M', u'b'), - ] - -def _seg_59(): - return [ (0x1D46A, 'M', u'c'), (0x1D46B, 'M', 
u'd'), (0x1D46C, 'M', u'e'), @@ -6230,6 +6243,10 @@ def _seg_59(): (0x1D4C1, 'M', u'l'), (0x1D4C2, 'M', u'm'), (0x1D4C3, 'M', u'n'), + ] + +def _seg_60(): + return [ (0x1D4C4, 'X'), (0x1D4C5, 'M', u'p'), (0x1D4C6, 'M', u'q'), @@ -6243,10 +6260,6 @@ def _seg_59(): (0x1D4CE, 'M', u'y'), (0x1D4CF, 'M', u'z'), (0x1D4D0, 'M', u'a'), - ] - -def _seg_60(): - return [ (0x1D4D1, 'M', u'b'), (0x1D4D2, 'M', u'c'), (0x1D4D3, 'M', u'd'), @@ -6334,6 +6347,10 @@ def _seg_60(): (0x1D526, 'M', u'i'), (0x1D527, 'M', u'j'), (0x1D528, 'M', u'k'), + ] + +def _seg_61(): + return [ (0x1D529, 'M', u'l'), (0x1D52A, 'M', u'm'), (0x1D52B, 'M', u'n'), @@ -6347,10 +6364,6 @@ def _seg_60(): (0x1D533, 'M', u'v'), (0x1D534, 'M', u'w'), (0x1D535, 'M', u'x'), - ] - -def _seg_61(): - return [ (0x1D536, 'M', u'y'), (0x1D537, 'M', u'z'), (0x1D538, 'M', u'a'), @@ -6438,6 +6451,10 @@ def _seg_61(): (0x1D58C, 'M', u'g'), (0x1D58D, 'M', u'h'), (0x1D58E, 'M', u'i'), + ] + +def _seg_62(): + return [ (0x1D58F, 'M', u'j'), (0x1D590, 'M', u'k'), (0x1D591, 'M', u'l'), @@ -6451,10 +6468,6 @@ def _seg_61(): (0x1D599, 'M', u't'), (0x1D59A, 'M', u'u'), (0x1D59B, 'M', u'v'), - ] - -def _seg_62(): - return [ (0x1D59C, 'M', u'w'), (0x1D59D, 'M', u'x'), (0x1D59E, 'M', u'y'), @@ -6542,6 +6555,10 @@ def _seg_62(): (0x1D5F0, 'M', u'c'), (0x1D5F1, 'M', u'd'), (0x1D5F2, 'M', u'e'), + ] + +def _seg_63(): + return [ (0x1D5F3, 'M', u'f'), (0x1D5F4, 'M', u'g'), (0x1D5F5, 'M', u'h'), @@ -6555,10 +6572,6 @@ def _seg_62(): (0x1D5FD, 'M', u'p'), (0x1D5FE, 'M', u'q'), (0x1D5FF, 'M', u'r'), - ] - -def _seg_63(): - return [ (0x1D600, 'M', u's'), (0x1D601, 'M', u't'), (0x1D602, 'M', u'u'), @@ -6646,6 +6659,10 @@ def _seg_63(): (0x1D654, 'M', u'y'), (0x1D655, 'M', u'z'), (0x1D656, 'M', u'a'), + ] + +def _seg_64(): + return [ (0x1D657, 'M', u'b'), (0x1D658, 'M', u'c'), (0x1D659, 'M', u'd'), @@ -6659,10 +6676,6 @@ def _seg_63(): (0x1D661, 'M', u'l'), (0x1D662, 'M', u'm'), (0x1D663, 'M', u'n'), - ] - -def _seg_64(): - return [ (0x1D664, 
'M', u'o'), (0x1D665, 'M', u'p'), (0x1D666, 'M', u'q'), @@ -6750,6 +6763,10 @@ def _seg_64(): (0x1D6B9, 'M', u'θ'), (0x1D6BA, 'M', u'σ'), (0x1D6BB, 'M', u'τ'), + ] + +def _seg_65(): + return [ (0x1D6BC, 'M', u'υ'), (0x1D6BD, 'M', u'φ'), (0x1D6BE, 'M', u'χ'), @@ -6763,10 +6780,6 @@ def _seg_64(): (0x1D6C6, 'M', u'ε'), (0x1D6C7, 'M', u'ζ'), (0x1D6C8, 'M', u'η'), - ] - -def _seg_65(): - return [ (0x1D6C9, 'M', u'θ'), (0x1D6CA, 'M', u'ι'), (0x1D6CB, 'M', u'κ'), @@ -6854,6 +6867,10 @@ def _seg_65(): (0x1D71F, 'M', u'δ'), (0x1D720, 'M', u'ε'), (0x1D721, 'M', u'ζ'), + ] + +def _seg_66(): + return [ (0x1D722, 'M', u'η'), (0x1D723, 'M', u'θ'), (0x1D724, 'M', u'ι'), @@ -6867,10 +6884,6 @@ def _seg_65(): (0x1D72C, 'M', u'ρ'), (0x1D72D, 'M', u'θ'), (0x1D72E, 'M', u'σ'), - ] - -def _seg_66(): - return [ (0x1D72F, 'M', u'τ'), (0x1D730, 'M', u'υ'), (0x1D731, 'M', u'φ'), @@ -6958,6 +6971,10 @@ def _seg_66(): (0x1D785, 'M', u'φ'), (0x1D786, 'M', u'χ'), (0x1D787, 'M', u'ψ'), + ] + +def _seg_67(): + return [ (0x1D788, 'M', u'ω'), (0x1D789, 'M', u'∂'), (0x1D78A, 'M', u'ε'), @@ -6971,10 +6988,6 @@ def _seg_66(): (0x1D792, 'M', u'γ'), (0x1D793, 'M', u'δ'), (0x1D794, 'M', u'ε'), - ] - -def _seg_67(): - return [ (0x1D795, 'M', u'ζ'), (0x1D796, 'M', u'η'), (0x1D797, 'M', u'θ'), @@ -7062,6 +7075,10 @@ def _seg_67(): (0x1D7EC, 'M', u'0'), (0x1D7ED, 'M', u'1'), (0x1D7EE, 'M', u'2'), + ] + +def _seg_68(): + return [ (0x1D7EF, 'M', u'3'), (0x1D7F0, 'M', u'4'), (0x1D7F1, 'M', u'5'), @@ -7075,10 +7092,6 @@ def _seg_67(): (0x1D7F9, 'M', u'3'), (0x1D7FA, 'M', u'4'), (0x1D7FB, 'M', u'5'), - ] - -def _seg_68(): - return [ (0x1D7FC, 'M', u'6'), (0x1D7FD, 'M', u'7'), (0x1D7FE, 'M', u'8'), @@ -7143,6 +7156,8 @@ def _seg_68(): (0x1E95A, 'X'), (0x1E95E, 'V'), (0x1E960, 'X'), + (0x1EC71, 'V'), + (0x1ECB5, 'X'), (0x1EE00, 'M', u'ا'), (0x1EE01, 'M', u'ب'), (0x1EE02, 'M', u'ج'), @@ -7164,6 +7179,10 @@ def _seg_68(): (0x1EE12, 'M', u'ق'), (0x1EE13, 'M', u'ر'), (0x1EE14, 'M', u'ش'), + ] + +def _seg_69(): + 
return [ (0x1EE15, 'M', u'ت'), (0x1EE16, 'M', u'ث'), (0x1EE17, 'M', u'خ'), @@ -7179,10 +7198,6 @@ def _seg_68(): (0x1EE21, 'M', u'ب'), (0x1EE22, 'M', u'ج'), (0x1EE23, 'X'), - ] - -def _seg_69(): - return [ (0x1EE24, 'M', u'ه'), (0x1EE25, 'X'), (0x1EE27, 'M', u'ح'), @@ -7268,6 +7283,10 @@ def _seg_69(): (0x1EE81, 'M', u'ب'), (0x1EE82, 'M', u'ج'), (0x1EE83, 'M', u'د'), + ] + +def _seg_70(): + return [ (0x1EE84, 'M', u'ه'), (0x1EE85, 'M', u'و'), (0x1EE86, 'M', u'ز'), @@ -7283,10 +7302,6 @@ def _seg_69(): (0x1EE90, 'M', u'ف'), (0x1EE91, 'M', u'ص'), (0x1EE92, 'M', u'ق'), - ] - -def _seg_70(): - return [ (0x1EE93, 'M', u'ر'), (0x1EE94, 'M', u'ش'), (0x1EE95, 'M', u'ت'), @@ -7372,6 +7387,10 @@ def _seg_70(): (0x1F122, '3', u'(s)'), (0x1F123, '3', u'(t)'), (0x1F124, '3', u'(u)'), + ] + +def _seg_71(): + return [ (0x1F125, '3', u'(v)'), (0x1F126, '3', u'(w)'), (0x1F127, '3', u'(x)'), @@ -7382,15 +7401,11 @@ def _seg_70(): (0x1F12C, 'M', u'r'), (0x1F12D, 'M', u'cd'), (0x1F12E, 'M', u'wz'), - (0x1F12F, 'X'), + (0x1F12F, 'V'), (0x1F130, 'M', u'a'), (0x1F131, 'M', u'b'), (0x1F132, 'M', u'c'), (0x1F133, 'M', u'd'), - ] - -def _seg_71(): - return [ (0x1F134, 'M', u'e'), (0x1F135, 'M', u'f'), (0x1F136, 'M', u'g'), @@ -7476,6 +7491,10 @@ def _seg_71(): (0x1F239, 'M', u'割'), (0x1F23A, 'M', u'営'), (0x1F23B, 'M', u'配'), + ] + +def _seg_72(): + return [ (0x1F23C, 'X'), (0x1F240, 'M', u'〔本〕'), (0x1F241, 'M', u'〔三〕'), @@ -7491,21 +7510,17 @@ def _seg_71(): (0x1F251, 'M', u'可'), (0x1F252, 'X'), (0x1F260, 'V'), - ] - -def _seg_72(): - return [ (0x1F266, 'X'), (0x1F300, 'V'), (0x1F6D5, 'X'), (0x1F6E0, 'V'), (0x1F6ED, 'X'), (0x1F6F0, 'V'), - (0x1F6F9, 'X'), + (0x1F6FA, 'X'), (0x1F700, 'V'), (0x1F774, 'X'), (0x1F780, 'V'), - (0x1F7D5, 'X'), + (0x1F7D9, 'X'), (0x1F800, 'V'), (0x1F80C, 'X'), (0x1F810, 'V'), @@ -7521,15 +7536,21 @@ def _seg_72(): (0x1F910, 'V'), (0x1F93F, 'X'), (0x1F940, 'V'), - (0x1F94D, 'X'), - (0x1F950, 'V'), - (0x1F96C, 'X'), - (0x1F980, 'V'), - (0x1F998, 'X'), + (0x1F971, 
'X'), + (0x1F973, 'V'), + (0x1F977, 'X'), + (0x1F97A, 'V'), + (0x1F97B, 'X'), + (0x1F97C, 'V'), + (0x1F9A3, 'X'), + (0x1F9B0, 'V'), + (0x1F9BA, 'X'), (0x1F9C0, 'V'), - (0x1F9C1, 'X'), + (0x1F9C3, 'X'), (0x1F9D0, 'V'), - (0x1F9E7, 'X'), + (0x1FA00, 'X'), + (0x1FA60, 'V'), + (0x1FA6E, 'X'), (0x20000, 'V'), (0x2A6D7, 'X'), (0x2A700, 'V'), @@ -7574,6 +7595,10 @@ def _seg_72(): (0x2F81F, 'M', u'㓟'), (0x2F820, 'M', u'刻'), (0x2F821, 'M', u'剆'), + ] + +def _seg_73(): + return [ (0x2F822, 'M', u'割'), (0x2F823, 'M', u'剷'), (0x2F824, 'M', u'㔕'), @@ -7595,10 +7620,6 @@ def _seg_72(): (0x2F836, 'M', u'及'), (0x2F837, 'M', u'叟'), (0x2F838, 'M', u'𠭣'), - ] - -def _seg_73(): - return [ (0x2F839, 'M', u'叫'), (0x2F83A, 'M', u'叱'), (0x2F83B, 'M', u'吆'), @@ -7678,6 +7699,10 @@ def _seg_73(): (0x2F887, 'M', u'幩'), (0x2F888, 'M', u'㡢'), (0x2F889, 'M', u'𢆃'), + ] + +def _seg_74(): + return [ (0x2F88A, 'M', u'㡼'), (0x2F88B, 'M', u'庰'), (0x2F88C, 'M', u'庳'), @@ -7699,10 +7724,6 @@ def _seg_73(): (0x2F89E, 'M', u'志'), (0x2F89F, 'M', u'忹'), (0x2F8A0, 'M', u'悁'), - ] - -def _seg_74(): - return [ (0x2F8A1, 'M', u'㤺'), (0x2F8A2, 'M', u'㤜'), (0x2F8A3, 'M', u'悔'), @@ -7782,6 +7803,10 @@ def _seg_74(): (0x2F8ED, 'M', u'櫛'), (0x2F8EE, 'M', u'㰘'), (0x2F8EF, 'M', u'次'), + ] + +def _seg_75(): + return [ (0x2F8F0, 'M', u'𣢧'), (0x2F8F1, 'M', u'歔'), (0x2F8F2, 'M', u'㱎'), @@ -7803,10 +7828,6 @@ def _seg_74(): (0x2F902, 'M', u'流'), (0x2F903, 'M', u'浩'), (0x2F904, 'M', u'浸'), - ] - -def _seg_75(): - return [ (0x2F905, 'M', u'涅'), (0x2F906, 'M', u'𣴞'), (0x2F907, 'M', u'洴'), @@ -7886,6 +7907,10 @@ def _seg_75(): (0x2F953, 'M', u'祖'), (0x2F954, 'M', u'𥚚'), (0x2F955, 'M', u'𥛅'), + ] + +def _seg_76(): + return [ (0x2F956, 'M', u'福'), (0x2F957, 'M', u'秫'), (0x2F958, 'M', u'䄯'), @@ -7907,10 +7932,6 @@ def _seg_75(): (0x2F969, 'M', u'糣'), (0x2F96A, 'M', u'紀'), (0x2F96B, 'M', u'𥾆'), - ] - -def _seg_76(): - return [ (0x2F96C, 'M', u'絣'), (0x2F96D, 'M', u'䌁'), (0x2F96E, 'M', u'緇'), @@ -7990,6 +8011,10 @@ def _seg_76(): 
(0x2F9B8, 'M', u'蚈'), (0x2F9B9, 'M', u'蜎'), (0x2F9BA, 'M', u'蛢'), + ] + +def _seg_77(): + return [ (0x2F9BB, 'M', u'蝹'), (0x2F9BC, 'M', u'蜨'), (0x2F9BD, 'M', u'蝫'), @@ -8011,10 +8036,6 @@ def _seg_76(): (0x2F9CD, 'M', u'䚾'), (0x2F9CE, 'M', u'䛇'), (0x2F9CF, 'M', u'誠'), - ] - -def _seg_77(): - return [ (0x2F9D0, 'M', u'諭'), (0x2F9D1, 'M', u'變'), (0x2F9D2, 'M', u'豕'), @@ -8094,6 +8115,10 @@ def _seg_77(): (0x2FA1D, 'M', u'𪘀'), (0x2FA1E, 'X'), (0xE0100, 'I'), + ] + +def _seg_78(): + return [ (0xE01F0, 'X'), ] @@ -8176,4 +8201,5 @@ def _seg_77(): + _seg_75() + _seg_76() + _seg_77() + + _seg_78() ) diff --git a/pipenv/vendor/packaging/__about__.py b/pipenv/vendor/packaging/__about__.py index 21fc6ce3e7..7481c9e298 100644 --- a/pipenv/vendor/packaging/__about__.py +++ b/pipenv/vendor/packaging/__about__.py @@ -4,18 +4,24 @@ from __future__ import absolute_import, division, print_function __all__ = [ - "__title__", "__summary__", "__uri__", "__version__", "__author__", - "__email__", "__license__", "__copyright__", + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", ] __title__ = "packaging" __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "18.0" +__version__ = "19.0" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" __license__ = "BSD or Apache License, Version 2.0" -__copyright__ = "Copyright 2014-2018 %s" % __author__ +__copyright__ = "Copyright 2014-2019 %s" % __author__ diff --git a/pipenv/vendor/packaging/__init__.py b/pipenv/vendor/packaging/__init__.py index 5ee6220203..a0cf67df52 100644 --- a/pipenv/vendor/packaging/__init__.py +++ b/pipenv/vendor/packaging/__init__.py @@ -4,11 +4,23 @@ from __future__ import absolute_import, division, print_function from .__about__ import ( - __author__, __copyright__, __email__, __license__, __summary__, __title__, - __uri__, __version__ + __author__, + 
__copyright__, + __email__, + __license__, + __summary__, + __title__, + __uri__, + __version__, ) __all__ = [ - "__title__", "__summary__", "__uri__", "__version__", "__author__", - "__email__", "__license__", "__copyright__", + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", ] diff --git a/pipenv/vendor/packaging/_compat.py b/pipenv/vendor/packaging/_compat.py index 210bb80b7e..25da473c19 100644 --- a/pipenv/vendor/packaging/_compat.py +++ b/pipenv/vendor/packaging/_compat.py @@ -12,9 +12,9 @@ # flake8: noqa if PY3: - string_types = str, + string_types = (str,) else: - string_types = basestring, + string_types = (basestring,) def with_metaclass(meta, *bases): @@ -27,4 +27,5 @@ def with_metaclass(meta, *bases): class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) + + return type.__new__(metaclass, "temporary_class", (), {}) diff --git a/pipenv/vendor/packaging/_structures.py b/pipenv/vendor/packaging/_structures.py index e9fc4a0496..68dcca634d 100644 --- a/pipenv/vendor/packaging/_structures.py +++ b/pipenv/vendor/packaging/_structures.py @@ -5,7 +5,6 @@ class Infinity(object): - def __repr__(self): return "Infinity" @@ -38,7 +37,6 @@ def __neg__(self): class NegativeInfinity(object): - def __repr__(self): return "-Infinity" diff --git a/pipenv/vendor/packaging/markers.py b/pipenv/vendor/packaging/markers.py index 5fdf510ca6..eff5abbbc1 100644 --- a/pipenv/vendor/packaging/markers.py +++ b/pipenv/vendor/packaging/markers.py @@ -17,8 +17,11 @@ __all__ = [ - "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName", - "Marker", "default_environment", + "InvalidMarker", + "UndefinedComparison", + "UndefinedEnvironmentName", + "Marker", + "default_environment", ] @@ -42,7 +45,6 @@ class UndefinedEnvironmentName(ValueError): class Node(object): - def __init__(self, value): self.value = 
value @@ -57,62 +59,52 @@ def serialize(self): class Variable(Node): - def serialize(self): return str(self) class Value(Node): - def serialize(self): return '"{0}"'.format(self) class Op(Node): - def serialize(self): return str(self) VARIABLE = ( - L("implementation_version") | - L("platform_python_implementation") | - L("implementation_name") | - L("python_full_version") | - L("platform_release") | - L("platform_version") | - L("platform_machine") | - L("platform_system") | - L("python_version") | - L("sys_platform") | - L("os_name") | - L("os.name") | # PEP-345 - L("sys.platform") | # PEP-345 - L("platform.version") | # PEP-345 - L("platform.machine") | # PEP-345 - L("platform.python_implementation") | # PEP-345 - L("python_implementation") | # undocumented setuptools legacy - L("extra") + L("implementation_version") + | L("platform_python_implementation") + | L("implementation_name") + | L("python_full_version") + | L("platform_release") + | L("platform_version") + | L("platform_machine") + | L("platform_system") + | L("python_version") + | L("sys_platform") + | L("os_name") + | L("os.name") + | L("sys.platform") # PEP-345 + | L("platform.version") # PEP-345 + | L("platform.machine") # PEP-345 + | L("platform.python_implementation") # PEP-345 + | L("python_implementation") # PEP-345 + | L("extra") # undocumented setuptools legacy ) ALIASES = { - 'os.name': 'os_name', - 'sys.platform': 'sys_platform', - 'platform.version': 'platform_version', - 'platform.machine': 'platform_machine', - 'platform.python_implementation': 'platform_python_implementation', - 'python_implementation': 'platform_python_implementation' + "os.name": "os_name", + "sys.platform": "sys_platform", + "platform.version": "platform_version", + "platform.machine": "platform_machine", + "platform.python_implementation": "platform_python_implementation", + "python_implementation": "platform_python_implementation", } VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) 
VERSION_CMP = ( - L("===") | - L("==") | - L(">=") | - L("<=") | - L("!=") | - L("~=") | - L(">") | - L("<") + L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") ) MARKER_OP = VERSION_CMP | L("not in") | L("in") @@ -152,8 +144,11 @@ def _format_marker(marker, first=True): # where the single item is itself it's own list. In that case we want skip # the rest of this function so that we don't get extraneous () on the # outside. - if (isinstance(marker, list) and len(marker) == 1 and - isinstance(marker[0], (list, tuple))): + if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): return _format_marker(marker[0]) if isinstance(marker, list): @@ -239,20 +234,20 @@ def _evaluate_markers(markers, environment): def format_full_version(info): - version = '{0.major}.{0.minor}.{0.micro}'.format(info) + version = "{0.major}.{0.minor}.{0.micro}".format(info) kind = info.releaselevel - if kind != 'final': + if kind != "final": version += kind[0] + str(info.serial) return version def default_environment(): - if hasattr(sys, 'implementation'): + if hasattr(sys, "implementation"): iver = format_full_version(sys.implementation.version) implementation_name = sys.implementation.name else: - iver = '0' - implementation_name = '' + iver = "0" + implementation_name = "" return { "implementation_name": implementation_name, @@ -270,13 +265,13 @@ def default_environment(): class Marker(object): - def __init__(self, marker): try: self._markers = _coerce_parse_result(MARKER.parseString(marker)) except ParseException as e: err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( - marker, marker[e.loc:e.loc + 8]) + marker, marker[e.loc : e.loc + 8] + ) raise InvalidMarker(err_str) def __str__(self): diff --git a/pipenv/vendor/packaging/requirements.py b/pipenv/vendor/packaging/requirements.py index e8008a6ddd..4d9688b932 100644 --- a/pipenv/vendor/packaging/requirements.py +++ 
b/pipenv/vendor/packaging/requirements.py @@ -38,8 +38,8 @@ class InvalidRequirement(ValueError): NAME = IDENTIFIER("name") EXTRA = IDENTIFIER -URI = Regex(r'[^ ]+')("url") -URL = (AT + URI) +URI = Regex(r"[^ ]+")("url") +URL = AT + URI EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") @@ -48,17 +48,18 @@ class InvalidRequirement(ValueError): VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), - joinString=",", adjacent=False)("_raw_spec") +VERSION_MANY = Combine( + VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False +)("_raw_spec") _VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '') +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start:t._original_end]) + lambda s, l, t: Marker(s[t._original_start : t._original_end]) ) MARKER_SEPARATOR = SEMICOLON MARKER = MARKER_SEPARATOR + MARKER_EXPR @@ -66,8 +67,7 @@ class InvalidRequirement(ValueError): VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) URL_AND_MARKER = URL + Optional(MARKER) -NAMED_REQUIREMENT = \ - NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) +NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd # pyparsing isn't thread safe during initialization, so we do it eagerly, see @@ -92,15 +92,21 @@ def __init__(self, requirement_string): try: req = REQUIREMENT.parseString(requirement_string) except ParseException as e: - raise 
InvalidRequirement("Parse error at \"{0!r}\": {1}".format( - requirement_string[e.loc:e.loc + 8], e.msg - )) + raise InvalidRequirement( + 'Parse error at "{0!r}": {1}'.format( + requirement_string[e.loc : e.loc + 8], e.msg + ) + ) self.name = req.name if req.url: parsed_url = urlparse.urlparse(req.url) - if not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc): + if parsed_url.scheme == "file": + if urlparse.urlunparse(parsed_url) != req.url: + raise InvalidRequirement("Invalid URL given") + elif not (parsed_url.scheme and parsed_url.netloc) or ( + not parsed_url.scheme and not parsed_url.netloc + ): raise InvalidRequirement("Invalid URL: {0}".format(req.url)) self.url = req.url else: @@ -120,6 +126,8 @@ def __str__(self): if self.url: parts.append("@ {0}".format(self.url)) + if self.marker: + parts.append(" ") if self.marker: parts.append("; {0}".format(self.marker)) diff --git a/pipenv/vendor/packaging/specifiers.py b/pipenv/vendor/packaging/specifiers.py index 4c798999d0..743576a080 100644 --- a/pipenv/vendor/packaging/specifiers.py +++ b/pipenv/vendor/packaging/specifiers.py @@ -19,7 +19,6 @@ class InvalidSpecifier(ValueError): class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): - @abc.abstractmethod def __str__(self): """ @@ -84,10 +83,7 @@ def __init__(self, spec="", prereleases=None): if not match: raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) - self._spec = ( - match.group("operator").strip(), - match.group("version").strip(), - ) + self._spec = (match.group("operator").strip(), match.group("version").strip()) # Store whether or not this Specifier should accept prereleases self._prereleases = prereleases @@ -99,11 +95,7 @@ def __repr__(self): else "" ) - return "<{0}({1!r}{2})>".format( - self.__class__.__name__, - str(self), - pre, - ) + return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre) def __str__(self): return "{0}{1}".format(*self._spec) @@ -194,8 +186,9 @@ 
def filter(self, iterable, prereleases=None): # If our version is a prerelease, and we were not set to allow # prereleases, then we'll store it for later incase nothing # else matches this specifier. - if (parsed_version.is_prerelease and not - (prereleases or self.prereleases)): + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): found_prereleases.append(version) # Either this is not a prerelease, or we should have been # accepting prereleases from the beginning. @@ -213,8 +206,7 @@ def filter(self, iterable, prereleases=None): class LegacySpecifier(_IndividualSpecifier): - _regex_str = ( - r""" + _regex_str = r""" (?P(==|!=|<=|>=|<|>)) \s* (?P @@ -225,10 +217,8 @@ class LegacySpecifier(_IndividualSpecifier): # them, and a comma since it's a version separator. ) """ - ) - _regex = re.compile( - r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) _operators = { "==": "equal", @@ -269,13 +259,13 @@ def wrapped(self, prospective, spec): if not isinstance(prospective, Version): return False return fn(self, prospective, spec) + return wrapped class Specifier(_IndividualSpecifier): - _regex_str = ( - r""" + _regex_str = r""" (?P(~=|==|!=|<=|>=|<|>|===)) (?P (?: @@ -367,10 +357,8 @@ class Specifier(_IndividualSpecifier): ) ) """ - ) - _regex = re.compile( - r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) _operators = { "~=": "compatible", @@ -397,8 +385,7 @@ def _compare_compatible(self, prospective, spec): prefix = ".".join( list( itertools.takewhile( - lambda x: (not x.startswith("post") and not - x.startswith("dev")), + lambda x: (not x.startswith("post") and not x.startswith("dev")), _version_split(spec), ) )[:-1] @@ -407,8 +394,9 @@ def _compare_compatible(self, prospective, spec): # Add the prefix notation to the end of our string prefix += ".*" - return 
(self._get_operator(">=")(prospective, spec) and - self._get_operator("==")(prospective, prefix)) + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) @_require_version_compare def _compare_equal(self, prospective, spec): @@ -428,7 +416,7 @@ def _compare_equal(self, prospective, spec): # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. - prospective = prospective[:len(spec)] + prospective = prospective[: len(spec)] # Pad out our two sides with zeros so that they both equal the same # length. @@ -567,27 +555,17 @@ def _pad_version(left, right): right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) # Get the rest of our versions - left_split.append(left[len(left_split[0]):]) - right_split.append(right[len(right_split[0]):]) + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) # Insert our padding - left_split.insert( - 1, - ["0"] * max(0, len(right_split[0]) - len(left_split[0])), - ) - right_split.insert( - 1, - ["0"] * max(0, len(left_split[0]) - len(right_split[0])), - ) + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) - return ( - list(itertools.chain(*left_split)), - list(itertools.chain(*right_split)), - ) + return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) class SpecifierSet(BaseSpecifier): - def __init__(self, specifiers="", prereleases=None): # Split on , to break each indidivual specifier into it's own item, and # strip each item to remove leading/trailing whitespace. @@ -721,10 +699,7 @@ def contains(self, item, prereleases=None): # given version is contained within all of them. 
# Note: This use of all() here means that an empty set of specifiers # will always return True, this is an explicit design decision. - return all( - s.contains(item, prereleases=prereleases) - for s in self._specs - ) + return all(s.contains(item, prereleases=prereleases) for s in self._specs) def filter(self, iterable, prereleases=None): # Determine if we're forcing a prerelease or not, if we're not forcing diff --git a/pipenv/vendor/packaging/utils.py b/pipenv/vendor/packaging/utils.py index 4b94a82fbb..8841878693 100644 --- a/pipenv/vendor/packaging/utils.py +++ b/pipenv/vendor/packaging/utils.py @@ -36,13 +36,7 @@ def canonicalize_version(version): # Release segment # NB: This strips trailing '.0's to normalize - parts.append( - re.sub( - r'(\.0)+$', - '', - ".".join(str(x) for x in version.release) - ) - ) + parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release))) # Pre-release if version.pre is not None: diff --git a/pipenv/vendor/packaging/version.py b/pipenv/vendor/packaging/version.py index 6ed5cbbdc3..95157a1f78 100644 --- a/pipenv/vendor/packaging/version.py +++ b/pipenv/vendor/packaging/version.py @@ -10,14 +10,11 @@ from ._structures import Infinity -__all__ = [ - "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" -] +__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] _Version = collections.namedtuple( - "_Version", - ["epoch", "release", "dev", "pre", "post", "local"], + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] ) @@ -40,7 +37,6 @@ class InvalidVersion(ValueError): class _BaseVersion(object): - def __hash__(self): return hash(self._key) @@ -70,7 +66,6 @@ def _compare(self, other, method): class LegacyVersion(_BaseVersion): - def __init__(self, version): self._version = str(version) self._key = _legacy_cmpkey(self._version) @@ -126,12 +121,14 @@ def is_devrelease(self): return False -_legacy_version_component_re = re.compile( - r"(\d+ | [a-z]+ | \.| 
-)", re.VERBOSE, -) +_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) _legacy_version_replacement_map = { - "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", + "pre": "c", + "preview": "c", + "-": "final-", + "rc": "c", + "dev": "@", } @@ -215,10 +212,7 @@ def _legacy_cmpkey(version): class Version(_BaseVersion): - _regex = re.compile( - r"^\s*" + VERSION_PATTERN + r"\s*$", - re.VERBOSE | re.IGNORECASE, - ) + _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) def __init__(self, version): # Validate the version and parse it into pieces @@ -230,18 +224,11 @@ def __init__(self, version): self._version = _Version( epoch=int(match.group("epoch")) if match.group("epoch") else 0, release=tuple(int(i) for i in match.group("release").split(".")), - pre=_parse_letter_version( - match.group("pre_l"), - match.group("pre_n"), - ), + pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")), post=_parse_letter_version( - match.group("post_l"), - match.group("post_n1") or match.group("post_n2"), - ), - dev=_parse_letter_version( - match.group("dev_l"), - match.group("dev_n"), + match.group("post_l"), match.group("post_n1") or match.group("post_n2") ), + dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")), local=_parse_local_version(match.group("local")), ) @@ -395,12 +382,7 @@ def _cmpkey(epoch, release, pre, post, dev, local): # re-reverse it back into the correct order and make it a tuple and use # that for our sorting key. release = tuple( - reversed(list( - itertools.dropwhile( - lambda x: x == 0, - reversed(release), - ) - )) + reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) ) # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. 
@@ -433,9 +415,6 @@ def _cmpkey(epoch, release, pre, post, dev, local): # - Numeric segments sort numerically # - Shorter versions sort before longer versions when the prefixes # match exactly - local = tuple( - (i, "") if isinstance(i, int) else (-Infinity, i) - for i in local - ) + local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local) return epoch, release, pre, post, dev, local diff --git a/pipenv/vendor/passa/cli/options.py b/pipenv/vendor/passa/cli/options.py index f8ba1fe73b..f20b612a44 100644 --- a/pipenv/vendor/passa/cli/options.py +++ b/pipenv/vendor/passa/cli/options.py @@ -20,13 +20,13 @@ def __init__(self, root, *args, **kwargs): pipfile = root.joinpath("Pipfile") if not pipfile.is_file(): raise argparse.ArgumentError( - "{0!r} is not a Pipfile project".format(root), + "project", "{0!r} is not a Pipfile project".format(root), ) try: super(Project, self).__init__(root.as_posix(), *args, **kwargs) except tomlkit.exceptions.ParseError as e: raise argparse.ArgumentError( - "failed to parse Pipfile: {0!r}".format(str(e)), + "project", "failed to parse Pipfile: {0!r}".format(str(e)), ) def __name__(self): diff --git a/pipenv/vendor/pep517/LICENSE b/pipenv/vendor/pep517/LICENSE new file mode 100644 index 0000000000..b0ae9dbc26 --- /dev/null +++ b/pipenv/vendor/pep517/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Thomas Kluyver + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/pipenv/vendor/pep517/__init__.py b/pipenv/vendor/pep517/__init__.py new file mode 100644 index 0000000000..9c1a098f78 --- /dev/null +++ b/pipenv/vendor/pep517/__init__.py @@ -0,0 +1,4 @@ +"""Wrappers to build Python packages using PEP 517 hooks +""" + +__version__ = '0.5.0' diff --git a/pipenv/vendor/pep517/_in_process.py b/pipenv/vendor/pep517/_in_process.py new file mode 100644 index 0000000000..d6524b660a --- /dev/null +++ b/pipenv/vendor/pep517/_in_process.py @@ -0,0 +1,207 @@ +"""This is invoked in a subprocess to call the build backend hooks. 
+ +It expects: +- Command line args: hook_name, control_dir +- Environment variable: PEP517_BUILD_BACKEND=entry.point:spec +- control_dir/input.json: + - {"kwargs": {...}} + +Results: +- control_dir/output.json + - {"return_val": ...} +""" +from glob import glob +from importlib import import_module +import os +from os.path import join as pjoin +import re +import shutil +import sys + +# This is run as a script, not a module, so it can't do a relative import +import compat + + +class BackendUnavailable(Exception): + """Raised if we cannot import the backend""" + + +def _build_backend(): + """Find and load the build backend""" + ep = os.environ['PEP517_BUILD_BACKEND'] + mod_path, _, obj_path = ep.partition(':') + try: + obj = import_module(mod_path) + except ImportError: + raise BackendUnavailable + if obj_path: + for path_part in obj_path.split('.'): + obj = getattr(obj, path_part) + return obj + + +def get_requires_for_build_wheel(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_wheel + except AttributeError: + return [] + else: + return hook(config_settings) + + +def prepare_metadata_for_build_wheel(metadata_directory, config_settings): + """Invoke optional prepare_metadata_for_build_wheel + + Implements a fallback by building a wheel if the hook isn't defined. 
+ """ + backend = _build_backend() + try: + hook = backend.prepare_metadata_for_build_wheel + except AttributeError: + return _get_wheel_metadata_from_wheel(backend, metadata_directory, + config_settings) + else: + return hook(metadata_directory, config_settings) + + +WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' + + +def _dist_info_files(whl_zip): + """Identify the .dist-info folder inside a wheel ZipFile.""" + res = [] + for path in whl_zip.namelist(): + m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) + if m: + res.append(path) + if res: + return res + raise Exception("No .dist-info folder found in wheel") + + +def _get_wheel_metadata_from_wheel( + backend, metadata_directory, config_settings): + """Build a wheel and extract the metadata from it. + + Fallback for when the build backend does not + define the 'get_wheel_metadata' hook. + """ + from zipfile import ZipFile + whl_basename = backend.build_wheel(metadata_directory, config_settings) + with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): + pass # Touch marker file + + whl_file = os.path.join(metadata_directory, whl_basename) + with ZipFile(whl_file) as zipf: + dist_info = _dist_info_files(zipf) + zipf.extractall(path=metadata_directory, members=dist_info) + return dist_info[0].split('/')[0] + + +def _find_already_built_wheel(metadata_directory): + """Check for a wheel already built during the get_wheel_metadata hook. + """ + if not metadata_directory: + return None + metadata_parent = os.path.dirname(metadata_directory) + if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): + return None + + whl_files = glob(os.path.join(metadata_parent, '*.whl')) + if not whl_files: + print('Found wheel built marker, but no .whl files') + return None + if len(whl_files) > 1: + print('Found multiple .whl files; unspecified behaviour. 
' + 'Will call build_wheel.') + return None + + # Exactly one .whl file + return whl_files[0] + + +def build_wheel(wheel_directory, config_settings, metadata_directory=None): + """Invoke the mandatory build_wheel hook. + + If a wheel was already built in the + prepare_metadata_for_build_wheel fallback, this + will copy it rather than rebuilding the wheel. + """ + prebuilt_whl = _find_already_built_wheel(metadata_directory) + if prebuilt_whl: + shutil.copy2(prebuilt_whl, wheel_directory) + return os.path.basename(prebuilt_whl) + + return _build_backend().build_wheel(wheel_directory, config_settings, + metadata_directory) + + +def get_requires_for_build_sdist(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_sdist + except AttributeError: + return [] + else: + return hook(config_settings) + + +class _DummyException(Exception): + """Nothing should ever raise this exception""" + + +class GotUnsupportedOperation(Exception): + """For internal use when backend raises UnsupportedOperation""" + + +def build_sdist(sdist_directory, config_settings): + """Invoke the mandatory build_sdist hook.""" + backend = _build_backend() + try: + return backend.build_sdist(sdist_directory, config_settings) + except getattr(backend, 'UnsupportedOperation', _DummyException): + raise GotUnsupportedOperation + + +HOOK_NAMES = { + 'get_requires_for_build_wheel', + 'prepare_metadata_for_build_wheel', + 'build_wheel', + 'get_requires_for_build_sdist', + 'build_sdist', +} + + +def main(): + if len(sys.argv) < 3: + sys.exit("Needs args: hook_name, control_dir") + hook_name = sys.argv[1] + control_dir = sys.argv[2] + if hook_name not in HOOK_NAMES: + sys.exit("Unknown hook: %s" % hook_name) + hook = globals()[hook_name] + + hook_input = compat.read_json(pjoin(control_dir, 'input.json')) + + json_out = {'unsupported': False, 'return_val': None} + try: 
+ json_out['return_val'] = hook(**hook_input['kwargs']) + except BackendUnavailable: + json_out['no_backend'] = True + except GotUnsupportedOperation: + json_out['unsupported'] = True + + compat.write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) + + +if __name__ == '__main__': + main() diff --git a/pipenv/vendor/pep517/build.py b/pipenv/vendor/pep517/build.py new file mode 100644 index 0000000000..6fca39a87c --- /dev/null +++ b/pipenv/vendor/pep517/build.py @@ -0,0 +1,108 @@ +"""Build a project using PEP 517 hooks. +""" +import argparse +import logging +import os +import contextlib +import pytoml +import shutil +import errno +import tempfile + +from .envbuild import BuildEnvironment +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def tempdir(): + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + + +def _do_build(hooks, env, dist, dest): + get_requires_name = 'get_requires_for_build_{dist}'.format(**locals()) + get_requires = getattr(hooks, get_requires_name) + reqs = get_requires({}) + log.info('Got build requires: %s', reqs) + + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + + with tempdir() as td: + log.info('Trying to build %s in %s', dist, td) + build_name = 'build_{dist}'.format(**locals()) + build = getattr(hooks, build_name) + filename = build(td, {}) + source = os.path.join(td, filename) + shutil.move(source, os.path.join(dest, os.path.basename(filename))) + + +def mkdir_p(*args, **kwargs): + """Like `mkdir`, but does not raise an exception if the + directory already exists. 
+ """ + try: + return os.mkdir(*args, **kwargs) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + +def build(source_dir, dist, dest=None): + pyproject = os.path.join(source_dir, 'pyproject.toml') + dest = os.path.join(source_dir, dest or 'dist') + mkdir_p(dest) + + with open(pyproject) as f: + pyproject_data = pytoml.load(f) + # Ensure the mandatory data can be loaded + buildsys = pyproject_data['build-system'] + requires = buildsys['requires'] + backend = buildsys['build-backend'] + + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + _do_build(hooks, env, dist, dest) + + +parser = argparse.ArgumentParser() +parser.add_argument( + 'source_dir', + help="A directory containing pyproject.toml", +) +parser.add_argument( + '--binary', '-b', + action='store_true', + default=False, +) +parser.add_argument( + '--source', '-s', + action='store_true', + default=False, +) +parser.add_argument( + '--out-dir', '-o', + help="Destination in which to save the builds relative to source dir", +) + + +def main(args): + # determine which dists to build + dists = list(filter(None, ( + 'sdist' if args.source or not args.binary else None, + 'wheel' if args.binary or not args.source else None, + ))) + + for dist in dists: + build(args.source_dir, dist, args.out_dir) + + +if __name__ == '__main__': + main(parser.parse_args()) diff --git a/pipenv/vendor/pep517/check.py b/pipenv/vendor/pep517/check.py new file mode 100644 index 0000000000..fc82cca7ec --- /dev/null +++ b/pipenv/vendor/pep517/check.py @@ -0,0 +1,202 @@ +"""Check a project and backend by attempting to build using PEP 517 hooks. 
+""" +import argparse +import logging +import os +from os.path import isfile, join as pjoin +from pytoml import TomlError, load as toml_load +import shutil +from subprocess import CalledProcessError +import sys +import tarfile +from tempfile import mkdtemp +import zipfile + +from .colorlog import enable_colourful_output +from .envbuild import BuildEnvironment +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + + +def check_build_sdist(hooks, build_sys_requires): + with BuildEnvironment() as env: + try: + env.pip_install(build_sys_requires) + log.info('Installed static build dependencies') + except CalledProcessError: + log.error('Failed to install static build dependencies') + return False + + try: + reqs = hooks.get_requires_for_build_sdist({}) + log.info('Got build requires: %s', reqs) + except Exception: + log.error('Failure in get_requires_for_build_sdist', exc_info=True) + return False + + try: + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + except CalledProcessError: + log.error('Failed to install dynamic build dependencies') + return False + + td = mkdtemp() + log.info('Trying to build sdist in %s', td) + try: + try: + filename = hooks.build_sdist(td, {}) + log.info('build_sdist returned %r', filename) + except Exception: + log.info('Failure in build_sdist', exc_info=True) + return False + + if not filename.endswith('.tar.gz'): + log.error( + "Filename %s doesn't have .tar.gz extension", filename) + return False + + path = pjoin(td, filename) + if isfile(path): + log.info("Output file %s exists", path) + else: + log.error("Output file %s does not exist", path) + return False + + if tarfile.is_tarfile(path): + log.info("Output file is a tar file") + else: + log.error("Output file is not a tar file") + return False + + finally: + shutil.rmtree(td) + + return True + + +def check_build_wheel(hooks, build_sys_requires): + with BuildEnvironment() as env: + try: + env.pip_install(build_sys_requires) + 
log.info('Installed static build dependencies') + except CalledProcessError: + log.error('Failed to install static build dependencies') + return False + + try: + reqs = hooks.get_requires_for_build_wheel({}) + log.info('Got build requires: %s', reqs) + except Exception: + log.error('Failure in get_requires_for_build_sdist', exc_info=True) + return False + + try: + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + except CalledProcessError: + log.error('Failed to install dynamic build dependencies') + return False + + td = mkdtemp() + log.info('Trying to build wheel in %s', td) + try: + try: + filename = hooks.build_wheel(td, {}) + log.info('build_wheel returned %r', filename) + except Exception: + log.info('Failure in build_wheel', exc_info=True) + return False + + if not filename.endswith('.whl'): + log.error("Filename %s doesn't have .whl extension", filename) + return False + + path = pjoin(td, filename) + if isfile(path): + log.info("Output file %s exists", path) + else: + log.error("Output file %s does not exist", path) + return False + + if zipfile.is_zipfile(path): + log.info("Output file is a zip file") + else: + log.error("Output file is not a zip file") + return False + + finally: + shutil.rmtree(td) + + return True + + +def check(source_dir): + pyproject = pjoin(source_dir, 'pyproject.toml') + if isfile(pyproject): + log.info('Found pyproject.toml') + else: + log.error('Missing pyproject.toml') + return False + + try: + with open(pyproject) as f: + pyproject_data = toml_load(f) + # Ensure the mandatory data can be loaded + buildsys = pyproject_data['build-system'] + requires = buildsys['requires'] + backend = buildsys['build-backend'] + log.info('Loaded pyproject.toml') + except (TomlError, KeyError): + log.error("Invalid pyproject.toml", exc_info=True) + return False + + hooks = Pep517HookCaller(source_dir, backend) + + sdist_ok = check_build_sdist(hooks, requires) + wheel_ok = check_build_wheel(hooks, requires) + + if not 
sdist_ok: + log.warning('Sdist checks failed; scroll up to see') + if not wheel_ok: + log.warning('Wheel checks failed') + + return sdist_ok + + +def main(argv=None): + ap = argparse.ArgumentParser() + ap.add_argument( + 'source_dir', + help="A directory containing pyproject.toml") + args = ap.parse_args(argv) + + enable_colourful_output() + + ok = check(args.source_dir) + + if ok: + print(ansi('Checks passed', 'green')) + else: + print(ansi('Checks failed', 'red')) + sys.exit(1) + + +ansi_codes = { + 'reset': '\x1b[0m', + 'bold': '\x1b[1m', + 'red': '\x1b[31m', + 'green': '\x1b[32m', +} + + +def ansi(s, attr): + if os.name != 'nt' and sys.stdout.isatty(): + return ansi_codes[attr] + str(s) + ansi_codes['reset'] + else: + return str(s) + + +if __name__ == '__main__': + main() diff --git a/pipenv/vendor/pep517/colorlog.py b/pipenv/vendor/pep517/colorlog.py new file mode 100644 index 0000000000..69c8a59d3d --- /dev/null +++ b/pipenv/vendor/pep517/colorlog.py @@ -0,0 +1,115 @@ +"""Nicer log formatting with colours. + +Code copied from Tornado, Apache licensed. +""" +# Copyright 2012 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import logging +import sys + +try: + import curses +except ImportError: + curses = None + + +def _stderr_supports_color(): + color = False + if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): + try: + curses.setupterm() + if curses.tigetnum("colors") > 0: + color = True + except Exception: + pass + return color + + +class LogFormatter(logging.Formatter): + """Log formatter with colour support + """ + DEFAULT_COLORS = { + logging.INFO: 2, # Green + logging.WARNING: 3, # Yellow + logging.ERROR: 1, # Red + logging.CRITICAL: 1, + } + + def __init__(self, color=True, datefmt=None): + r""" + :arg bool color: Enables color support. + :arg string fmt: Log message format. + It will be applied to the attributes dict of log records. The + text between ``%(color)s`` and ``%(end_color)s`` will be colored + depending on the level if color support is on. + :arg dict colors: color mappings from logging level to terminal color + code + :arg string datefmt: Datetime format. + Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. + .. versionchanged:: 3.2 + Added ``fmt`` and ``datefmt`` arguments. + """ + logging.Formatter.__init__(self, datefmt=datefmt) + self._colors = {} + if color and _stderr_supports_color(): + # The curses module has some str/bytes confusion in + # python3. Until version 3.2.3, most methods return + # bytes, but only accept strings. In addition, we want to + # output these strings with the logging module, which + # works with unicode strings. The explicit calls to + # unicode() below are harmless in python2 but will do the + # right conversion in python 3. 
+ fg_color = (curses.tigetstr("setaf") or + curses.tigetstr("setf") or "") + if (3, 0) < sys.version_info < (3, 2, 3): + fg_color = str(fg_color, "ascii") + + for levelno, code in self.DEFAULT_COLORS.items(): + self._colors[levelno] = str( + curses.tparm(fg_color, code), "ascii") + self._normal = str(curses.tigetstr("sgr0"), "ascii") + + scr = curses.initscr() + self.termwidth = scr.getmaxyx()[1] + curses.endwin() + else: + self._normal = '' + # Default width is usually 80, but too wide is + # worse than too narrow + self.termwidth = 70 + + def formatMessage(self, record): + mlen = len(record.message) + right_text = '{initial}-{name}'.format(initial=record.levelname[0], + name=record.name) + if mlen + len(right_text) < self.termwidth: + space = ' ' * (self.termwidth - (mlen + len(right_text))) + else: + space = ' ' + + if record.levelno in self._colors: + start_color = self._colors[record.levelno] + end_color = self._normal + else: + start_color = end_color = '' + + return record.message + space + start_color + right_text + end_color + + +def enable_colourful_output(level=logging.INFO): + handler = logging.StreamHandler() + handler.setFormatter(LogFormatter()) + logging.root.addHandler(handler) + logging.root.setLevel(level) diff --git a/pipenv/vendor/pep517/compat.py b/pipenv/vendor/pep517/compat.py new file mode 100644 index 0000000000..01c66fc7e4 --- /dev/null +++ b/pipenv/vendor/pep517/compat.py @@ -0,0 +1,23 @@ +"""Handle reading and writing JSON in UTF-8, on Python 3 and 2.""" +import json +import sys + +if sys.version_info[0] >= 3: + # Python 3 + def write_json(obj, path, **kwargs): + with open(path, 'w', encoding='utf-8') as f: + json.dump(obj, f, **kwargs) + + def read_json(path): + with open(path, 'r', encoding='utf-8') as f: + return json.load(f) + +else: + # Python 2 + def write_json(obj, path, **kwargs): + with open(path, 'wb') as f: + json.dump(obj, f, encoding='utf-8', **kwargs) + + def read_json(path): + with open(path, 'rb') as f: + return 
json.load(f) diff --git a/pipenv/vendor/pep517/envbuild.py b/pipenv/vendor/pep517/envbuild.py new file mode 100644 index 0000000000..61253f4da8 --- /dev/null +++ b/pipenv/vendor/pep517/envbuild.py @@ -0,0 +1,158 @@ +"""Build wheels/sdists by installing build deps to a temporary environment. +""" + +import os +import logging +import pytoml +import shutil +from subprocess import check_call +import sys +from sysconfig import get_paths +from tempfile import mkdtemp + +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + + +def _load_pyproject(source_dir): + with open(os.path.join(source_dir, 'pyproject.toml')) as f: + pyproject_data = pytoml.load(f) + buildsys = pyproject_data['build-system'] + return buildsys['requires'], buildsys['build-backend'] + + +class BuildEnvironment(object): + """Context manager to install build deps in a simple temporary environment + + Based on code I wrote for pip, which is MIT licensed. + """ + # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file) + # + # Permission is hereby granted, free of charge, to any person obtaining + # a copy of this software and associated documentation files (the + # "Software"), to deal in the Software without restriction, including + # without limitation the rights to use, copy, modify, merge, publish, + # distribute, sublicense, and/or sell copies of the Software, and to + # permit persons to whom the Software is furnished to do so, subject to + # the following conditions: + # + # The above copyright notice and this permission notice shall be + # included in all copies or substantial portions of the Software. + # + # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + # NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + path = None + + def __init__(self, cleanup=True): + self._cleanup = cleanup + + def __enter__(self): + self.path = mkdtemp(prefix='pep517-build-env-') + log.info('Temporary build environment: %s', self.path) + + self.save_path = os.environ.get('PATH', None) + self.save_pythonpath = os.environ.get('PYTHONPATH', None) + + install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' + install_dirs = get_paths(install_scheme, vars={ + 'base': self.path, + 'platbase': self.path, + }) + + scripts = install_dirs['scripts'] + if self.save_path: + os.environ['PATH'] = scripts + os.pathsep + self.save_path + else: + os.environ['PATH'] = scripts + os.pathsep + os.defpath + + if install_dirs['purelib'] == install_dirs['platlib']: + lib_dirs = install_dirs['purelib'] + else: + lib_dirs = install_dirs['purelib'] + os.pathsep + \ + install_dirs['platlib'] + if self.save_pythonpath: + os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ + self.save_pythonpath + else: + os.environ['PYTHONPATH'] = lib_dirs + + return self + + def pip_install(self, reqs): + """Install dependencies into this env by calling pip in a subprocess""" + if not reqs: + return + log.info('Calling pip to install %s', reqs) + check_call([ + sys.executable, '-m', 'pip', 'install', '--ignore-installed', + '--prefix', self.path] + list(reqs)) + + def __exit__(self, exc_type, exc_val, exc_tb): + needs_cleanup = ( + self._cleanup and + self.path is not None and + os.path.isdir(self.path) + ) + if needs_cleanup: + shutil.rmtree(self.path) + + if self.save_path is None: + os.environ.pop('PATH', None) + else: + os.environ['PATH'] = self.save_path + + if self.save_pythonpath is None: + os.environ.pop('PYTHONPATH', None) + else: + 
os.environ['PYTHONPATH'] = self.save_pythonpath + + +def build_wheel(source_dir, wheel_dir, config_settings=None): + """Build a wheel from a source directory using PEP 517 hooks. + + :param str source_dir: Source directory containing pyproject.toml + :param str wheel_dir: Target directory to create wheel in + :param dict config_settings: Options to pass to build backend + + This is a blocking function which will run pip in a subprocess to install + build requirements. + """ + if config_settings is None: + config_settings = {} + requires, backend = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + reqs = hooks.get_requires_for_build_wheel(config_settings) + env.pip_install(reqs) + return hooks.build_wheel(wheel_dir, config_settings) + + +def build_sdist(source_dir, sdist_dir, config_settings=None): + """Build an sdist from a source directory using PEP 517 hooks. + + :param str source_dir: Source directory containing pyproject.toml + :param str sdist_dir: Target directory to place sdist in + :param dict config_settings: Options to pass to build backend + + This is a blocking function which will run pip in a subprocess to install + build requirements. 
+ """ + if config_settings is None: + config_settings = {} + requires, backend = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + reqs = hooks.get_requires_for_build_sdist(config_settings) + env.pip_install(reqs) + return hooks.build_sdist(sdist_dir, config_settings) diff --git a/pipenv/vendor/pep517/wrappers.py b/pipenv/vendor/pep517/wrappers.py new file mode 100644 index 0000000000..b14b899150 --- /dev/null +++ b/pipenv/vendor/pep517/wrappers.py @@ -0,0 +1,163 @@ +from contextlib import contextmanager +import os +from os.path import dirname, abspath, join as pjoin +import shutil +from subprocess import check_call +import sys +from tempfile import mkdtemp + +from . import compat + +_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py') + + +@contextmanager +def tempdir(): + td = mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + + +class BackendUnavailable(Exception): + """Will be raised if the backend cannot be imported in the hook process.""" + + +class UnsupportedOperation(Exception): + """May be raised by build_sdist if the backend indicates that it can't.""" + + +def default_subprocess_runner(cmd, cwd=None, extra_environ=None): + """The default method of calling the wrapper subprocess.""" + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + + check_call(cmd, cwd=cwd, env=env) + + +class Pep517HookCaller(object): + """A wrapper around a source directory to be built with a PEP 517 backend. + + source_dir : The path to the source directory, containing pyproject.toml. + backend : The build backend spec, as per PEP 517, from pyproject.toml. + """ + def __init__(self, source_dir, build_backend): + self.source_dir = abspath(source_dir) + self.build_backend = build_backend + self._subprocess_runner = default_subprocess_runner + + # TODO: Is this over-engineered? 
Maybe frontends only need to + # set this when creating the wrapper, not on every call. + @contextmanager + def subprocess_runner(self, runner): + prev = self._subprocess_runner + self._subprocess_runner = runner + yield + self._subprocess_runner = prev + + def get_requires_for_build_wheel(self, config_settings=None): + """Identify packages required for building a wheel + + Returns a list of dependency specifications, e.g.: + ["wheel >= 0.25", "setuptools"] + + This does not include requirements specified in pyproject.toml. + It returns the result of calling the equivalently named hook in a + subprocess. + """ + return self._call_hook('get_requires_for_build_wheel', { + 'config_settings': config_settings + }) + + def prepare_metadata_for_build_wheel( + self, metadata_directory, config_settings=None): + """Prepare a *.dist-info folder with metadata for this project. + + Returns the name of the newly created folder. + + If the build backend defines a hook with this name, it will be called + in a subprocess. If not, the backend will be asked to build a wheel, + and the dist-info extracted from that. + """ + return self._call_hook('prepare_metadata_for_build_wheel', { + 'metadata_directory': abspath(metadata_directory), + 'config_settings': config_settings, + }) + + def build_wheel( + self, wheel_directory, config_settings=None, + metadata_directory=None): + """Build a wheel from this project. + + Returns the name of the newly created file. + + In general, this will call the 'build_wheel' hook in the backend. + However, if that was previously called by + 'prepare_metadata_for_build_wheel', and the same metadata_directory is + used, the previously built wheel will be copied to wheel_directory. 
+ """ + if metadata_directory is not None: + metadata_directory = abspath(metadata_directory) + return self._call_hook('build_wheel', { + 'wheel_directory': abspath(wheel_directory), + 'config_settings': config_settings, + 'metadata_directory': metadata_directory, + }) + + def get_requires_for_build_sdist(self, config_settings=None): + """Identify packages required for building a wheel + + Returns a list of dependency specifications, e.g.: + ["setuptools >= 26"] + + This does not include requirements specified in pyproject.toml. + It returns the result of calling the equivalently named hook in a + subprocess. + """ + return self._call_hook('get_requires_for_build_sdist', { + 'config_settings': config_settings + }) + + def build_sdist(self, sdist_directory, config_settings=None): + """Build an sdist from this project. + + Returns the name of the newly created file. + + This calls the 'build_sdist' backend hook in a subprocess. + """ + return self._call_hook('build_sdist', { + 'sdist_directory': abspath(sdist_directory), + 'config_settings': config_settings, + }) + + def _call_hook(self, hook_name, kwargs): + # On Python 2, pytoml returns Unicode values (which is correct) but the + # environment passed to check_call needs to contain string values. We + # convert here by encoding using ASCII (the backend can only contain + # letters, digits and _, . and : characters, and will be used as a + # Python identifier, so non-ASCII content is wrong on Python 2 in + # any case). 
+ if sys.version_info[0] == 2: + build_backend = self.build_backend.encode('ASCII') + else: + build_backend = self.build_backend + + with tempdir() as td: + compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'), + indent=2) + + # Run the hook in a subprocess + self._subprocess_runner( + [sys.executable, _in_proc_script, hook_name, td], + cwd=self.source_dir, + extra_environ={'PEP517_BUILD_BACKEND': build_backend} + ) + + data = compat.read_json(pjoin(td, 'output.json')) + if data.get('unsupported'): + raise UnsupportedOperation + if data.get('no_backend'): + raise BackendUnavailable + return data['return_val'] diff --git a/pipenv/vendor/pipdeptree.py b/pipenv/vendor/pipdeptree.py index 2082fc8a36..cc15c24a60 100644 --- a/pipenv/vendor/pipdeptree.py +++ b/pipenv/vendor/pipdeptree.py @@ -22,7 +22,7 @@ # from graphviz import backend, Digraph -__version__ = '0.13.0' +__version__ = '0.13.1' flatten = chain.from_iterable diff --git a/pipenv/vendor/pyparsing.py b/pipenv/vendor/pyparsing.py index cf38419bcb..ab804d530a 100644 --- a/pipenv/vendor/pyparsing.py +++ b/pipenv/vendor/pyparsing.py @@ -1,6 +1,7 @@ +#-*- coding: utf-8 -*- # module pyparsing.py # -# Copyright (c) 2003-2018 Paul T. McGuire +# Copyright (c) 2003-2019 Paul T. McGuire # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -27,15 +28,18 @@ pyparsing module - Classes and methods to define and execute parsing grammars ============================================================================= -The pyparsing module is an alternative approach to creating and executing simple grammars, -vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you -don't need to learn a new syntax for defining grammars or matching expressions - the parsing module -provides a library of classes that you use to construct the grammar directly in Python. 
+The pyparsing module is an alternative approach to creating and +executing simple grammars, vs. the traditional lex/yacc approach, or the +use of regular expressions. With pyparsing, you don't need to learn +a new syntax for defining grammars or matching expressions - the parsing +module provides a library of classes that you use to construct the +grammar directly in Python. -Here is a program to parse "Hello, World!" (or any greeting of the form -C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements -(L{'+'} operator gives L{And} expressions, strings are auto-converted to -L{Literal} expressions):: +Here is a program to parse "Hello, World!" (or any greeting of the form +``", !"``), built up using :class:`Word`, +:class:`Literal`, and :class:`And` elements +(the :class:`'+'` operators create :class:`And` expressions, +and the strings are auto-converted to :class:`Literal` expressions):: from pyparsing import Word, alphas @@ -49,33 +53,48 @@ Hello, World! -> ['Hello', ',', 'World', '!'] -The Python representation of the grammar is quite readable, owing to the self-explanatory -class names, and the use of '+', '|' and '^' operators. +The Python representation of the grammar is quite readable, owing to the +self-explanatory class names, and the use of '+', '|' and '^' operators. -The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an -object with named attributes. +The :class:`ParseResults` object returned from +:class:`ParserElement.parseString` can be +accessed as a nested list, a dictionary, or an object with named +attributes. -The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) 
- - quoted strings - - embedded comments +The pyparsing module handles some of the problems that are typically +vexing when writing text parsers: + + - extra or missing whitespace (the above program will also handle + "Hello,World!", "Hello , World !", etc.) + - quoted strings + - embedded comments Getting Started - ----------------- -Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing +Visit the classes :class:`ParserElement` and :class:`ParseResults` to +see the base classes that most other pyparsing classes inherit from. Use the docstrings for examples of how to: - - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes - - construct character word-group expressions using the L{Word} class - - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes - - use L{'+'}, L{'|'}, L{'^'}, and L{'&'} operators to combine simple expressions into more complex ones - - associate names with your parsed results using L{ParserElement.setResultsName} - - find some helpful expression short-cuts like L{delimitedList} and L{oneOf} - - find more useful common expressions in the L{pyparsing_common} namespace class + + - construct literal match expressions from :class:`Literal` and + :class:`CaselessLiteral` classes + - construct character word-group expressions using the :class:`Word` + class + - see how to create repetitive expressions using :class:`ZeroOrMore` + and :class:`OneOrMore` classes + - use :class:`'+'`, :class:`'|'`, :class:`'^'`, + and :class:`'&'` operators to combine simple expressions into + more complex ones + - associate names with your parsed results using + :class:`ParserElement.setResultsName` + - find some helpful expression short-cuts like :class:`delimitedList` + and :class:`oneOf` + - find more useful common expressions in the :class:`pyparsing_common` + namespace class """ -__version__ = "2.2.2" -__versionTime__ = "29 Sep 2018 15:58 UTC" 
+__version__ = "2.3.1" +__versionTime__ = "09 Jan 2019 23:26 UTC" __author__ = "Paul McGuire " import string @@ -91,6 +110,12 @@ class names, and the use of '+', '|' and '^' operators. import types from datetime import datetime +try: + # Python 3 + from itertools import filterfalse +except ImportError: + from itertools import ifilterfalse as filterfalse + try: from _thread import RLock except ImportError: @@ -113,27 +138,33 @@ class names, and the use of '+', '|' and '^' operators. except ImportError: _OrderedDict = None +try: + from types import SimpleNamespace +except ImportError: + class SimpleNamespace: pass + + #~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) __all__ = [ 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', -'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', +'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', -'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', +'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', +'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 
'matchPreviousExpr', 'matchPreviousLiteral', 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', -'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', +'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', -'CloseMatch', 'tokenMap', 'pyparsing_common', +'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', ] system_version = tuple(sys.version_info)[:3] @@ -142,6 +173,7 @@ class names, and the use of '+', '|' and '^' operators. _MAX_INT = sys.maxsize basestring = str unichr = chr + unicode = str _ustr = str # build list of single arg builtins, that can be used as parse actions @@ -152,9 +184,11 @@ class names, and the use of '+', '|' and '^' operators. range = xrange def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries - str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It - then < returns the unicode object | encodes it with the default encoding | ... >. + """Drop-in replacement for str(obj) that tries to be Unicode + friendly. It first tries str(obj). If that fails with + a UnicodeEncodeError, then it tries unicode(obj). It then + < returns the unicode object | encodes it with the default + encoding | ... >. """ if isinstance(obj,unicode): return obj @@ -179,9 +213,9 @@ def _ustr(obj): singleArgBuiltins.append(getattr(__builtin__,fname)) except AttributeError: continue - + _generatorType = type((y for y in range(1))) - + def _xml_escape(data): """Escape &, <, >, ", ', etc. 
in a string of data.""" @@ -192,9 +226,6 @@ def _xml_escape(data): data = data.replace(from_, to_) return data -class _Constants(object): - pass - alphas = string.ascii_uppercase + string.ascii_lowercase nums = "0123456789" hexnums = nums + "ABCDEFabcdef" @@ -220,16 +251,16 @@ def __init__( self, pstr, loc=0, msg=None, elem=None ): @classmethod def _from_exception(cls, pe): """ - internal factory method to simplify creating one type of ParseException + internal factory method to simplify creating one type of ParseException from another - avoids having __init__ signature conflicts among subclasses """ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) def __getattr__( self, aname ): """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text """ if( aname == "lineno" ): return lineno( self.loc, self.pstr ) @@ -262,22 +293,94 @@ class ParseException(ParseBaseException): """ Exception thrown when parse expressions don't match class; supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + Example:: + try: Word(nums).setName("integer").parseString("ABC") except ParseException as pe: print(pe) print("column: {}".format(pe.col)) - + prints:: + Expected integer (at char 0), (line:1, col:1) column: 1 + """ - pass + + @staticmethod + def explain(exc, depth=16): + """ + Method to take an exception and translate the Python 
internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - exc - exception raised during parsing (need not be a ParseException, in support + of Python exceptions that might be raised in a parse action) + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `setName` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + explain() is only supported under Python 3. + """ + import inspect + + if depth is None: + depth = sys.getrecursionlimit() + ret = [] + if isinstance(exc, ParseBaseException): + ret.append(exc.line) + ret.append(' ' * (exc.col - 1) + '^') + ret.append("{0}: {1}".format(type(exc).__name__, exc)) + + if depth > 0: + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen = set() + for i, ff in enumerate(callers[-depth:]): + frm = ff.frame + + f_self = frm.f_locals.get('self', None) + if isinstance(f_self, ParserElement): + if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'): + continue + if f_self in seen: + continue + seen.add(f_self) + + self_type = type(f_self) + ret.append("{0}.{1} - {2}".format(self_type.__module__, + self_type.__name__, + f_self)) + elif f_self is not None: + self_type = type(f_self) + ret.append("{0}.{1}".format(self_type.__module__, + self_type.__name__)) + else: + code = frm.f_code + if code.co_name in ('wrapper', ''): + continue + + ret.append("{0}".format(code.co_name)) + + depth -= 1 + if not depth: 
+ break + + return '\n'.join(ret) + class ParseFatalException(ParseBaseException): """user-throwable exception thrown when inconsistent parse content @@ -285,9 +388,11 @@ class ParseFatalException(ParseBaseException): pass class ParseSyntaxException(ParseFatalException): - """just like L{ParseFatalException}, but thrown internally when an - L{ErrorStop} ('-' operator) indicates that parsing is to stop - immediately because an unbacktrackable syntax error has been found""" + """just like :class:`ParseFatalException`, but thrown internally + when an :class:`ErrorStop` ('-' operator) indicates + that parsing is to stop immediately because an unbacktrackable + syntax error has been found. + """ pass #~ class ReparseException(ParseBaseException): @@ -304,7 +409,9 @@ class ParseSyntaxException(ParseFatalException): #~ self.reparseLoc = restartLoc class RecursiveGrammarException(Exception): - """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive""" + """exception thrown by :class:`ParserElement.validate` if the + grammar could be improperly recursive + """ def __init__( self, parseElementList ): self.parseElementTrace = parseElementList @@ -322,16 +429,18 @@ def setOffset(self,i): self.tup = (self.tup[0],i) class ParseResults(object): - """ - Structured parse results, to provide multiple means of access to the parsed data: - - as a list (C{len(results)}) - - by list index (C{results[0], results[1]}, etc.) - - by attribute (C{results.} - see L{ParserElement.setResultsName}) + """Structured parse results, to provide multiple means of access to + the parsed data: + + - as a list (``len(results)``) + - by list index (``results[0], results[1]``, etc.) 
+ - by attribute (``results.`` - see :class:`ParserElement.setResultsName`) Example:: + integer = Word(nums) - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") @@ -348,7 +457,9 @@ def test(s, fn=repr): test("'month' in result") test("'minutes' in result") test("result.dump()", str) + prints:: + list(result) -> ['1999', '/', '12', '/', '31'] result[0] -> '1999' result['month'] -> '12' @@ -398,7 +509,7 @@ def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance toklist = [ toklist ] if asList: if isinstance(toklist,ParseResults): - self[name] = _ParseResultsWithOffset(toklist.copy(),0) + self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0) else: self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) self[name].__name = name @@ -467,19 +578,19 @@ def _iterkeys( self ): def _itervalues( self ): return (self[k] for k in self._iterkeys()) - + def _iteritems( self ): return ((k, self[k]) for k in self._iterkeys()) if PY_3: - keys = _iterkeys - """Returns an iterator of all named result keys (Python 3.x only).""" + keys = _iterkeys + """Returns an iterator of all named result keys.""" values = _itervalues - """Returns an iterator of all named result values (Python 3.x only).""" + """Returns an iterator of all named result values.""" items = _iteritems - """Returns an iterator of all named result key-value tuples (Python 3.x only).""" + """Returns an iterator of all named result key-value tuples.""" else: iterkeys = _iterkeys @@ -498,7 +609,7 @@ def keys( self ): def values( self ): """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.itervalues()) - + def items( self ): """Returns all named result 
key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" return list(self.iteritems()) @@ -507,19 +618,20 @@ def haskeys( self ): """Since keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined results names.""" return bool(self.__tokdict) - + def pop( self, *args, **kwargs): """ - Removes and returns item at specified index (default=C{last}). - Supports both C{list} and C{dict} semantics for C{pop()}. If passed no - argument or an integer argument, it will use C{list} semantics - and pop tokens from the list of parsed tokens. If passed a - non-integer argument (most likely a string), it will use C{dict} - semantics and pop the corresponding value from any defined - results names. A second default return value argument is - supported, just as in C{dict.pop()}. + Removes and returns item at specified index (default= ``last``). + Supports both ``list`` and ``dict`` semantics for ``pop()``. If + passed no argument or an integer argument, it will use ``list`` + semantics and pop tokens from the list of parsed tokens. If passed + a non-integer argument (most likely a string), it will use ``dict`` + semantics and pop the corresponding value from any defined results + names. A second default return value argument is supported, just as in + ``dict.pop()``. 
Example:: + def remove_first(tokens): tokens.pop(0) print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] @@ -536,7 +648,9 @@ def remove_LABEL(tokens): return tokens patt.addParseAction(remove_LABEL) print(patt.parseString("AAB 123 321").dump()) + prints:: + ['AAB', '123', '321'] - LABEL: AAB @@ -549,8 +663,8 @@ def remove_LABEL(tokens): args = (args[0], v) else: raise TypeError("pop() got an unexpected keyword argument '%s'" % k) - if (isinstance(args[0], int) or - len(args) == 1 or + if (isinstance(args[0], int) or + len(args) == 1 or args[0] in self): index = args[0] ret = self[index] @@ -563,14 +677,15 @@ def remove_LABEL(tokens): def get(self, key, defaultValue=None): """ Returns named result matching the given key, or if there is no - such name, then returns the given C{defaultValue} or C{None} if no - C{defaultValue} is specified. + such name, then returns the given ``defaultValue`` or ``None`` if no + ``defaultValue`` is specified. + + Similar to ``dict.get()``. - Similar to C{dict.get()}. - Example:: + integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString("1999/12/31") print(result.get("year")) # -> '1999' @@ -585,10 +700,11 @@ def get(self, key, defaultValue=None): def insert( self, index, insStr ): """ Inserts new element at location index in the list of parsed tokens. - - Similar to C{list.insert()}. + + Similar to ``list.insert()``. Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to insert the parse location in the front of the parsed results @@ -607,8 +723,9 @@ def append( self, item ): Add single element to end of ParseResults list of elements. 
Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - + # use a parse action to compute the sum of the parsed integers, and add it to the end def append_sum(tokens): tokens.append(sum(map(int, tokens))) @@ -621,8 +738,9 @@ def extend( self, itemseq ): Add sequence of elements to end of ParseResults list of elements. Example:: + patt = OneOrMore(Word(alphas)) - + # use a parse action to append the reverse of the matched strings, to make a palindrome def make_palindrome(tokens): tokens.extend(reversed([t[::-1] for t in tokens])) @@ -646,7 +764,7 @@ def __getattr__( self, name ): return self[name] except KeyError: return "" - + if name in self.__tokdict: if name not in self.__accumNames: return self.__tokdict[name][-1][0] @@ -671,7 +789,7 @@ def __iadd__( self, other ): self[k] = v if isinstance(v[0],ParseResults): v[0].__parent = wkref(self) - + self.__toklist += other.__toklist self.__accumNames.update( other.__accumNames ) return self @@ -683,7 +801,7 @@ def __radd__(self, other): else: # this may raise a TypeError - so be it return other + self - + def __repr__( self ): return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) @@ -706,11 +824,12 @@ def asList( self ): Returns the parse results as a nested list of matching tokens, all converted to strings. Example:: + patt = OneOrMore(Word(alphas)) result = patt.parseString("sldkj lsdkj sldkj") # even though the result prints in string-like form, it is actually a pyparsing ParseResults print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] - + # Use asList() to create an actual list result_list = result.asList() print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] @@ -722,12 +841,13 @@ def asDict( self ): Returns the named parse results as a nested dictionary. 
Example:: + integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - + result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - + result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} @@ -740,7 +860,7 @@ def asDict( self ): item_fn = self.items else: item_fn = self.iteritems - + def toItem(obj): if isinstance(obj, ParseResults): if obj.haskeys(): @@ -749,15 +869,15 @@ def toItem(obj): return [toItem(v) for v in obj] else: return obj - + return dict((k,toItem(v)) for k,v in item_fn()) def copy( self ): """ - Returns a new copy of a C{ParseResults} object. + Returns a new copy of a :class:`ParseResults` object. """ ret = ParseResults( self.__toklist ) - ret.__tokdict = self.__tokdict.copy() + ret.__tokdict = dict(self.__tokdict.items()) ret.__parent = self.__parent ret.__accumNames.update( self.__accumNames ) ret.__name = self.__name @@ -833,22 +953,25 @@ def __lookup(self,sub): def getName(self): r""" - Returns the results name for this token expression. Useful when several + Returns the results name for this token expression. Useful when several different expressions might match at a particular location. 
Example:: + integer = Word(nums) ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") + user_data = (Group(house_number_expr)("house_number") | Group(ssn_expr)("ssn") | Group(integer)("age")) user_info = OneOrMore(user_data) - + result = user_info.parseString("22 111-22-3333 #221B") for item in result: print(item.getName(), ':', item[0]) + prints:: + age : 22 ssn : 111-22-3333 house_number : 221B @@ -870,17 +993,20 @@ def getName(self): def dump(self, indent='', depth=0, full=True): """ - Diagnostic method for listing out the contents of a C{ParseResults}. - Accepts an optional C{indent} argument so that this string can be embedded - in a nested display of other data. + Diagnostic method for listing out the contents of + a :class:`ParseResults`. Accepts an optional ``indent`` argument so + that this string can be embedded in a nested display of other data. Example:: + integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - + result = date_str.parseString('12/31/1999') print(result.dump()) + prints:: + ['12', '/', '31', '/', '1999'] - day: 1999 - month: 31 @@ -910,16 +1036,18 @@ def dump(self, indent='', depth=0, full=True): out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) else: out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) - + return "".join(out) def pprint(self, *args, **kwargs): """ - Pretty-printer for parsed results as a list, using the C{pprint} module. - Accepts additional positional or keyword args as defined for the - C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) + Pretty-printer for parsed results as a list, using the + `pprint `_ module. + Accepts additional positional or keyword args as defined for + `pprint.pprint `_ . 
Example:: + ident = Word(alphas, alphanums) num = Word(nums) func = Forward() @@ -927,7 +1055,9 @@ def pprint(self, *args, **kwargs): func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) + prints:: + ['fna', ['a', 'b', @@ -970,24 +1100,25 @@ def col (loc,strg): The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}} for more information - on parsing strings containing C{}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. + before starting the parsing process. See + :class:`ParserElement.parseString` for more + information on parsing strings containing ```` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. """ s = strg return 1 if 0} for more information - on parsing strings containing C{}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ + Note - the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`ParserElement.parseString` + for more information on parsing strings containing ```` s, and + suggested methods to maintain a consistent view of the parsed string, the + parse location, and line and column positions within the parsed string. 
+ """ return strg.count("\n",0,loc) + 1 def line( loc, strg ): @@ -1041,7 +1172,7 @@ def _trim_arity(func, maxargs=2): return lambda s,l,t: func(t) limit = [0] foundArity = [False] - + # traceback return data structure changed in Py3.5 - normalize back to plain tuples if system_version[:2] >= (3,5): def extract_stack(limit=0): @@ -1056,12 +1187,12 @@ def extract_tb(tb, limit=0): else: extract_stack = traceback.extract_stack extract_tb = traceback.extract_tb - - # synthesize what would be returned by traceback.extract_stack at the call to + + # synthesize what would be returned by traceback.extract_stack at the call to # user's parse action 'func', so that we don't incur call penalty at parse time - + LINE_DIFF = 6 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! this_line = extract_stack(limit=2)[-1] pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) @@ -1092,7 +1223,7 @@ def wrapper(*args): # copy func name to wrapper for sensible debug output func_name = "" try: - func_name = getattr(func, '__name__', + func_name = getattr(func, '__name__', getattr(func, '__class__').__name__) except Exception: func_name = str(func) @@ -1111,9 +1242,10 @@ def setDefaultWhitespaceChars( chars ): Overrides the default whitespace chars Example:: + # default whitespace chars are space, and newline OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - + # change to just treat newline as significant ParserElement.setDefaultWhitespaceChars(" \t") OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] @@ -1124,18 +1256,19 @@ def setDefaultWhitespaceChars( chars ): def inlineLiteralsUsing(cls): """ Set class to be used for inclusion of string literals into a parser. 
- + Example:: + # default literal class used is Literal integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # change to Suppress ParserElement.inlineLiteralsUsing(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] """ @@ -1149,7 +1282,7 @@ def __init__( self, savelist=False ): self.resultsName = None self.saveAsList = savelist self.skipWhitespace = True - self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS + self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) self.copyDefaultWhiteChars = True self.mayReturnEmpty = False # used when checking for left-recursion self.keepTabs = False @@ -1166,18 +1299,24 @@ def __init__( self, savelist=False ): def copy( self ): """ - Make a copy of this C{ParserElement}. Useful for defining different parse actions - for the same parsing pattern, using copies of the original parse element. - + Make a copy of this :class:`ParserElement`. Useful for defining + different parse actions for the same parsing pattern, using copies of + the original parse element. 
+ Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") - + print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) + prints:: + [5120, 100, 655360, 268435456] - Equivalent form of C{expr.copy()} is just C{expr()}:: + + Equivalent form of ``expr.copy()`` is just ``expr()``:: + integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") """ cpy = copy.copy( self ) @@ -1190,8 +1329,9 @@ def copy( self ): def setName( self, name ): """ Define name for this expression, makes debugging and exception messages clearer. - + Example:: + Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) """ @@ -1205,17 +1345,18 @@ def setResultsName( self, name, listAllMatches=False ): """ Define name for referencing matching tokens as a nested attribute of the returned parse results. - NOTE: this returns a *copy* of the original C{ParserElement} object; + NOTE: this returns a *copy* of the original :class:`ParserElement` object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names. You can also set results names using the abbreviated syntax, - C{expr("name")} in place of C{expr.setResultsName("name")} - - see L{I{__call__}<__call__>}. + ``expr("name")`` in place of ``expr.setResultsName("name")`` + - see :class:`__call__`. 
Example:: - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' + + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: @@ -1231,7 +1372,7 @@ def setResultsName( self, name, listAllMatches=False ): def setBreak(self,breakFlag = True): """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set C{breakFlag} to True to enable, False to + about to be parsed. Set ``breakFlag`` to True to enable, False to disable. """ if breakFlag: @@ -1250,25 +1391,28 @@ def breaker(instring, loc, doActions=True, callPreParse=True): def setParseAction( self, *fns, **kwargs ): """ Define one or more actions to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, - C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object + Parse action fn is a callable method with 0-3 arguments, called as ``fn(s,loc,toks)`` , + ``fn(loc,toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object + If the functions in fns modify the tokens, they can return them as the return value from fn, and the modified list of tokens will replace the original. Otherwise, fn does not need to return any value. 
Optional keyword arguments: - - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing + - callDuringTry = (default= ``False`` ) indicate if parse action should be run during lookaheads and alternate testing Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{parseString}} for more information - on parsing strings containing C{}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - + before starting the parsing process. See :class:`parseString for more + information on parsing strings containing ```` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + Example:: + integer = Word(nums) date_str = integer + '/' + integer + '/' + integer @@ -1287,24 +1431,25 @@ def setParseAction( self, *fns, **kwargs ): def addParseAction( self, *fns, **kwargs ): """ - Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}}. - - See examples in L{I{copy}}. + Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`. + + See examples in :class:`copy`. """ self.parseAction += list(map(_trim_arity, list(fns))) self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) return self def addCondition(self, *fns, **kwargs): - """Add a boolean predicate function to expression's list of parse actions. See - L{I{setParseAction}} for function call signatures. Unlike C{setParseAction}, - functions passed to C{addCondition} need to return boolean success/fail of the condition. + """Add a boolean predicate function to expression's list of parse actions. See + :class:`setParseAction` for function call signatures. 
Unlike ``setParseAction``, + functions passed to ``addCondition`` need to return boolean success/fail of the condition. Optional keyword arguments: - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException - + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException + Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) year_int = integer.copy() year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") @@ -1315,8 +1460,9 @@ def addCondition(self, *fns, **kwargs): msg = kwargs.get("message", "failed user-defined condition") exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException for fn in fns: + fn = _trim_arity(fn) def pa(s,l,t): - if not bool(_trim_arity(fn)(s,l,t)): + if not bool(fn(s,l,t)): raise exc_type(s,l,msg) self.parseAction.append(pa) self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) @@ -1325,12 +1471,12 @@ def pa(s,l,t): def setFailAction( self, fn ): """Define action to perform if parsing fails at this expression. Fail acton fn is a callable function that takes the arguments - C{fn(s,loc,expr,err)} where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. It may throw C{L{ParseFatalException}} + ``fn(s,loc,expr,err)`` where: + - s = string being parsed + - loc = location where expression match was attempted and failed + - expr = the parse expression that failed + - err = the exception thrown + The function returns no value. 
It may throw :class:`ParseFatalException` if it is desired to stop parsing immediately.""" self.failAction = fn return self @@ -1412,8 +1558,14 @@ def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): if debugging: try: for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: + try: + tokens = fn( instring, tokensStart, retTokens ) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + exc.__cause__ = parse_action_exc + raise exc + + if tokens is not None and tokens is not retTokens: retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), @@ -1425,8 +1577,14 @@ def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): raise else: for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: + try: + tokens = fn( instring, tokensStart, retTokens ) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + exc.__cause__ = parse_action_exc + raise exc + + if tokens is not None and tokens is not retTokens: retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), @@ -1443,7 +1601,7 @@ def tryParse( self, instring, loc ): return self._parse( instring, loc, doActions=False )[0] except ParseFatalException: raise ParseException( instring, loc, self.errmsg, self) - + def canParseNext(self, instring, loc): try: self.tryParse(instring, loc) @@ -1465,7 +1623,7 @@ def set(self, key, value): def clear(self): cache.clear() - + def cache_len(self): return len(cache) @@ -1577,23 +1735,23 @@ def enablePackrat(cache_size_limit=128): often in many complex grammars) can immediately return a cached value, instead of re-executing parsing/validating code. Memoizing is done of both valid results and parsing exceptions. 
- + Parameters: - - cache_size_limit - (default=C{128}) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - + + - cache_size_limit - (default= ``128``) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. + This speedup may break existing programs that use parse actions that have side-effects. For this reason, packrat parsing is disabled when you first import pyparsing. To activate the packrat feature, your - program must call the class method C{ParserElement.enablePackrat()}. If - your program uses C{psyco} to "compile as you go", you must call - C{enablePackrat} before calling C{psyco.full()}. If you do not do this, - Python will crash. For best results, call C{enablePackrat()} immediately - after importing pyparsing. - + program must call the class method :class:`ParserElement.enablePackrat`. + For best results, call ``enablePackrat()`` immediately after + importing pyparsing. + Example:: + import pyparsing pyparsing.ParserElement.enablePackrat() """ @@ -1612,23 +1770,25 @@ def parseString( self, instring, parseAll=False ): expression has been built. If you want the grammar to require that the entire input string be - successfully parsed, then set C{parseAll} to True (equivalent to ending - the grammar with C{L{StringEnd()}}). + successfully parsed, then set ``parseAll`` to True (equivalent to ending + the grammar with ``StringEnd()``). - Note: C{parseString} implicitly calls C{expandtabs()} on the input string, + Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string, in order to report proper column numbers in parse actions. 
If the input string contains tabs and - the grammar uses parse actions that use the C{loc} argument to index into the + the grammar uses parse actions that use the ``loc`` argument to index into the string being parsed, you can ensure you have a consistent view of the input string by: - - calling C{parseWithTabs} on your grammar before calling C{parseString} - (see L{I{parseWithTabs}}) - - define your parse action using the full C{(s,loc,toks)} signature, and - reference the input string using the parse action's C{s} argument - - explictly expand the tabs in your input string before calling - C{parseString} - + + - calling ``parseWithTabs`` on your grammar before calling ``parseString`` + (see :class:`parseWithTabs`) + - define your parse action using the full ``(s,loc,toks)`` signature, and + reference the input string using the parse action's ``s`` argument + - explictly expand the tabs in your input string before calling + ``parseString`` + Example:: + Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text """ @@ -1659,22 +1819,23 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): """ Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional - C{maxMatches} argument, to clip scanning after 'n' matches are found. If - C{overlap} is specified, then overlapping matches will be reported. + ``maxMatches`` argument, to clip scanning after 'n' matches are found. If + ``overlap`` is specified, then overlapping matches will be reported. Note that the start and end locations are reported relative to the string - being parsed. See L{I{parseString}} for more information on parsing + being parsed. See :class:`parseString` for more information on parsing strings with embedded tabs. 
Example:: + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) for tokens,start,end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) - + prints:: - + sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf @@ -1728,19 +1889,22 @@ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): def transformString( self, instring ): """ - Extension to C{L{scanString}}, to modify matching text with modified tokens that may - be returned from a parse action. To use C{transformString}, define a grammar and + Extension to :class:`scanString`, to modify matching text with modified tokens that may + be returned from a parse action. To use ``transformString``, define a grammar and attach a parse action to it that modifies the returned token list. - Invoking C{transformString()} on a target string will then scan for matches, + Invoking ``transformString()`` on a target string will then scan for matches, and replace the matched text patterns according to the logic in the parse - action. C{transformString()} returns the resulting transformed string. - + action. ``transformString()`` returns the resulting transformed string. + Example:: + wd = Word(alphas) wd.setParseAction(lambda toks: toks[0].title()) - + print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) - Prints:: + + prints:: + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. """ out = [] @@ -1771,19 +1935,22 @@ def transformString( self, instring ): def searchString( self, instring, maxMatches=_MAX_INT ): """ - Another extension to C{L{scanString}}, simplifying the access to the tokens found + Another extension to :class:`scanString`, simplifying the access to the tokens found to match the given parse expression. May be called with optional - C{maxMatches} argument, to clip searching after 'n' matches are found. 
- + ``maxMatches`` argument, to clip searching after 'n' matches are found. + Example:: + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters cap_word = Word(alphas.upper(), alphas.lower()) - + print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) # the sum() builtin can be used to merge results into a single ParseResults object print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) + prints:: + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] """ @@ -1799,14 +1966,17 @@ def searchString( self, instring, maxMatches=_MAX_INT ): def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): """ Generator method to split a string using the given expression as a separator. - May be called with optional C{maxsplit} argument, to limit the number of splits; - and the optional C{includeSeparators} argument (default=C{False}), if the separating + May be called with optional ``maxsplit`` argument, to limit the number of splits; + and the optional ``includeSeparators`` argument (default= ``False``), if the separating matching text should be included in the split results. - - Example:: + + Example:: + punc = oneOf(list(".,;:/-!?")) print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + prints:: + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] """ splits = 0 @@ -1820,14 +1990,17 @@ def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): def __add__(self, other ): """ - Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement - converts them to L{Literal}s by default. - + Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement + converts them to :class:`Literal`s by default. + Example:: + greet = Word(alphas) + "," + Word(alphas) + "!" 
hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) - Prints:: + + prints:: + Hello, World! -> ['Hello', ',', 'World', '!'] """ if isinstance( other, basestring ): @@ -1840,7 +2013,7 @@ def __add__(self, other ): def __radd__(self, other ): """ - Implementation of + operator when left operand is not a C{L{ParserElement}} + Implementation of + operator when left operand is not a :class:`ParserElement` """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) @@ -1852,7 +2025,7 @@ def __radd__(self, other ): def __sub__(self, other): """ - Implementation of - operator, returns C{L{And}} with error stop + Implementation of - operator, returns :class:`And` with error stop """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) @@ -1864,7 +2037,7 @@ def __sub__(self, other): def __rsub__(self, other ): """ - Implementation of - operator when left operand is not a C{L{ParserElement}} + Implementation of - operator when left operand is not a :class:`ParserElement` """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) @@ -1876,23 +2049,23 @@ def __rsub__(self, other ): def __mul__(self,other): """ - Implementation of * operator, allows use of C{expr * 3} in place of - C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer - tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples - may also include C{None} as in: - - C{expr*(n,None)} or C{expr*(n,)} is equivalent - to C{expr*n + L{ZeroOrMore}(expr)} - (read as "at least n instances of C{expr}") - - C{expr*(None,n)} is equivalent to C{expr*(0,n)} - (read as "0 to n instances of C{expr}") - - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} - - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} - - Note that C{expr*(None,n)} does not raise an exception if + Implementation of * operator, allows use of ``expr * 3`` in place of + ``expr + expr + expr``. 
Expressions may also me multiplied by a 2-integer + tuple, similar to ``{min,max}`` multipliers in regular expressions. Tuples + may also include ``None`` as in: + - ``expr*(n,None)`` or ``expr*(n,)`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr*(None,n)`` is equivalent to ``expr*(0,n)`` + (read as "0 to n instances of ``expr``") + - ``expr*(None,None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1,None)`` is equivalent to ``OneOrMore(expr)`` + + Note that ``expr*(None,n)`` does not raise an exception if more than n exprs exist in the input stream; that is, - C{expr*(None,n)} does not enforce a maximum number of expr + ``expr*(None,n)`` does not enforce a maximum number of expr occurrences. If this behavior is desired, then write - C{expr*(None,n) + ~expr} + ``expr*(None,n) + ~expr`` """ if isinstance(other,int): minElements, optElements = other,0 @@ -1947,7 +2120,7 @@ def __rmul__(self, other): def __or__(self, other ): """ - Implementation of | operator - returns C{L{MatchFirst}} + Implementation of | operator - returns :class:`MatchFirst` """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) @@ -1959,7 +2132,7 @@ def __or__(self, other ): def __ror__(self, other ): """ - Implementation of | operator when left operand is not a C{L{ParserElement}} + Implementation of | operator when left operand is not a :class:`ParserElement` """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) @@ -1971,7 +2144,7 @@ def __ror__(self, other ): def __xor__(self, other ): """ - Implementation of ^ operator - returns C{L{Or}} + Implementation of ^ operator - returns :class:`Or` """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) @@ -1983,7 +2156,7 @@ def __xor__(self, other ): def __rxor__(self, other ): """ - Implementation of ^ operator when left operand is not a C{L{ParserElement}} + Implementation of 
^ operator when left operand is not a :class:`ParserElement` """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) @@ -1995,7 +2168,7 @@ def __rxor__(self, other ): def __and__(self, other ): """ - Implementation of & operator - returns C{L{Each}} + Implementation of & operator - returns :class:`Each` """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) @@ -2007,7 +2180,7 @@ def __and__(self, other ): def __rand__(self, other ): """ - Implementation of & operator when left operand is not a C{L{ParserElement}} + Implementation of & operator when left operand is not a :class:`ParserElement` """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) @@ -2019,23 +2192,24 @@ def __rand__(self, other ): def __invert__( self ): """ - Implementation of ~ operator - returns C{L{NotAny}} + Implementation of ~ operator - returns :class:`NotAny` """ return NotAny( self ) def __call__(self, name=None): """ - Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. - - If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be - passed as C{True}. - - If C{name} is omitted, same as calling C{L{copy}}. + Shortcut for :class:`setResultsName`, with ``listAllMatches=False``. + + If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be + passed as ``True``. + + If ``name` is omitted, same as calling :class:`copy`. 
Example:: + # these are equivalent userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") """ if name is not None: return self.setResultsName(name) @@ -2044,7 +2218,7 @@ def __call__(self, name=None): def suppress( self ): """ - Suppresses the output of this C{ParserElement}; useful to keep punctuation from + Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from cluttering up returned output. """ return Suppress( self ) @@ -2052,7 +2226,7 @@ def suppress( self ): def leaveWhitespace( self ): """ Disables the skipping of whitespace before matching the characters in the - C{ParserElement}'s defined pattern. This is normally only used internally by + :class:`ParserElement`'s defined pattern. This is normally only used internally by the pyparsing module, but may be needed in some whitespace-sensitive grammars. """ self.skipWhitespace = False @@ -2069,9 +2243,9 @@ def setWhitespaceChars( self, chars ): def parseWithTabs( self ): """ - Overrides default behavior to expand C{}s to spaces before parsing the input string. - Must be called before C{parseString} when the input grammar contains elements that - match C{} characters. + Overrides default behavior to expand ````s to spaces before parsing the input string. + Must be called before ``parseString`` when the input grammar contains elements that + match ```` characters. """ self.keepTabs = True return self @@ -2081,11 +2255,12 @@ def ignore( self, other ): Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. 
- + Example:: + patt = OneOrMore(Word(alphas)) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] - + patt.ignore(cStyleComment) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] """ @@ -2112,19 +2287,21 @@ def setDebugActions( self, startAction, successAction, exceptionAction ): def setDebug( self, flag=True ): """ Enable display of debugging messages while doing pattern matching. - Set C{flag} to True to enable, False to disable. + Set ``flag`` to True to enable, False to disable. Example:: + wd = Word(alphas).setName("alphaword") integer = Word(nums).setName("numword") term = wd | integer - + # turn on debugging for wd wd.setDebug() OneOrMore(term).parseString("abc 123 xyz 890") - + prints:: + Match alphaword at loc 0(1,1) Matched alphaword -> ['abc'] Match alphaword at loc 3(1,4) @@ -2137,12 +2314,12 @@ def setDebug( self, flag=True ): Exception raised:Expected alphaword (at char 15), (line:1, col:16) The output shown is that produced by the default debug actions - custom debug actions can be - specified using L{setDebugActions}. Prior to attempting - to match the C{wd} expression, the debugging message C{"Match at loc (,)"} - is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} - message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, + specified using :class:`setDebugActions`. Prior to attempting + to match the ``wd`` expression, the debugging message ``"Match at loc (,)"`` + is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` + message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression, which makes debugging and exception messages easier to understand - for instance, the default - name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. 
+ name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``. """ if flag: self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) @@ -2212,14 +2389,15 @@ def __rne__(self,other): def matches(self, testString, parseAll=True): """ - Method for quick testing of a parser against a test string. Good for simple + Method for quick testing of a parser against a test string. Good for simple inline microtests of sub expressions while building up larger parser. - + Parameters: - testString - to test against this expression for a match - - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - + - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests + Example:: + expr = Word(nums) assert expr.matches("100") """ @@ -2228,28 +2406,32 @@ def matches(self, testString, parseAll=True): return True except ParseBaseException: return False - - def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False): + + def runTests(self, tests, parseAll=True, comment='#', + fullDump=True, printResults=True, failureTests=False, postParse=None): """ Execute the parse expression on a series of test strings, showing each test, the parsed results or where the parse failed. Quick and easy way to run a parse expression against a list of sample strings. 
- + Parameters: - tests - a list of separate test strings, or a multiline string of test strings - - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - - comment - (default=C{'#'}) - expression for indicating embedded comments in the test + - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests + - comment - (default= ``'#'``) - expression for indicating embedded comments in the test string; pass None to disable comment filtering - - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; + - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline; if False, only dump nested list - - printResults - (default=C{True}) prints test output to stdout - - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing + - printResults - (default= ``True``) prints test output to stdout + - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing + - postParse - (default= ``None``) optional callback for successful parse results; called as + `fn(test_string, parse_results)` and returns a string to be added to the test output Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if C{failureTests} is True), and the results contain a list of lines of each + (or failed if ``failureTests`` is True), and the results contain a list of lines of each test's output - + Example:: + number_expr = pyparsing_common.number.copy() result = number_expr.runTests(''' @@ -2273,7 +2455,9 @@ def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResult 3.14.159 ''', failureTests=True) print("Success" if result[0] else "Failed!") + prints:: + # unsigned integer 100 [100] @@ -2291,7 +2475,7 @@ def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResult [1e-12] Success - + # stray character 100Z ^ @@ -2313,7 
+2497,7 @@ def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResult lines, create a test like this:: expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") - + (Note that this is a raw string literal, you must include the leading 'r'.) """ if isinstance(tests, basestring): @@ -2332,10 +2516,18 @@ def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResult out = ['\n'.join(comments), t] comments = [] try: - t = t.replace(r'\n','\n') + # convert newline marks to actual newlines, and strip leading BOM if present + t = t.replace(r'\n','\n').lstrip('\ufeff') result = self.parseString(t, parseAll=parseAll) out.append(result.dump(full=fullDump)) success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + out.append(str(pp_value)) + except Exception as e: + out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) except ParseBaseException as pe: fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" if '\n' in t: @@ -2357,21 +2549,20 @@ def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResult print('\n'.join(out)) allResults.append((t, result)) - + return success, allResults - + class Token(ParserElement): - """ - Abstract C{ParserElement} subclass, for defining atomic matching patterns. + """Abstract :class:`ParserElement` subclass, for defining atomic + matching patterns. """ def __init__( self ): super(Token,self).__init__( savelist=False ) class Empty(Token): - """ - An empty token, will always match. + """An empty token, will always match. """ def __init__( self ): super(Empty,self).__init__() @@ -2381,8 +2572,7 @@ def __init__( self ): class NoMatch(Token): - """ - A token that will never match. + """A token that will never match. 
""" def __init__( self ): super(NoMatch,self).__init__() @@ -2396,18 +2586,18 @@ def parseImpl( self, instring, loc, doActions=True ): class Literal(Token): - """ - Token to exactly match a specified string. - + """Token to exactly match a specified string. + Example:: + Literal('blah').parseString('blah') # -> ['blah'] Literal('blah').parseString('blahfooblah') # -> ['blah'] Literal('blah').parseString('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use L{CaselessLiteral}. - + + For case-insensitive matching, use :class:`CaselessLiteral`. + For keyword matching (force word break before and after the matched string), - use L{Keyword} or L{CaselessKeyword}. + use :class:`Keyword` or :class:`CaselessKeyword`. """ def __init__( self, matchString ): super(Literal,self).__init__() @@ -2437,21 +2627,29 @@ def parseImpl( self, instring, loc, doActions=True ): ParserElement._literalStringClass = Literal class Keyword(Token): - """ - Token to exactly match a specified string as a keyword, that is, it must be - immediately followed by a non-keyword character. Compare with C{L{Literal}}: - - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. - - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} - Accepts two optional constructor arguments in addition to the keyword string: - - C{identChars} is a string of characters that would be valid identifier characters, - defaulting to all alphanumerics + "_" and "$" - - C{caseless} allows case-insensitive matching, default is C{False}. - + """Token to exactly match a specified string as a keyword, that is, + it must be immediately followed by a non-keyword character. Compare + with :class:`Literal`: + + - ``Literal("if")`` will match the leading ``'if'`` in + ``'ifAndOnlyIf'``. 
+ - ``Keyword("if")`` will not; it will only match the leading + ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` + + Accepts two optional constructor arguments in addition to the + keyword string: + + - ``identChars`` is a string of characters that would be valid + identifier characters, defaulting to all alphanumerics + "_" and + "$" + - ``caseless`` allows case-insensitive matching, default is ``False``. + Example:: + Keyword("start").parseString("start") # -> ['start'] Keyword("start").parseString("starting") # -> Exception - For case-insensitive matching, use L{CaselessKeyword}. + For case-insensitive matching, use :class:`CaselessKeyword`. """ DEFAULT_KEYWORD_CHARS = alphanums+"_$" @@ -2502,15 +2700,15 @@ def setDefaultKeywordChars( chars ): Keyword.DEFAULT_KEYWORD_CHARS = chars class CaselessLiteral(Literal): - """ - Token to match a specified string, ignoring case of letters. + """Token to match a specified string, ignoring case of letters. Note: the matched results will always be in the case of the given match string, NOT the case of the input text. Example:: + OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for L{CaselessKeyword}.) + + (Contrast with example for :class:`CaselessKeyword`.) """ def __init__( self, matchString ): super(CaselessLiteral,self).__init__( matchString.upper() ) @@ -2526,36 +2724,39 @@ def parseImpl( self, instring, loc, doActions=True ): class CaselessKeyword(Keyword): """ - Caseless version of L{Keyword}. + Caseless version of :class:`Keyword`. Example:: + OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] - - (Contrast with example for L{CaselessLiteral}.) + + (Contrast with example for :class:`CaselessLiteral`.) 
""" def __init__( self, matchString, identChars=None ): super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) - def parseImpl( self, instring, loc, doActions=True ): - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - class CloseMatch(Token): - """ - A variation on L{Literal} which matches "close" matches, that is, - strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters: - - C{match_string} - string to be matched - - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match - - The results from a successful parse will contain the matched text from the input string and the following named results: - - C{mismatches} - a list of the positions within the match_string where mismatches were found - - C{original} - the original match_string used to compare against the input string - - If C{mismatches} is an empty list, then the match was an exact match. - + """A variation on :class:`Literal` which matches "close" matches, + that is, strings with at most 'n' mismatching characters. + :class:`CloseMatch` takes parameters: + + - ``match_string`` - string to be matched + - ``maxMismatches`` - (``default=1``) maximum number of + mismatches allowed to count as a match + + The results from a successful parse will contain the matched text + from the input string and the following named results: + + - ``mismatches`` - a list of the positions within the + match_string where mismatches were found + - ``original`` - the original match_string used to compare + against the input string + + If ``mismatches`` is an empty list, then the match was an exact + match. 
+ Example:: + patt = CloseMatch("ATCATCGAATGGA") patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) @@ -2604,49 +2805,55 @@ def parseImpl( self, instring, loc, doActions=True ): class Word(Token): - """ - Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, - an optional string containing allowed body characters (if omitted, + """Token for matching words composed of allowed character sets. + Defined with string containing all allowed initial characters, an + optional string containing allowed body characters (if omitted, defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. An optional - C{excludeChars} parameter can list characters that might be found in - the input C{bodyChars} string; useful to define a word of all printables - except for one or two characters, for instance. - - L{srange} is useful for defining custom character set strings for defining - C{Word} expressions, using range notation from regular expression character sets. - - A common mistake is to use C{Word} to match a specific literal string, as in - C{Word("Address")}. Remember that C{Word} uses the string argument to define - I{sets} of matchable characters. This expression would match "Add", "AAA", - "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. - To match an exact literal string, use L{Literal} or L{Keyword}. + maximum, and/or exact length. 
The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. An optional ``excludeChars`` parameter can + list characters that might be found in the input ``bodyChars`` + string; useful to define a word of all printables except for one or + two characters, for instance. + + :class:`srange` is useful for defining custom character set strings + for defining ``Word`` expressions, using range notation from + regular expression character sets. + + A common mistake is to use :class:`Word` to match a specific literal + string, as in ``Word("Address")``. Remember that :class:`Word` + uses the string argument to define *sets* of matchable characters. + This expression would match "Add", "AAA", "dAred", or any other word + made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an + exact literal string, use :class:`Literal` or :class:`Keyword`. pyparsing includes helper strings for building Words: - - L{alphas} - - L{nums} - - L{alphanums} - - L{hexnums} - - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) - - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - L{printables} (any non-whitespace character) + + - :class:`alphas` + - :class:`nums` + - :class:`alphanums` + - :class:`hexnums` + - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 + - accented, tilded, umlauted, etc.) + - :class:`punc8bit` (non-alphabetic characters in ASCII range + 128-255 - currency, symbols, superscripts, diacriticals, etc.) 
+ - :class:`printables` (any non-whitespace character) Example:: + # a word composed of digits integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - + # a word with a leading capital, and zero or more lowercase capital_word = Word(alphas.upper(), alphas.lower()) # hostnames are alphanumeric, with leading alpha, and '-' hostname = Word(alphas, alphanums+'-') - + # roman numeral (not a strict parser, accepts invalid mix of characters) roman = Word("IVXLCDM") - + # any string of non-whitespace characters, except for ',' csv_value = Word(printables, excludeChars=",") """ @@ -2762,22 +2969,38 @@ def charsAsStr(s): return self.strRepr +class Char(Word): + """A short-cut class for defining ``Word(characters, exact=1)``, + when defining a match of any single character in a string of + characters. + """ + def __init__(self, charset): + super(Char, self).__init__(charset, exact=1) + self.reString = "[%s]" % _escapeRegexRangeChars(self.initCharsOrig) + self.re = re.compile( self.reString ) + + class Regex(Token): - r""" - Token for matching strings that match a given regular expression. - Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. - If the given regex contains named groups (defined using C{(?P...)}), these will be preserved as - named parse results. + r"""Token for matching strings that match a given regular + expression. Defined with string specifying the regular expression in + a form recognized by the stdlib Python `re module `_. + If the given regex contains named groups (defined using ``(?P...)``), + these will be preserved as named parse results. 
Example:: + realnum = Regex(r"[+-]?\d+\.\d*") date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') - # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") """ compiledREtype = type(re.compile("[A-Z]")) def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False): - """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.""" + """The parameters ``pattern`` and ``flags`` are passed + to the ``re.compile()`` function as-is. See the Python + `re module `_ module for an + explanation of the acceptable patterns and flags. + """ super(Regex,self).__init__() if isinstance(pattern, basestring): @@ -2801,7 +3024,7 @@ def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False): self.pattern = \ self.reString = str(pattern) self.flags = flags - + else: raise ValueError("Regex may only be constructed with a string or a compiled RE object") @@ -2818,16 +3041,16 @@ def parseImpl( self, instring, loc, doActions=True ): raise ParseException(instring, loc, self.errmsg, self) loc = result.end() - d = result.groupdict() if self.asMatch: ret = result elif self.asGroupList: ret = result.groups() else: ret = ParseResults(result.group()) + d = result.groupdict() if d: - for k in d: - ret[k] = d[k] + for k, v in d.items(): + ret[k] = v return loc,ret def __str__( self ): @@ -2844,17 +3067,23 @@ def __str__( self ): def sub(self, repl): """ Return Regex with an attached parse action to transform the parsed - result as if called using C{re.sub(expr, repl, string)}. + result as if called using `re.sub(expr, repl, string) `_. 
+ + Example:: + + make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") + print(make_html.transformString("h1:main title:")) + # prints "

main title

" """ if self.asGroupList: - warnings.warn("cannot use sub() with Regex(asGroupList=True)", + warnings.warn("cannot use sub() with Regex(asGroupList=True)", SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch and callable(repl): - warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", + warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", SyntaxWarning, stacklevel=2) - raise SyntaxError() + raise SyntaxError() if self.asMatch: def pa(tokens): @@ -2867,24 +3096,38 @@ def pa(tokens): class QuotedString(Token): r""" Token for matching strings that are delimited by quoting characters. - + Defined with the following parameters: - - quoteChar - string of one or more characters defining the quote delimiting string - - escChar - character to escape quotes, typically backslash (default=C{None}) - - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) - - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) 
to actual whitespace (default=C{True}) + + - quoteChar - string of one or more characters defining the + quote delimiting string + - escChar - character to escape quotes, typically backslash + (default= ``None`` ) + - escQuote - special quote sequence to escape an embedded quote + string (such as SQL's ``""`` to escape an embedded ``"``) + (default= ``None`` ) + - multiline - boolean indicating whether quotes can span + multiple lines (default= ``False`` ) + - unquoteResults - boolean indicating whether the matched text + should be unquoted (default= ``True`` ) + - endQuoteChar - string of one or more characters defining the + end of the quote delimited string (default= ``None`` => same as + quoteChar) + - convertWhitespaceEscapes - convert escaped whitespace + (``'\t'``, ``'\n'``, etc.) to actual whitespace + (default= ``True`` ) Example:: + qs = QuotedString('"') print(qs.searchString('lsjdf "This is the quote" sldjf')) complex_qs = QuotedString('{{', endQuoteChar='}}') print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) sql_qs = QuotedString('"', escQuote='""') print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + prints:: + [['This is the quote']] [['This is the "quote"']] [['This is the quote with "embedded" quotes']] @@ -3002,19 +3245,23 @@ def __str__( self ): class CharsNotIn(Token): - """ - Token for matching words composed of characters I{not} in a given set (will - include whitespace in matched characters if not listed in the provided exclusion set - see example). - Defined with string containing all disallowed characters, and an optional - minimum, maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. 
+ """Token for matching words composed of characters *not* in a given + set (will include whitespace in matched characters if not listed in + the provided exclusion set - see example). Defined with string + containing all disallowed characters, and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. Example:: + # define a comma-separated-value as anything that is not a ',' csv_value = CharsNotIn(',') print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) + prints:: + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] """ def __init__( self, notChars, min=1, max=0, exact=0 ): @@ -3023,7 +3270,9 @@ def __init__( self, notChars, min=1, max=0, exact=0 ): self.notChars = notChars if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") + raise ValueError( + "cannot specify a minimum length < 1; use " + + "Optional(CharsNotIn()) if zero-length char group is permitted") self.minLen = min @@ -3073,19 +3322,38 @@ def __str__( self ): return self.strRepr class White(Token): - """ - Special matching class for matching whitespace. Normally, whitespace is ignored - by pyparsing grammars. This class is included when some whitespace structures - are significant. Define with a string containing the whitespace characters to be - matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, - as defined for the C{L{Word}} class. + """Special matching class for matching whitespace. Normally, + whitespace is ignored by pyparsing grammars. This class is included + when some whitespace structures are significant. Define with + a string containing the whitespace characters to be matched; default + is ``" \\t\\r\\n"``. 
Also takes optional ``min``, + ``max``, and ``exact`` arguments, as defined for the + :class:`Word` class. """ whiteStrs = { - " " : "", - "\t": "", - "\n": "", - "\r": "", - "\f": "", + ' ' : '', + '\t': '', + '\n': '', + '\r': '', + '\f': '', + 'u\00A0': '', + 'u\1680': '', + 'u\180E': '', + 'u\2000': '', + 'u\2001': '', + 'u\2002': '', + 'u\2003': '', + 'u\2004': '', + 'u\2005': '', + 'u\2006': '', + 'u\2007': '', + 'u\2008': '', + 'u\2009': '', + 'u\200A': '', + 'u\200B': '', + 'u\202F': '', + 'u\205F': '', + 'u\3000': '', } def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): super(White,self).__init__() @@ -3131,8 +3399,8 @@ def __init__( self ): self.mayIndexError = False class GoToColumn(_PositionToken): - """ - Token to advance to a specific column of input text; useful for tabular report scraping. + """Token to advance to a specific column of input text; useful for + tabular report scraping. """ def __init__( self, colno ): super(GoToColumn,self).__init__() @@ -3157,11 +3425,11 @@ def parseImpl( self, instring, loc, doActions=True ): class LineStart(_PositionToken): - """ - Matches if current position is at the beginning of a line within the parse string - + """Matches if current position is at the beginning of a line within + the parse string + Example:: - + test = '''\ AAA this line AAA and this line @@ -3171,10 +3439,11 @@ class LineStart(_PositionToken): for t in (LineStart() + 'AAA' + restOfLine).searchString(test): print(t) - - Prints:: + + prints:: + ['AAA', ' this line'] - ['AAA', ' and this line'] + ['AAA', ' and this line'] """ def __init__( self ): @@ -3187,8 +3456,8 @@ def parseImpl( self, instring, loc, doActions=True ): raise ParseException(instring, loc, self.errmsg, self) class LineEnd(_PositionToken): - """ - Matches if current position is at the end of a line within the parse string + """Matches if current position is at the end of a line within the + parse string """ def __init__( self ): super(LineEnd,self).__init__() @@ -3207,8 
+3476,8 @@ def parseImpl( self, instring, loc, doActions=True ): raise ParseException(instring, loc, self.errmsg, self) class StringStart(_PositionToken): - """ - Matches if current position is at the beginning of the parse string + """Matches if current position is at the beginning of the parse + string """ def __init__( self ): super(StringStart,self).__init__() @@ -3222,8 +3491,7 @@ def parseImpl( self, instring, loc, doActions=True ): return loc, [] class StringEnd(_PositionToken): - """ - Matches if current position is at the end of the parse string + """Matches if current position is at the end of the parse string """ def __init__( self ): super(StringEnd,self).__init__() @@ -3240,12 +3508,13 @@ def parseImpl( self, instring, loc, doActions=True ): raise ParseException(instring, loc, self.errmsg, self) class WordStart(_PositionToken): - """ - Matches if the current position is at the beginning of a Word, and - is not preceded by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of - the string being parsed, or at the beginning of a line. + """Matches if the current position is at the beginning of a Word, + and is not preceded by any character in a given set of + ``wordChars`` (default= ``printables``). To emulate the + ``\b`` behavior of regular expressions, use + ``WordStart(alphanums)``. ``WordStart`` will also match at + the beginning of the string being parsed, or at the beginning of + a line. """ def __init__(self, wordChars = printables): super(WordStart,self).__init__() @@ -3260,12 +3529,12 @@ def parseImpl(self, instring, loc, doActions=True ): return loc, [] class WordEnd(_PositionToken): - """ - Matches if the current position is at the end of a Word, and - is not followed by any character in a given set of C{wordChars} - (default=C{printables}). 
To emulate the C{\b} behavior of regular expressions, - use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of - the string being parsed, or at the end of a line. + """Matches if the current position is at the end of a Word, and is + not followed by any character in a given set of ``wordChars`` + (default= ``printables``). To emulate the ``\b`` behavior of + regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` + will also match at the end of the string being parsed, or at the end + of a line. """ def __init__(self, wordChars = printables): super(WordEnd,self).__init__() @@ -3283,8 +3552,8 @@ def parseImpl(self, instring, loc, doActions=True ): class ParseExpression(ParserElement): - """ - Abstract subclass of ParserElement, for combining and post-processing parsed tokens. + """Abstract subclass of ParserElement, for combining and + post-processing parsed tokens. """ def __init__( self, exprs, savelist = False ): super(ParseExpression,self).__init__(savelist) @@ -3315,7 +3584,7 @@ def append( self, other ): return self def leaveWhitespace( self ): - """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on + """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on all contained expressions.""" self.skipWhitespace = False self.exprs = [ e.copy() for e in self.exprs ] @@ -3376,7 +3645,7 @@ def streamline( self ): self.mayIndexError |= other.mayIndexError self.errmsg = "Expected " + _ustr(self) - + return self def setResultsName( self, name, listAllMatches=False ): @@ -3388,7 +3657,7 @@ def validate( self, validateTrace=[] ): for e in self.exprs: e.validate(tmp) self.checkRecursion( [] ) - + def copy(self): ret = super(ParseExpression,self).copy() ret.exprs = [e.copy() for e in self.exprs] @@ -3396,12 +3665,14 @@ def copy(self): class And(ParseExpression): """ - Requires all given C{ParseExpression}s to be found in the given order. 
+ Requires all given :class:`ParseExpression` s to be found in the given order. Expressions may be separated by whitespace. - May be constructed using the C{'+'} operator. - May also be constructed using the C{'-'} operator, which will suppress backtracking. + May be constructed using the ``'+'`` operator. + May also be constructed using the ``'-'`` operator, which will + suppress backtracking. Example:: + integer = Word(nums) name_expr = OneOrMore(Word(alphas)) @@ -3423,6 +3694,11 @@ def __init__( self, exprs, savelist = True ): self.skipWhitespace = self.exprs[0].skipWhitespace self.callPreparse = True + def streamline(self): + super(And, self).streamline() + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + def parseImpl( self, instring, loc, doActions=True ): # pass False as last arg to _parse for first element, since we already # pre-parsed the string as part of our And pre-parsing @@ -3471,17 +3747,20 @@ def __str__( self ): class Or(ParseExpression): - """ - Requires that at least one C{ParseExpression} is found. - If two expressions match, the expression that matches the longest string will be used. - May be constructed using the C{'^'} operator. + """Requires that at least one :class:`ParseExpression` is found. If + two expressions match, the expression that matches the longest + string will be used. May be constructed using the ``'^'`` + operator. Example:: + # construct Or using '^' operator - + number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) print(number.searchString("123 3.1416 789")) + prints:: + [['123'], ['3.1416'], ['789']] """ def __init__( self, exprs, savelist = False ): @@ -3491,6 +3770,11 @@ def __init__( self, exprs, savelist = False ): else: self.mayReturnEmpty = True + def streamline(self): + super(Or, self).streamline() + self.saveAsList = any(e.saveAsList for e in self.exprs) + return self + def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = -1 maxException = None @@ -3550,14 +3834,14 @@ def checkRecursion( self, parseElementList ): class MatchFirst(ParseExpression): - """ - Requires that at least one C{ParseExpression} is found. - If two expressions match, the first one listed is the one that will match. - May be constructed using the C{'|'} operator. + """Requires that at least one :class:`ParseExpression` is found. If + two expressions match, the first one listed is the one that will + match. May be constructed using the ``'|'`` operator. Example:: + # construct MatchFirst using '|' operator - + # watch the order of expressions to match number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] @@ -3570,9 +3854,15 @@ def __init__( self, exprs, savelist = False ): super(MatchFirst,self).__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + # self.saveAsList = any(e.saveAsList for e in self.exprs) else: self.mayReturnEmpty = True + def streamline(self): + super(MatchFirst, self).streamline() + self.saveAsList = any(e.saveAsList for e in self.exprs) + return self + def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = -1 maxException = None @@ -3618,12 +3908,13 @@ def checkRecursion( self, parseElementList ): class Each(ParseExpression): - """ - Requires all given C{ParseExpression}s to be found, but in any order. - Expressions may be separated by whitespace. 
- May be constructed using the C{'&'} operator. + """Requires all given :class:`ParseExpression` s to be found, but in + any order. Expressions may be separated by whitespace. + + May be constructed using the ``'&'`` operator. Example:: + color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") integer = Word(nums) @@ -3632,7 +3923,7 @@ class Each(ParseExpression): color_attr = "color:" + color("color") size_attr = "size:" + integer("size") - # use Each (using operator '&') to accept attributes in any order + # use Each (using operator '&') to accept attributes in any order # (shape and posn are required, color and size are optional) shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) @@ -3642,7 +3933,9 @@ class Each(ParseExpression): color:GREEN size:20 shape:TRIANGLE posn:20,40 ''' ) + prints:: + shape: SQUARE color: BLACK posn: 100, 120 ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - color: BLACK @@ -3676,6 +3969,12 @@ def __init__( self, exprs, savelist = True ): self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) self.skipWhitespace = True self.initExprGroups = True + self.saveAsList = True + + def streamline(self): + super(Each, self).streamline() + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self def parseImpl( self, instring, loc, doActions=True ): if self.initExprGroups: @@ -3742,8 +4041,8 @@ def checkRecursion( self, parseElementList ): class ParseElementEnhance(ParserElement): - """ - Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. + """Abstract subclass of :class:`ParserElement`, for combining and + post-processing parsed tokens. 
""" def __init__( self, expr, savelist=False ): super(ParseElementEnhance,self).__init__(savelist) @@ -3819,20 +4118,25 @@ def __str__( self ): class FollowedBy(ParseElementEnhance): - """ - Lookahead matching of the given parse expression. C{FollowedBy} - does I{not} advance the parsing position within the input string, it only - verifies that the specified parse expression matches at the current - position. C{FollowedBy} always returns a null token list. + """Lookahead matching of the given parse expression. + ``FollowedBy`` does *not* advance the parsing position within + the input string, it only verifies that the specified parse + expression matches at the current position. ``FollowedBy`` + always returns a null token list. If any results names are defined + in the lookahead expression, those *will* be returned for access by + name. Example:: + # use FollowedBy to match a label only if it is followed by a ':' data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - + OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() + prints:: + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] """ def __init__( self, expr ): @@ -3840,20 +4144,108 @@ def __init__( self, expr ): self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): - self.expr.tryParse( instring, loc ) - return loc, [] + _, ret = self.expr._parse(instring, loc, doActions=doActions) + del ret[:] + return loc, ret -class NotAny(ParseElementEnhance): +class PrecededBy(ParseElementEnhance): + """Lookbehind matching of the given parse expression. + ``PrecededBy`` does not advance the parsing position within the + input string, it only verifies that the specified parse expression + matches prior to the current position. 
``PrecededBy`` always + returns a null token list, but if a results name is defined on the + given expression, it is returned. + + Parameters: + + - expr - expression that must match prior to the current parse + location + - retreat - (default= ``None``) - (int) maximum number of characters + to lookbehind prior to the current parse location + + If the lookbehind expression is a string, Literal, Keyword, or + a Word or CharsNotIn with a specified exact or maximum length, then + the retreat parameter is not required. Otherwise, retreat must be + specified to give a maximum number of characters to look back from + the current parse position for a lookbehind match. + + Example:: + + # VB-style variable names with type prefixes + int_var = PrecededBy("#") + pyparsing_common.identifier + str_var = PrecededBy("$") + pyparsing_common.identifier + """ - Lookahead to disallow matching with the given parse expression. C{NotAny} - does I{not} advance the parsing position within the input string, it only - verifies that the specified parse expression does I{not} match at the current - position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny} - always returns a null token list. May be constructed using the '~' operator. 
+ def __init__(self, expr, retreat=None): + super(PrecededBy, self).__init__(expr) + self.expr = self.expr().leaveWhitespace() + self.mayReturnEmpty = True + self.mayIndexError = False + self.exact = False + if isinstance(expr, str): + retreat = len(expr) + self.exact = True + elif isinstance(expr, (Literal, Keyword)): + retreat = expr.matchLen + self.exact = True + elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: + retreat = expr.maxLen + self.exact = True + elif isinstance(expr, _PositionToken): + retreat = 0 + self.exact = True + self.retreat = retreat + self.errmsg = "not preceded by " + str(expr) + self.skipWhitespace = False + + def parseImpl(self, instring, loc=0, doActions=True): + if self.exact: + if loc < self.retreat: + raise ParseException(instring, loc, self.errmsg) + start = loc - self.retreat + _, ret = self.expr._parse(instring, start) + else: + # retreat specified a maximum lookbehind window, iterate + test_expr = self.expr + StringEnd() + instring_slice = instring[:loc] + last_expr = ParseException(instring, loc, self.errmsg) + for offset in range(1, min(loc, self.retreat+1)): + try: + _, ret = test_expr._parse(instring_slice, loc-offset) + except ParseBaseException as pbe: + last_expr = pbe + else: + break + else: + raise last_expr + # return empty list of tokens, but preserve any defined results names + del ret[:] + return loc, ret + + +class NotAny(ParseElementEnhance): + """Lookahead to disallow matching with the given parse expression. + ``NotAny`` does *not* advance the parsing position within the + input string, it only verifies that the specified parse expression + does *not* match at the current position. Also, ``NotAny`` does + *not* skip over leading whitespace. ``NotAny`` always returns + a null token list. May be constructed using the '~' operator. 
Example:: - + + AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) + + # take care not to mistake keywords for identifiers + ident = ~(AND | OR | NOT) + Word(alphas) + boolean_term = Optional(NOT) + ident + + # very crude boolean expression - to support parenthesis groups and + # operation hierarchy, use infixNotation + boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) + + # integers that are followed by "." are actually floats + integer = Word(nums) + ~Char(".") """ def __init__( self, expr ): super(NotAny,self).__init__(expr) @@ -3891,7 +4283,7 @@ def parseImpl( self, instring, loc, doActions=True ): check_ender = self.not_ender is not None if check_ender: try_not_ender = self.not_ender.tryParse - + # must be at least one (but first see if we are the stopOn sentinel; # if so, fail) if check_ender: @@ -3913,18 +4305,18 @@ def parseImpl( self, instring, loc, doActions=True ): pass return loc, tokens - + class OneOrMore(_MultipleMatch): - """ - Repetition of one or more of the given expression. - + """Repetition of one or more of the given expression. 
+ Parameters: - expr - expression that must match one or more times - - stopOn - (default=C{None}) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) + - stopOn - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) Example:: + data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) @@ -3935,7 +4327,7 @@ class OneOrMore(_MultipleMatch): # use stopOn attribute for OneOrMore to avoid reading label string as part of the data attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - + # could also be written as (attr_expr * (1,)).parseString(text).pprint() """ @@ -3950,21 +4342,20 @@ def __str__( self ): return self.strRepr class ZeroOrMore(_MultipleMatch): - """ - Optional repetition of zero or more of the given expression. - + """Optional repetition of zero or more of the given expression. 
+ Parameters: - expr - expression that must match zero or more times - - stopOn - (default=C{None}) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) + - stopOn - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) - Example: similar to L{OneOrMore} + Example: similar to :class:`OneOrMore` """ def __init__( self, expr, stopOn=None): super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) self.mayReturnEmpty = True - + def parseImpl( self, instring, loc, doActions=True ): try: return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) @@ -3989,27 +4380,29 @@ def __str__(self): _optionalNotMatched = _NullToken() class Optional(ParseElementEnhance): - """ - Optional matching of the given expression. + """Optional matching of the given expression. Parameters: - expr - expression that must match zero or more times - default (optional) - value to be returned if the optional expression is not found. Example:: + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) zip.runTests(''' # traditional ZIP code 12345 - + # ZIP+4 form 12101-0001 - + # invalid ZIP 98765- ''') + prints:: + # traditional ZIP code 12345 ['12345'] @@ -4053,20 +4446,21 @@ def __str__( self ): return self.strRepr class SkipTo(ParseElementEnhance): - """ - Token for skipping over all undefined text until the matched expression is found. + """Token for skipping over all undefined text until the matched + expression is found. Parameters: - expr - target expression marking the end of the data to be skipped - - include - (default=C{False}) if True, the target expression is also parsed + - include - (default= ``False``) if True, the target expression is also parsed (the skipped text and target expression are returned as a 2-element list). 
- - ignore - (default=C{None}) used to define grammars (typically quoted strings and + - ignore - (default= ``None``) used to define grammars (typically quoted strings and comments) that might contain false matches to the target expression - - failOn - (default=C{None}) define expressions that are not allowed to be - included in the skipped test; if found before the target expression is found, + - failOn - (default= ``None``) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, the SkipTo is not a match Example:: + report = ''' Outstanding Issues Report - 1 Jan 2000 @@ -4083,14 +4477,16 @@ class SkipTo(ParseElementEnhance): # - parse action will call token.strip() for each matched token, i.e., the description body string_data = SkipTo(SEP, ignore=quotedString) string_data.setParseAction(tokenMap(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + integer("days_open")) - + for tkt in ticket_expr.searchString(report): print tkt.dump() + prints:: + ['101', 'Critical', 'Intermittent system crash', '6'] - days_open: 6 - desc: Intermittent system crash @@ -4127,14 +4523,14 @@ def parseImpl( self, instring, loc, doActions=True ): expr_parse = self.expr._parse self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - + tmploc = loc while tmploc <= instrlen: if self_failOn_canParseNext is not None: # break if failOn expression matches if self_failOn_canParseNext(instring, tmploc): break - + if self_ignoreExpr_tryParse is not None: # advance past ignore expressions while 1: @@ -4142,7 +4538,7 @@ def parseImpl( self, instring, loc, doActions=True ): tmploc = self_ignoreExpr_tryParse(instring, tmploc) except 
ParseBaseException: break - + try: expr_parse(instring, tmploc, doActions=False, callPreParse=False) except (ParseException, IndexError): @@ -4160,7 +4556,7 @@ def parseImpl( self, instring, loc, doActions=True ): loc = tmploc skiptext = instring[startloc:loc] skipresult = ParseResults(skiptext) - + if self.includeMatch: loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) skipresult += mat @@ -4168,23 +4564,31 @@ def parseImpl( self, instring, loc, doActions=True ): return loc, skipresult class Forward(ParseElementEnhance): - """ - Forward declaration of an expression to be defined later - + """Forward declaration of an expression to be defined later - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. + When the expression is known, it is assigned to the ``Forward`` + variable using the '<<' operator. + + Note: take care when assigning to ``Forward`` not to overlook + precedence of operators. - Note: take care when assigning to C{Forward} not to overlook precedence of operators. Specifically, '|' has a lower precedence than '<<', so that:: + fwdExpr << a | b | c + will actually be evaluated as:: + (fwdExpr << a) | b | c + thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the C{Forward}:: + explicitly group the values inserted into the ``Forward``:: + fwdExpr << (a | b | c) + Converting to use the '<<=' operator instead will avoid this problem. - See L{ParseResults.pprint} for an example of a recursive parser created using - C{Forward}. + See :class:`ParseResults.pprint` for an example of a recursive + parser created using ``Forward``. 
""" def __init__( self, other=None ): super(Forward,self).__init__( other, savelist=False ) @@ -4201,10 +4605,10 @@ def __lshift__( self, other ): self.saveAsList = self.expr.saveAsList self.ignoreExprs.extend(self.expr.ignoreExprs) return self - + def __ilshift__(self, other): return self << other - + def leaveWhitespace( self ): self.skipWhitespace = False return self @@ -4254,19 +4658,20 @@ def __str__( self ): class TokenConverter(ParseElementEnhance): """ - Abstract subclass of C{ParseExpression}, for converting parsed results. + Abstract subclass of :class:`ParseExpression`, for converting parsed results. """ def __init__( self, expr, savelist=False ): super(TokenConverter,self).__init__( expr )#, savelist ) self.saveAsList = False class Combine(TokenConverter): - """ - Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the input string; - this can be disabled by specifying C{'adjacent=False'} in the constructor. + """Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the + input string; this can be disabled by specifying + ``'adjacent=False'`` in the constructor. Example:: + real = Word(nums) + '.' + Word(nums) print(real.parseString('3.1416')) # -> ['3', '.', '1416'] # will also erroneously match the following @@ -4305,10 +4710,11 @@ def postParse( self, instring, loc, tokenlist ): return retToks class Group(TokenConverter): - """ - Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions. + """Converter to return the matched tokens as a list - useful for + returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. 
Example:: + ident = Word(alphas) num = Word(nums) term = ident | num @@ -4320,38 +4726,40 @@ class Group(TokenConverter): """ def __init__( self, expr ): super(Group,self).__init__( expr ) - self.saveAsList = True + self.saveAsList = expr.saveAsList def postParse( self, instring, loc, tokenlist ): return [ tokenlist ] class Dict(TokenConverter): - """ - Converter to return a repetitive expression as a list, but also as a dictionary. - Each element can also be referenced using the first token in the expression as its key. - Useful for tabular report scraping when the first column can be used as a item key. + """Converter to return a repetitive expression as a list, but also + as a dictionary. Each element can also be referenced using the first + token in the expression as its key. Useful for tabular report + scraping when the first column can be used as a item key. Example:: + data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - + # print attributes as plain groups print(OneOrMore(attr_expr).parseString(text).dump()) - + # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names result = Dict(OneOrMore(Group(attr_expr))).parseString(text) print(result.dump()) - + # access named fields as dict entries, or output as dict - print(result['shape']) + print(result['shape']) print(result.asDict()) + prints:: - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left @@ -4359,7 +4767,8 @@ class Dict(TokenConverter): - 
texture: burlap SQUARE {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - See more examples at L{ParseResults} of accessing fields by results name. + + See more examples at :class:`ParseResults` of accessing fields by results name. """ def __init__( self, expr ): super(Dict,self).__init__( expr ) @@ -4391,10 +4800,10 @@ def postParse( self, instring, loc, tokenlist ): class Suppress(TokenConverter): - """ - Converter for ignoring the results of a parsed expression. + """Converter for ignoring the results of a parsed expression. Example:: + source = "a, b, c,d" wd = Word(alphas) wd_list1 = wd + ZeroOrMore(',' + wd) @@ -4404,10 +4813,13 @@ class Suppress(TokenConverter): # way afterward - use Suppress to keep them out of the parsed output wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) print(wd_list2.parseString(source)) + prints:: + ['a', ',', 'b', ',', 'c', ',', 'd'] ['a', 'b', 'c', 'd'] - (See also L{delimitedList}.) + + (See also :class:`delimitedList`.) """ def postParse( self, instring, loc, tokenlist ): return [] @@ -4417,8 +4829,7 @@ def suppress( self ): class OnlyOnce(object): - """ - Wrapper for parse actions, to ensure they are only called once. + """Wrapper for parse actions, to ensure they are only called once. """ def __init__(self, methodCall): self.callable = _trim_arity(methodCall) @@ -4433,13 +4844,15 @@ def reset(self): self.called = False def traceParseAction(f): - """ - Decorator for debugging parse actions. - - When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} - When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. + """Decorator for debugging parse actions. + + When the parse action is called, this decorator will print + ``">> entering method-name(line:, , )"``. 
+ When the parse action completes, the decorator will print + ``"<<"`` followed by the returned value, or any exception that the parse action raised. Example:: + wd = Word(alphas) @traceParseAction @@ -4448,7 +4861,9 @@ def remove_duplicate_chars(tokens): wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) + prints:: + >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) < ['aa', 'bb', 'cc'] delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] """ @@ -4496,16 +4913,21 @@ def delimitedList( expr, delim=",", combine=False ): return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) def countedArray( expr, intExpr=None ): - """ - Helper to define a counted list of expressions. + """Helper to define a counted list of expressions. + This helper defines a pattern of the form:: + integer expr expr expr... + where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. - - If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. + The matched tokens returns the array of expr tokens as a list - the + leading count token is suppressed. + + If ``intExpr`` is specified, it should be a pyparsing expression + that produces an integer value. Example:: + countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] # in this parser, the leading integer value is given in binary, @@ -4536,17 +4958,19 @@ def _flatten(L): return ret def matchPreviousLiteral(expr): - """ - Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks - for a 'repeat' of a previous expression. 
For example:: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + first = Word(nums) second = matchPreviousLiteral(first) matchExpr = first + ":" + second - will match C{"1:1"}, but not C{"1:2"}. Because this matches a - previous literal, will also match the leading C{"1:1"} in C{"1:10"}. - If this is not desired, use C{matchPreviousExpr}. - Do I{not} use with packrat parsing enabled. + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches a previous literal, will also match the leading + ``"1:1"`` in ``"1:10"``. If this is not desired, use + :class:`matchPreviousExpr`. Do *not* use with packrat parsing + enabled. """ rep = Forward() def copyTokenToRepeater(s,l,t): @@ -4564,18 +4988,19 @@ def copyTokenToRepeater(s,l,t): return rep def matchPreviousExpr(expr): - """ - Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks - for a 'repeat' of a previous expression. For example:: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + first = Word(nums) second = matchPreviousExpr(first) matchExpr = first + ":" + second - will match C{"1:1"}, but not C{"1:2"}. Because this matches by - expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; - the expressions are evaluated first, and then compared, so - C{"1"} is compared with C{"10"}. - Do I{not} use with packrat parsing enabled. + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches by expressions, will *not* match the leading ``"1:1"`` + in ``"1:10"``; the expressions are evaluated first, and then + compared, so ``"1"`` is compared with ``"10"``. Do *not* use + with packrat parsing enabled. 
""" rep = Forward() e2 = expr.copy() @@ -4600,26 +5025,33 @@ def _escapeRegexRangeChars(s): return _ustr(s) def oneOf( strs, caseless=False, useRegex=True ): - """ - Helper to quickly define a set of alternative Literals, and makes sure to do - longest-first testing when there is a conflict, regardless of the input order, - but returns a C{L{MatchFirst}} for best performance. + """Helper to quickly define a set of alternative Literals, and makes + sure to do longest-first testing when there is a conflict, + regardless of the input order, but returns + a :class:`MatchFirst` for best performance. Parameters: - - strs - a string of space-delimited literals, or a collection of string literals - - caseless - (default=C{False}) - treat all literals as caseless - - useRegex - (default=C{True}) - as an optimization, will generate a Regex - object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or - if creating a C{Regex} raises an exception) + + - strs - a string of space-delimited literals, or a collection of + string literals + - caseless - (default= ``False``) - treat all literals as + caseless + - useRegex - (default= ``True``) - as an optimization, will + generate a Regex object; otherwise, will generate + a :class:`MatchFirst` object (if ``caseless=True``, or if + creating a :class:`Regex` raises an exception) Example:: + comp_oper = oneOf("< = > <= >= !=") var = Word(alphas) number = Word(nums) term = var | number comparison_expr = term + comp_oper + term print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) + prints:: + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] """ if caseless: @@ -4673,19 +5105,21 @@ def oneOf( strs, caseless=False, useRegex=True ): return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) def dictOf( key, value ): - """ - Helper to easily and clearly define a dictionary by specifying the respective patterns - for the key and value. 
Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens - in the proper order. The key pattern can include delimiting markers or punctuation, - as long as they are suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the C{Dict} results can include named token - fields. + """Helper to easily and clearly define a dictionary by specifying + the respective patterns for the key and value. Takes care of + defining the :class:`Dict`, :class:`ZeroOrMore`, and + :class:`Group` tokens in the proper order. The key pattern + can include delimiting markers or punctuation, as long as they are + suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the :class:`Dict` results + can include named token fields. Example:: + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) print(OneOrMore(attr_expr).parseString(text).dump()) - + attr_label = label attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) @@ -4695,7 +5129,9 @@ def dictOf( key, value ): print(result['shape']) print(result.shape) # object attribute access works too print(result.asDict()) + prints:: + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left @@ -4705,29 +5141,34 @@ def dictOf( key, value ): SQUARE {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} """ - return Dict( ZeroOrMore( Group ( key + value ) ) ) + return Dict(OneOrMore(Group(key + value))) def originalTextFor(expr, asString=True): - """ - Helper to return the original, untokenized text for a given expression. 
Useful to - restore the parsed fields of an HTML start tag into the raw tag text itself, or to - revert separate tokens with intervening whitespace back to the original matching - input text. By default, returns astring containing the original parsed text. - - If the optional C{asString} argument is passed as C{False}, then the return value is a - C{L{ParseResults}} containing any results names that were originally matched, and a - single token containing the original matched text from the input string. So if - the expression passed to C{L{originalTextFor}} contains expressions with defined - results names, you must set C{asString} to C{False} if you want to preserve those - results name values. + """Helper to return the original, untokenized text for a given + expression. Useful to restore the parsed fields of an HTML start + tag into the raw tag text itself, or to revert separate tokens with + intervening whitespace back to the original matching input text. By + default, returns astring containing the original parsed text. + + If the optional ``asString`` argument is passed as + ``False``, then the return value is + a :class:`ParseResults` containing any results names that + were originally matched, and a single token containing the original + matched text from the input string. So if the expression passed to + :class:`originalTextFor` contains expressions with defined + results names, you must set ``asString`` to ``False`` if you + want to preserve those results name values. Example:: + src = "this is test bold text normal text " for tag in ("b","i"): opener,closer = makeHTMLTags(tag) patt = originalTextFor(opener + SkipTo(closer) + closer) print(patt.searchString(src)[0]) + prints:: + [' bold text '] ['text'] """ @@ -4744,29 +5185,33 @@ def extractText(s,l,t): matchExpr.ignoreExprs = expr.ignoreExprs return matchExpr -def ungroup(expr): - """ - Helper to undo pyparsing's default grouping of And expressions, even - if all but one are non-empty. 
+def ungroup(expr): + """Helper to undo pyparsing's default grouping of And expressions, + even if all but one are non-empty. """ return TokenConverter(expr).setParseAction(lambda t:t[0]) def locatedExpr(expr): - """ - Helper to decorate a returned token with its starting and ending locations in the input string. + """Helper to decorate a returned token with its starting and ending + locations in the input string. + This helper adds the following results names: + - locn_start = location where matched expression begins - locn_end = location where matched expression ends - value = the actual parsed results - Be careful if the input text contains C{} characters, you may want to call - C{L{ParserElement.parseWithTabs}} + Be careful if the input text contains ```` characters, you + may want to call :class:`ParserElement.parseWithTabs` Example:: + wd = Word(alphas) for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): print(match) + prints:: + [[0, 'ljsdf', 5]] [[8, 'lksdjjf', 15]] [[18, 'lkkjj', 23]] @@ -4790,22 +5235,30 @@ def locatedExpr(expr): _reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" def srange(s): - r""" - Helper to easily define string ranges for use in Word construction. Borrows - syntax from regexp '[]' string range definitions:: + r"""Helper to easily define string ranges for use in Word + construction. Borrows syntax from regexp '[]' string range + definitions:: + srange("[0-9]") -> "0123456789" srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - The input string must be enclosed in []'s, and the returned string is the expanded - character set joined into a single string. - The values enclosed in the []'s may be: + + The input string must be enclosed in []'s, and the returned string + is the expanded character set joined into a single string. 
The + values enclosed in the []'s may be: + - a single character - - an escaped character with a leading backslash (such as C{\-} or C{\]}) - - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) - (C{\0x##} is also supported for backwards compatibility) - - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) - - a range of any of the above, separated by a dash (C{'a-z'}, etc.) - - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) + - an escaped character with a leading backslash (such as ``\-`` + or ``\]``) + - an escaped hex character with a leading ``'\x'`` + (``\x21``, which is a ``'!'`` character) (``\0x##`` + is also supported for backwards compatibility) + - an escaped octal character with a leading ``'\0'`` + (``\041``, which is a ``'!'`` character) + - a range of any of the above, separated by a dash (``'a-z'``, + etc.) + - any combination of the above (``'aeiouy'``, + ``'a-zA-Z0-9_$'``, etc.) """ _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) try: @@ -4814,9 +5267,8 @@ def srange(s): return "" def matchOnlyAtCol(n): - """ - Helper method for defining parse actions that require matching at a specific - column in the input text. + """Helper method for defining parse actions that require matching at + a specific column in the input text. """ def verifyCol(strg,locn,toks): if col(locn,strg) != n: @@ -4824,24 +5276,26 @@ def verifyCol(strg,locn,toks): return verifyCol def replaceWith(replStr): - """ - Helper method for common parse actions that simply return a literal value. Especially - useful when used with C{L{transformString}()}. + """Helper method for common parse actions that simply return + a literal value. Especially useful when used with + :class:`transformString` (). 
Example:: + num = Word(nums).setParseAction(lambda toks: int(toks[0])) na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) term = na | num - + OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] """ return lambda s,l,t: [replStr] def removeQuotes(s,l,t): - """ - Helper parse action for removing quotation marks from parsed quoted strings. + """Helper parse action for removing quotation marks from parsed + quoted strings. Example:: + # by default, quotation marks are included in parsed results quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] @@ -4852,18 +5306,20 @@ def removeQuotes(s,l,t): return t[0][1:-1] def tokenMap(func, *args): - """ - Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional - args are passed, they are forwarded to the given function as additional arguments after - the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the - parsed data to an integer using base 16. + """Helper to define a parse action by mapping a function to all + elements of a ParseResults list. If any additional args are passed, + they are forwarded to the given function as additional arguments + after the token, as in + ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``, + which will convert the parsed data to an integer using base 16. 
+ + Example (compare the last to example in :class:`ParserElement.transformString`:: - Example (compare the last to example in L{ParserElement.transformString}:: hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) hex_ints.runTests(''' 00 11 22 aa FF 0a 0d 1a ''') - + upperword = Word(alphas).setParseAction(tokenMap(str.upper)) OneOrMore(upperword).runTests(''' my kingdom for a horse @@ -4873,7 +5329,9 @@ def tokenMap(func, *args): OneOrMore(wd).setParseAction(' '.join).runTests(''' now is the winter of our discontent made glorious summer by this sun of york ''') + prints:: + 00 11 22 aa FF 0a 0d 1a [0, 17, 34, 170, 255, 10, 13, 26] @@ -4887,7 +5345,7 @@ def pa(s,l,t): return [func(tokn, *args) for tokn in t] try: - func_name = getattr(func, '__name__', + func_name = getattr(func, '__name__', getattr(func, '__class__').__name__) except Exception: func_name = str(func) @@ -4896,11 +5354,13 @@ def pa(s,l,t): return pa upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) -"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}""" +"""(Deprecated) Helper parse action to convert tokens to upper case. +Deprecated in favor of :class:`pyparsing_common.upcaseTokens`""" downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) -"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}""" - +"""(Deprecated) Helper parse action to convert tokens to lower case. +Deprecated in favor of :class:`pyparsing_common.downcaseTokens`""" + def _makeTags(tagStr, xml): """Internal helper to construct opening and closing tag expressions, given a tag name""" if isinstance(tagStr,basestring): @@ -4931,55 +5391,63 @@ def _makeTags(tagStr, xml): return openTag, closeTag def makeHTMLTags(tagStr): - """ - Helper to construct opening and closing tag expressions for HTML, given a tag name. 
Matches - tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. + """Helper to construct opening and closing tag expressions for HTML, + given a tag name. Matches tags in either upper or lower case, + attributes with namespaces and with quoted or unquoted values. Example:: - text = 'More info at the pyparsing wiki page' - # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple + + text = 'More info at the pyparsing wiki page' + # makeHTMLTags returns pyparsing expressions for the opening and + # closing tags as a 2-tuple a,a_end = makeHTMLTags("A") link_expr = a + SkipTo(a_end)("link_text") + a_end - + for link in link_expr.searchString(text): - # attributes in the tag (like "href" shown here) are also accessible as named results + # attributes in the tag (like "href" shown here) are + # also accessible as named results print(link.link_text, '->', link.href) + prints:: - pyparsing -> http://pyparsing.wikispaces.com + + pyparsing -> https://github.com/pyparsing/pyparsing/wiki """ return _makeTags( tagStr, False ) def makeXMLTags(tagStr): - """ - Helper to construct opening and closing tag expressions for XML, given a tag name. Matches - tags only in the given upper/lower case. + """Helper to construct opening and closing tag expressions for XML, + given a tag name. Matches tags only in the given upper/lower case. - Example: similar to L{makeHTMLTags} + Example: similar to :class:`makeHTMLTags` """ return _makeTags( tagStr, True ) def withAttribute(*args,**attrDict): - """ - Helper to create a validating parse action to be used with start tags created - with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag - with a required attribute value, to avoid false matches on common tags such as - C{} or C{
}. - - Call C{withAttribute} with a series of attribute names and values. Specify the list - of filter attributes names and values as: - - keyword arguments, as in C{(align="right")}, or - - as an explicit dict with C{**} operator, when an attribute name is also a Python - reserved word, as in C{**{"class":"Customer", "align":"right"}} - - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) - For attribute names with a namespace prefix, you must use the second form. Attribute - names are matched insensitive to upper/lower case. - - If just testing for C{class} (with or without a namespace), use C{L{withClass}}. - - To verify that the attribute exists, but without specifying a value, pass - C{withAttribute.ANY_VALUE} as the value. + """Helper to create a validating parse action to be used with start + tags created with :class:`makeXMLTags` or + :class:`makeHTMLTags`. Use ``withAttribute`` to qualify + a starting tag with a required attribute value, to avoid false + matches on common tags such as ```` or ``
``. + + Call ``withAttribute`` with a series of attribute names and + values. Specify the list of filter attributes names and values as: + + - keyword arguments, as in ``(align="right")``, or + - as an explicit dict with ``**`` operator, when an attribute + name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` + - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align","right"))`` + + For attribute names with a namespace prefix, you must use the second + form. Attribute names are matched insensitive to upper/lower case. + + If just testing for ``class`` (with or without a namespace), use + :class:`withClass`. + + To verify that the attribute exists, but without specifying a value, + pass ``withAttribute.ANY_VALUE`` as the value. Example:: + html = '''
Some text @@ -4987,7 +5455,7 @@ def withAttribute(*args,**attrDict):
1,3 2,3 1,1
this has no type
- + ''' div,div_end = makeHTMLTags("div") @@ -4996,13 +5464,15 @@ def withAttribute(*args,**attrDict): grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) - + # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) + prints:: + 1 4 0 1 0 1 4 0 1 0 @@ -5024,11 +5494,12 @@ def pa(s,l,tokens): withAttribute.ANY_VALUE = object() def withClass(classname, namespace=''): - """ - Simplified version of C{L{withAttribute}} when matching on a div class - made - difficult because C{class} is a reserved word in Python. + """Simplified version of :class:`withAttribute` when + matching on a div class - made difficult because ``class`` is + a reserved word in Python. Example:: + html = '''
Some text @@ -5036,84 +5507,96 @@ def withClass(classname, namespace=''):
1,3 2,3 1,1
this <div> has no class
- + ''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) - + grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) - + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) + prints:: + 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ classattr = "%s:class" % namespace if namespace else "class" - return withAttribute(**{classattr : classname}) + return withAttribute(**{classattr : classname}) -opAssoc = _Constants() +opAssoc = SimpleNamespace() opAssoc.LEFT = object() opAssoc.RIGHT = object() def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): - """ - Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary or - binary, left- or right-associative. Parse actions can also be attached - to operator expressions. The generated parser will also recognize the use - of parentheses to override operator precedences (see example below). - - Note: if you define a deep operator list, you may see performance issues - when using infixNotation. See L{ParserElement.enablePackrat} for a - mechanism to potentially improve your parser performance. + """Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary + or binary, left- or right-associative. Parse actions can also be + attached to operator expressions. The generated parser will also + recognize the use of parentheses to override operator precedences + (see example below). + + Note: if you define a deep operator list, you may see performance + issues when using infixNotation. See + :class:`ParserElement.enablePackrat` for a mechanism to potentially + improve your parser performance. 
Parameters: - - baseExpr - expression representing the most basic element for the nested - - opList - list of tuples, one for each operator precedence level in the - expression grammar; each tuple is of the form - (opExpr, numTerms, rightLeftAssoc, parseAction), where: - - opExpr is the pyparsing expression for the operator; - may also be a string, which will be converted to a Literal; - if numTerms is 3, opExpr is a tuple of two expressions, for the - two operators separating the 3 terms - - numTerms is the number of terms for this operator (must - be 1, 2, or 3) - - rightLeftAssoc is the indicator whether the operator is - right or left associative, using the pyparsing-defined - constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. + - baseExpr - expression representing the most basic element for the + nested + - opList - list of tuples, one for each operator precedence level + in the expression grammar; each tuple is of the form ``(opExpr, + numTerms, rightLeftAssoc, parseAction)``, where: + + - opExpr is the pyparsing expression for the operator; may also + be a string, which will be converted to a Literal; if numTerms + is 3, opExpr is a tuple of two expressions, for the two + operators separating the 3 terms + - numTerms is the number of terms for this operator (must be 1, + 2, or 3) + - rightLeftAssoc is the indicator whether the operator is right + or left associative, using the pyparsing-defined constants + ``opAssoc.RIGHT`` and ``opAssoc.LEFT``. 
- parseAction is the parse action to be associated with - expressions matching this operator expression (the - parse action tuple member may be omitted); if the parse action - is passed a tuple or list of functions, this is equivalent to - calling C{setParseAction(*fn)} (L{ParserElement.setParseAction}) - - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) + expressions matching this operator expression (the parse action + tuple member may be omitted); if the parse action is passed + a tuple or list of functions, this is equivalent to calling + ``setParseAction(*fn)`` + (:class:`ParserElement.setParseAction`) + - lpar - expression for matching left-parentheses + (default= ``Suppress('(')``) + - rpar - expression for matching right-parentheses + (default= ``Suppress(')')``) Example:: - # simple example of four-function arithmetic with ints and variable names + + # simple example of four-function arithmetic with ints and + # variable names integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - + varname = pyparsing_common.identifier + arith_expr = infixNotation(integer | varname, [ ('-', 1, opAssoc.RIGHT), (oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT), ]) - + arith_expr.runTests(''' 5+3*6 (5+3)*6 -2--11 ''', fullDump=False) + prints:: + 5+3*6 [[5, '+', [3, '*', 6]]] @@ -5123,6 +5606,12 @@ def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): -2--11 [[['-', 2], '-', ['-', 11]]] """ + # captive version of FollowedBy that does not do parse actions or capture results names + class _FB(FollowedBy): + def parseImpl(self, instring, loc, doActions=True): + self.expr.tryParse(instring, loc) + return loc, [] + ret = Forward() lastExpr = baseExpr | ( lpar + ret + rpar ) for i,operDef in enumerate(opList): @@ -5130,19 +5619,20 @@ def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') 
): termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: - raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") + raise ValueError( + "if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: - matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) + matchExpr = _FB(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) elif arity == 2: if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) + matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) else: - matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) + matchExpr = _FB(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ + matchExpr = _FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") @@ -5151,14 +5641,14 @@ def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) - matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) + matchExpr = _FB(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) elif arity == 2: if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) + matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) else: - matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + 
OneOrMore( thisExpr ) ) + matchExpr = _FB(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ + matchExpr = _FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") @@ -5175,7 +5665,8 @@ def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): return ret operatorPrecedence = infixNotation -"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" +"""(Deprecated) Former name of :class:`infixNotation`, will be +dropped in a future release.""" dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") @@ -5184,28 +5675,33 @@ def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """ - Helper method for defining nested lists enclosed in opening and closing - delimiters ("(" and ")" are the default). + """Helper method for defining nested lists enclosed in opening and + closing delimiters ("(" and ")" are the default). 
Parameters: - - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - - content - expression for items within the nested lists (default=C{None}) - - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) - - If an expression is not provided for the content argument, the nested - expression will capture all whitespace-delimited content between delimiters - as a list of separate values. - - Use the C{ignoreExpr} argument to define expressions that may contain - opening or closing characters that should not be treated as opening - or closing characters for nesting, such as quotedString or a comment - expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. - The default is L{quotedString}, but if no expressions are to be ignored, - then pass C{None} for this argument. + - opener - opening character for a nested list + (default= ``"("``); can also be a pyparsing expression + - closer - closing character for a nested list + (default= ``")"``); can also be a pyparsing expression + - content - expression for items within the nested lists + (default= ``None``) + - ignoreExpr - expression for ignoring opening and closing + delimiters (default= :class:`quotedString`) + + If an expression is not provided for the content argument, the + nested expression will capture all whitespace-delimited content + between delimiters as a list of separate values. + + Use the ``ignoreExpr`` argument to define expressions that may + contain opening or closing characters that should not be treated as + opening or closing characters for nesting, such as quotedString or + a comment expression. Specify multiple expressions using an + :class:`Or` or :class:`MatchFirst`. The default is + :class:`quotedString`, but if no expressions are to be ignored, then + pass ``None`` for this argument. 
Example:: + data_type = oneOf("void int short long char float double") decl_data_type = Combine(data_type + Optional(Word('*'))) ident = Word(alphas+'_', alphanums+'_') @@ -5215,29 +5711,31 @@ def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.cop code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) - c_function = (decl_data_type("type") + c_function = (decl_data_type("type") + ident("name") - + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + code_body("body")) c_function.ignore(cStyleComment) - + source_code = ''' - int is_odd(int x) { - return (x%2); + int is_odd(int x) { + return (x%2); } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { return (10+ord(hchar)-ord('A')); - } + } } ''' for func in c_function.searchString(source_code): print("%(name)s (%(type)s) args: %(args)s" % func) + prints:: + is_odd (int) args: [['int', 'x']] dec_to_hex (int) args: [['char', 'hchar']] """ @@ -5255,7 +5753,7 @@ def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.cop ).setParseAction(lambda t:t[0].strip())) else: if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + + content = (Combine(OneOrMore(~ignoreExpr + ~Literal(opener) + ~Literal(closer) + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) @@ -5274,23 +5772,24 @@ def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.cop return ret def indentedBlock(blockStatementExpr, indentStack, indent=True): - """ - Helper method for defining space-delimited indentation blocks, such as - those used to define block statements in Python source code. 
+ """Helper method for defining space-delimited indentation blocks, + such as those used to define block statements in Python source code. Parameters: + - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block + is repeated within the indented block - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single grammar - should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond the - the current level; set to False for block of left-most statements - (default=C{True}) + (multiple statementWithIndentedBlock expressions within a single + grammar should share a common indentStack) + - indent - boolean indicating whether block must be indented beyond + the the current level; set to False for block of left-most + statements (default= ``True``) - A valid block must contain at least one C{blockStatement}. + A valid block must contain at least one ``blockStatement``. Example:: + data = ''' def A(z): A1 @@ -5331,7 +5830,9 @@ def eggs(z): parseTree = module_body.parseString(data) parseTree.pprint() + prints:: + [['def', 'A', ['(', 'z', ')'], @@ -5349,7 +5850,7 @@ def eggs(z): 'spam', ['(', 'x', 'y', ')'], ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] """ def checkPeerIndent(s,l,t): if l >= len(s): return @@ -5399,51 +5900,61 @@ def replaceHTMLEntity(t): # it's easy to get these comment structures wrong - they're very common, so may as well make them available cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") -"Comment of the form C{/* ... */}" +"Comment of the form ``/* ... 
*/``" htmlComment = Regex(r"").setName("HTML comment") -"Comment of the form C{}" +"Comment of the form ````" restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") -"Comment of the form C{// ... (to end of line)}" +"Comment of the form ``// ... (to end of line)``" cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") -"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" +"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" javaStyleComment = cppStyleComment -"Same as C{L{cppStyleComment}}" +"Same as :class:`cppStyleComment`" pythonStyleComment = Regex(r"#.*").setName("Python style comment") -"Comment of the form C{# ... (to end of line)}" +"Comment of the form ``# ... (to end of line)``" _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional( Word(" \t") + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") -"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. - This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" +"""(Deprecated) Predefined expression of 1 or more printable words or +quoted strings, separated by commas. + +This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`. 
+""" # some other useful expressions - using lower-case class name since we are really using this as a namespace class pyparsing_common: - """ - Here are some common low-level expressions that may be useful in jump-starting parser development: - - numeric forms (L{integers}, L{reals}, L{scientific notation}) - - common L{programming identifiers} - - network addresses (L{MAC}, L{IPv4}, L{IPv6}) - - ISO8601 L{dates} and L{datetime} - - L{UUID} - - L{comma-separated list} + """Here are some common low-level expressions that may be useful in + jump-starting parser development: + + - numeric forms (:class:`integers`, :class:`reals`, + :class:`scientific notation`) + - common :class:`programming identifiers` + - network addresses (:class:`MAC`, + :class:`IPv4`, :class:`IPv6`) + - ISO8601 :class:`dates` and + :class:`datetime` + - :class:`UUID` + - :class:`comma-separated list` + Parse actions: - - C{L{convertToInteger}} - - C{L{convertToFloat}} - - C{L{convertToDate}} - - C{L{convertToDatetime}} - - C{L{stripHTMLTags}} - - C{L{upcaseTokens}} - - C{L{downcaseTokens}} + + - :class:`convertToInteger` + - :class:`convertToFloat` + - :class:`convertToDate` + - :class:`convertToDatetime` + - :class:`stripHTMLTags` + - :class:`upcaseTokens` + - :class:`downcaseTokens` Example:: + pyparsing_common.number.runTests(''' # any int or real number, returned as the appropriate type 100 @@ -5490,7 +6001,9 @@ class pyparsing_common: # uuid 12345678-1234-5678-1234-567812345678 ''') + prints:: + # any int or real number, returned as the appropriate type 100 [100] @@ -5592,7 +6105,8 @@ class pyparsing_common: """expression that parses a floating point number and returns a float""" sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional scientific notation and returns a float""" + """expression that parses a floating point number with 
optional + scientific notation and returns a float""" # streamlining this expression makes the docs nicer-looking number = (sci_real | real | signed_integer).streamline() @@ -5600,12 +6114,12 @@ class pyparsing_common: fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) """any int or real number, returned as float""" - + identifier = Word(alphas+'_', alphanums+'_').setName("identifier") """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - + ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (C{0.0.0.0 - 255.255.255.255})" + "IPv4 address (``0.0.0.0 - 255.255.255.255``)" _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") @@ -5614,7 +6128,7 @@ class pyparsing_common: _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") "IPv6 address (long, short, or mixed form)" - + mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" @@ -5624,13 +6138,16 @@ def convertToDate(fmt="%Y-%m-%d"): Helper to create a parse action for converting parsed date string to Python datetime.date Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) Example:: + date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) + prints:: + [datetime.date(1999, 12, 31)] """ def cvt_fn(s,l,t): @@ -5642,17 +6159,20 @@ def cvt_fn(s,l,t): @staticmethod def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): - """ - Helper to create a parse action for converting parsed datetime string to Python datetime.datetime + """Helper to create a parse action for converting parsed + datetime string to Python datetime.datetime Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) Example:: + dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) + prints:: + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] """ def cvt_fn(s,l,t): @@ -5663,31 +6183,34 @@ def cvt_fn(s,l,t): return cvt_fn iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") - "ISO8601 date (C{yyyy-mm-dd})" + "ISO8601 date (``yyyy-mm-dd``)" iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") - "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" + "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" uuid = 
Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") - "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" + "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() @staticmethod def stripHTMLTags(s, l, tokens): - """ - Parse action to remove HTML tags from web page HTML source + """Parse action to remove HTML tags from web page HTML source Example:: - # strip HTML links from normal text - text = 'More info at the
pyparsing wiki page' + + # strip HTML links from normal text + text = 'More info at the pyparsing wiki page' td,td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - - print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' + print(table_text.parseString(text).body) + + Prints:: + + More info at the pyparsing wiki page """ return pyparsing_common._html_stripper.transformString(tokens[0]) - _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + Optional( White(" \t") ) ) ).streamline().setName("commaItem") comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" @@ -5699,6 +6222,164 @@ def stripHTMLTags(s, l, tokens): """Parse action to convert tokens to lower case.""" +class _lazyclassproperty(object): + def __init__(self, fn): + self.fn = fn + self.__doc__ = fn.__doc__ + self.__name__ = fn.__name__ + + def __get__(self, obj, cls): + if cls is None: + cls = type(obj) + if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) for superclass in cls.__mro__[1:]): + cls._intern = {} + attrname = self.fn.__name__ + if attrname not in cls._intern: + cls._intern[attrname] = self.fn(cls) + return cls._intern[attrname] + + +class unicode_set(object): + """ + A set of Unicode characters, for language-specific strings for + ``alphas``, ``nums``, ``alphanums``, and ``printables``. 
+ A unicode_set is defined by a list of ranges in the Unicode character + set, in a class attribute ``_ranges``, such as:: + + _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] + + A unicode set can also be defined using multiple inheritance of other unicode sets:: + + class CJK(Chinese, Japanese, Korean): + pass + """ + _ranges = [] + + @classmethod + def _get_chars_for_ranges(cls): + ret = [] + for cc in cls.__mro__: + if cc is unicode_set: + break + for rr in cc._ranges: + ret.extend(range(rr[0], rr[-1]+1)) + return [unichr(c) for c in sorted(set(ret))] + + @_lazyclassproperty + def printables(cls): + "all non-whitespace characters in this range" + return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges())) + + @_lazyclassproperty + def alphas(cls): + "all alphabetic characters in this range" + return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges())) + + @_lazyclassproperty + def nums(cls): + "all numeric digit characters in this range" + return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges())) + + @_lazyclassproperty + def alphanums(cls): + "all alphanumeric characters in this range" + return cls.alphas + cls.nums + + +class pyparsing_unicode(unicode_set): + """ + A namespace class for defining common language unicode_sets. 
+ """ + _ranges = [(32, sys.maxunicode)] + + class Latin1(unicode_set): + "Unicode set for Latin-1 Unicode Character Range" + _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] + + class LatinA(unicode_set): + "Unicode set for Latin-A Unicode Character Range" + _ranges = [(0x0100, 0x017f),] + + class LatinB(unicode_set): + "Unicode set for Latin-B Unicode Character Range" + _ranges = [(0x0180, 0x024f),] + + class Greek(unicode_set): + "Unicode set for Greek Unicode Character Ranges" + _ranges = [ + (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d), + (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4), + (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe), + ] + + class Cyrillic(unicode_set): + "Unicode set for Cyrillic Unicode Character Range" + _ranges = [(0x0400, 0x04ff)] + + class Chinese(unicode_set): + "Unicode set for Chinese Unicode Character Range" + _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f), ] + + class Japanese(unicode_set): + "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" + _ranges = [ ] + + class Kanji(unicode_set): + "Unicode set for Kanji Unicode Character Range" + _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f), ] + + class Hiragana(unicode_set): + "Unicode set for Hiragana Unicode Character Range" + _ranges = [(0x3040, 0x309f), ] + + class Katakana(unicode_set): + "Unicode set for Katakana Unicode Character Range" + _ranges = [(0x30a0, 0x30ff), ] + + class Korean(unicode_set): + "Unicode set for Korean Unicode Character Range" + _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f), ] + + class CJK(Chinese, Japanese, Korean): + "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" + pass + + class Thai(unicode_set): + "Unicode set for Thai Unicode Character Range" + _ranges = 
[(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b), ] + + class Arabic(unicode_set): + "Unicode set for Arabic Unicode Character Range" + _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f), ] + + class Hebrew(unicode_set): + "Unicode set for Hebrew Unicode Character Range" + _ranges = [(0x0590, 0x05ff), ] + + class Devanagari(unicode_set): + "Unicode set for Devanagari Unicode Character Range" + _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)] + +pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges + + pyparsing_unicode.Japanese.Hiragana._ranges + + pyparsing_unicode.Japanese.Katakana._ranges) + +# define ranges in language character sets +if PY_3: + setattr(pyparsing_unicode, "العربية", pyparsing_unicode.Arabic) + setattr(pyparsing_unicode, "中文", pyparsing_unicode.Chinese) + setattr(pyparsing_unicode, "кириллица", pyparsing_unicode.Cyrillic) + setattr(pyparsing_unicode, "Ελληνικά", pyparsing_unicode.Greek) + setattr(pyparsing_unicode, "עִברִית", pyparsing_unicode.Hebrew) + setattr(pyparsing_unicode, "日本語", pyparsing_unicode.Japanese) + setattr(pyparsing_unicode.Japanese, "漢字", pyparsing_unicode.Japanese.Kanji) + setattr(pyparsing_unicode.Japanese, "カタカナ", pyparsing_unicode.Japanese.Katakana) + setattr(pyparsing_unicode.Japanese, "ひらがな", pyparsing_unicode.Japanese.Hiragana) + setattr(pyparsing_unicode, "한국어", pyparsing_unicode.Korean) + setattr(pyparsing_unicode, "ไทย", pyparsing_unicode.Thai) + setattr(pyparsing_unicode, "देवनागरी", pyparsing_unicode.Devanagari) + + if __name__ == "__main__": selectToken = CaselessLiteral("select") @@ -5712,7 +6393,7 @@ def stripHTMLTags(s, l, tokens): tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) tableNameList = Group(delimitedList(tableName)).setName("tables") - + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") # demo runTests method, including embedded comments in test string diff --git 
a/pipenv/vendor/pythonfinder/__init__.py b/pipenv/vendor/pythonfinder/__init__.py index 8e12a198d7..9421573d03 100644 --- a/pipenv/vendor/pythonfinder/__init__.py +++ b/pipenv/vendor/pythonfinder/__init__.py @@ -1,6 +1,6 @@ -from __future__ import print_function, absolute_import +from __future__ import absolute_import, print_function -__version__ = '1.1.10' +__version__ = '1.1.11' # Add NullHandler to "pythonfinder" logger, because Python2's default root # logger has no handler and warnings like this would be reported: diff --git a/pipenv/vendor/pythonfinder/__main__.py b/pipenv/vendor/pythonfinder/__main__.py index c804d573a9..3083e72d90 100644 --- a/pipenv/vendor/pythonfinder/__main__.py +++ b/pipenv/vendor/pythonfinder/__main__.py @@ -1,12 +1,17 @@ +#!env python +# -*- coding=utf-8 -*- + from __future__ import absolute_import import os import sys +from pythonfinder.cli import cli + + PYTHONFINDER_MAIN = os.path.dirname(os.path.abspath(__file__)) PYTHONFINDER_PACKAGE = os.path.dirname(PYTHONFINDER_MAIN) -from pythonfinder import cli as cli if __name__ == "__main__": sys.exit(cli()) diff --git a/pipenv/vendor/pythonfinder/cli.py b/pipenv/vendor/pythonfinder/cli.py index 221cb2fda3..7dc4ebd456 100644 --- a/pipenv/vendor/pythonfinder/cli.py +++ b/pipenv/vendor/pythonfinder/cli.py @@ -1,9 +1,11 @@ -#!/usr/bin/env python # -*- coding=utf-8 -*- -from __future__ import print_function, absolute_import +from __future__ import absolute_import, print_function, unicode_literals + +import sys + import click import crayons -import sys + from . import __version__ from .pythonfinder import Finder @@ -17,10 +19,18 @@ @click.option( "--version", is_flag=True, default=False, help="Display PythonFinder version." 
) -@click.option("--ignore-unsupported/--no-unsupported", is_flag=True, default=True, envvar="PYTHONFINDER_IGNORE_UNSUPPORTED", help="Ignore unsupported python versions.") -@click.version_option(prog_name='pyfinder', version=__version__) +@click.option( + "--ignore-unsupported/--no-unsupported", + is_flag=True, + default=True, + envvar="PYTHONFINDER_IGNORE_UNSUPPORTED", + help="Ignore unsupported python versions.", +) +@click.version_option(prog_name="pyfinder", version=__version__) @click.pass_context -def cli(ctx, find=False, which=False, findall=False, version=False, ignore_unsupported=True): +def cli( + ctx, find=False, which=False, findall=False, version=False, ignore_unsupported=True +): if version: click.echo( "{0} version {1}".format( diff --git a/pipenv/vendor/pythonfinder/environment.py b/pipenv/vendor/pythonfinder/environment.py index e8878403bf..ce21fb79b4 100644 --- a/pipenv/vendor/pythonfinder/environment.py +++ b/pipenv/vendor/pythonfinder/environment.py @@ -1,5 +1,6 @@ # -*- coding=utf-8 -*- -from __future__ import print_function, absolute_import +from __future__ import absolute_import, print_function + import os import platform import sys @@ -34,3 +35,12 @@ def is_type_checking(): IGNORE_UNSUPPORTED = bool(os.environ.get("PYTHONFINDER_IGNORE_UNSUPPORTED", False)) MYPY_RUNNING = os.environ.get("MYPY_RUNNING", is_type_checking()) + +def get_shim_paths(): + shim_paths = [] + if ASDF_INSTALLED: + shim_paths.append(os.path.join(ASDF_DATA_DIR, "shims")) + if PYENV_INSTALLED: + shim_paths.append(os.path.join(PYENV_ROOT, "shims")) + return [os.path.normpath(os.path.normcase(p)) for p in shim_paths] +SHIM_PATHS = get_shim_paths() diff --git a/pipenv/vendor/pythonfinder/exceptions.py b/pipenv/vendor/pythonfinder/exceptions.py index df381daf62..adfac16ba1 100644 --- a/pipenv/vendor/pythonfinder/exceptions.py +++ b/pipenv/vendor/pythonfinder/exceptions.py @@ -1,5 +1,5 @@ # -*- coding=utf-8 -*- -from __future__ import print_function, absolute_import +from 
__future__ import absolute_import, print_function class InvalidPythonVersion(Exception): diff --git a/pipenv/vendor/pythonfinder/models/mixins.py b/pipenv/vendor/pythonfinder/models/mixins.py index 7d4065484c..31c3c20266 100644 --- a/pipenv/vendor/pythonfinder/models/mixins.py +++ b/pipenv/vendor/pythonfinder/models/mixins.py @@ -2,16 +2,60 @@ from __future__ import absolute_import, unicode_literals import abc -import attr import operator + +from collections import defaultdict + +import attr import six -from ..utils import ensure_path, KNOWN_EXTS, unnest +from cached_property import cached_property +from vistir.compat import fs_str + +from ..environment import MYPY_RUNNING +from ..exceptions import InvalidPythonVersion +from ..utils import ( + KNOWN_EXTS, Sequence, expand_paths, looks_like_python, + path_is_known_executable +) + + +if MYPY_RUNNING: + from .path import PathEntry + from .python import PythonVersion + from typing import ( + Optional, + Union, + Any, + Dict, + Iterator, + List, + DefaultDict, + Generator, + Tuple, + TypeVar, + Type, + ) + from vistir.compat import Path + + BaseFinderType = TypeVar("BaseFinderType") @attr.s class BasePath(object): + path = attr.ib(default=None) # type: Path + _children = attr.ib(default=attr.Factory(dict)) # type: Dict[str, PathEntry] + only_python = attr.ib(default=False) # type: bool + name = attr.ib(type=str) + _py_version = attr.ib(default=None) # type: Optional[PythonVersion] + _pythons = attr.ib(default=attr.Factory(defaultdict)) # type: DefaultDict[str, PathEntry] + + def __str__(self): + # type: () -> str + return fs_str("{0}".format(self.path.as_posix())) + def which(self, name): + # type: (str) -> Optional[PathEntry] """Search in this path for an executable. :param executable: The name of an executable to search for. 
@@ -24,26 +68,165 @@ def which(self, name): for ext in KNOWN_EXTS ] children = self.children - found = next( - ( - children[(self.path / child).as_posix()] - for child in valid_names - if (self.path / child).as_posix() in children - ), - None, - ) + found = None + if self.path is not None: + found = next( + ( + children[(self.path / child).as_posix()] + for child in valid_names + if (self.path / child).as_posix() in children + ), + None, + ) return found + def __del__(self): + for key in ["as_python", "is_dir", "is_python", "is_executable", "py_version"]: + if key in self.__dict__: + del self.__dict__[key] + self._children = {} + for key in list(self._pythons.keys()): + del self._pythons[key] + + @property + def children(self): + # type: () -> Dict[str, PathEntry] + if not self.is_dir: + return {} + return self._children + + @cached_property + def as_python(self): + # type: () -> PythonVersion + py_version = None + if self.py_version: + return self.py_version + if not self.is_dir and self.is_python: + try: + from .python import PythonVersion + + py_version = PythonVersion.from_path( # type: ignore + path=self, name=self.name + ) + except (ValueError, InvalidPythonVersion): + pass + if py_version is None: + pass + return py_version # type: ignore + + @name.default + def get_name(self): + # type: () -> Optional[str] + if self.path: + return self.path.name + return None + + @cached_property + def is_dir(self): + # type: () -> bool + if not self.path: + return False + try: + ret_val = self.path.is_dir() + except OSError: + ret_val = False + return ret_val + + @cached_property + def is_executable(self): + # type: () -> bool + if not self.path: + return False + return path_is_known_executable(self.path) + + @cached_property + def is_python(self): + # type: () -> bool + if not self.path: + return False + return self.is_executable and (looks_like_python(self.path.name)) + + def get_py_version(self): + # type: () -> Optional[PythonVersion] + from ..environment import 
IGNORE_UNSUPPORTED + + if self.is_dir: + return None + if self.is_python: + py_version = None + from .python import PythonVersion + + try: + py_version = PythonVersion.from_path( # type: ignore + path=self, name=self.name + ) + except (InvalidPythonVersion, ValueError): + py_version = None + except Exception: + if not IGNORE_UNSUPPORTED: + raise + return py_version + return None + + @cached_property + def py_version(self): + # type: () -> Optional[PythonVersion] + if not self._py_version: + py_version = self.get_py_version() + self._py_version = py_version + else: + py_version = self._py_version + return py_version + + def _iter_pythons(self): + # type: () -> Iterator + if self.is_dir: + for entry in self.children.values(): + if entry is None: + continue + elif entry.is_dir: + for python in entry._iter_pythons(): + yield python + elif entry.is_python and entry.as_python is not None: + yield entry + elif self.is_python and self.as_python is not None: + yield self # type: ignore + + @property + def pythons(self): + # type: () -> DefaultDict[Union[str, Path], PathEntry] + if not self._pythons: + from .path import PathEntry + self._pythons = defaultdict(PathEntry) + for python in self._iter_pythons(): + python_path = python.path.as_posix() # type: ignore + self._pythons[python_path] = python + return self._pythons + + def __iter__(self): + # type: () -> Iterator + for entry in self.children.values(): + yield entry + + def __next__(self): + # type: () -> Generator + return next(iter(self)) + + def next(self): + # type: () -> Generator + return self.__next__() + def find_all_python_versions( self, - major=None, - minor=None, - patch=None, - pre=None, - dev=None, - arch=None, - name=None, + major=None, # type: Optional[Union[str, int]] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + name=None, # type: Optional[str] ): + # type: (...) 
-> List[PathEntry] """Search for a specific python version on the path. Return all copies :param major: Major python version to search for. @@ -62,31 +245,29 @@ def find_all_python_versions( "find_all_python_versions" if self.is_dir else "find_python_version" ) sub_finder = operator.methodcaller( - call_method, - major=major, - minor=minor, - patch=patch, - pre=pre, - dev=dev, - arch=arch, - name=name, + call_method, major, minor, patch, pre, dev, arch, name ) if not self.is_dir: return sub_finder(self) - path_filter = filter(None, (sub_finder(p) for p in self.children.values())) + unnested = [ + sub_finder(path) for path in expand_paths(self) + ] version_sort = operator.attrgetter("as_python.version_sort") - return [c for c in sorted(path_filter, key=version_sort, reverse=True)] + unnested = [p for p in unnested if p is not None and p.as_python is not None] + paths = sorted(unnested, key=version_sort, reverse=True) + return list(paths) def find_python_version( self, - major=None, - minor=None, - patch=None, - pre=None, - dev=None, - arch=None, - name=None, + major=None, # type: Optional[Union[str, int]] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + name=None, # type: Optional[str] ): + # type: (...) -> Optional[PathEntry] """Search or self for the specified Python version and return the first match. :param major: Major version number. 
@@ -101,55 +282,63 @@ def find_python_version( """ version_matcher = operator.methodcaller( - "matches", - major=major, - minor=minor, - patch=patch, - pre=pre, - dev=dev, - arch=arch, - name=name, + "matches", major, minor, patch, pre, dev, arch, python_name=name ) - is_py = operator.attrgetter("is_python") - py_version = operator.attrgetter("as_python") if not self.is_dir: if self.is_python and self.as_python and version_matcher(self.py_version): - return attr.evolve(self) - return - finder = ( - (child, child.as_python) - for child in unnest(self.pythons.values()) - if child.as_python - ) - py_filter = filter( - None, filter(lambda child: version_matcher(child[1]), finder) - ) - version_sort = operator.attrgetter("version_sort") - return next( - ( - c[0] - for c in sorted( - py_filter, key=lambda child: child[1].version_sort, reverse=True - ) - ), - None, + return self # type: ignore + + matching_pythons = [ + [entry, entry.as_python.version_sort] + for entry in self._iter_pythons() + if (entry is not None and entry.as_python is not None and + version_matcher(entry.py_version)) + ] + results = sorted(matching_pythons, + key=operator.itemgetter(1, 0), + reverse=True, ) + return next(iter(r[0] for r in results if r is not None), None) @six.add_metaclass(abc.ABCMeta) class BaseFinder(object): + def __init__(self): + #: Maps executable paths to PathEntries + from .path import PathEntry + + self._pythons = defaultdict(PathEntry) # type: DefaultDict[str, PathEntry] + self._versions = defaultdict(PathEntry) # type: Dict[Tuple, PathEntry] + def get_versions(self): + # type: () -> DefaultDict[Tuple, PathEntry] """Return the available versions from the finder""" raise NotImplementedError @classmethod - def create(cls): + def create(cls, # type: Type[BaseFinderType] + *args, # type: Any + **kwargs # type: Any + ): + # type: (...) 
-> BaseFinderType raise NotImplementedError @property def version_paths(self): - return self.versions.values() + # type: () -> Any + return self._versions.values() @property def expanded_paths(self): + # type: () -> Any return (p.paths.values() for p in self.version_paths) + + @property + def pythons(self): + # type: () -> DefaultDict[str, PathEntry] + return self._pythons + + @pythons.setter + def pythons(self, value): + # type: (DefaultDict[str, PathEntry]) -> None + self._pythons = value diff --git a/pipenv/vendor/pythonfinder/models/path.py b/pipenv/vendor/pythonfinder/models/path.py index 221d892f12..005a17b1ae 100644 --- a/pipenv/vendor/pythonfinder/models/path.py +++ b/pipenv/vendor/pythonfinder/models/path.py @@ -13,54 +13,87 @@ import six from cached_property import cached_property - from vistir.compat import Path, fs_str -from .mixins import BasePath -from ..environment import PYENV_INSTALLED, PYENV_ROOT, ASDF_INSTALLED, ASDF_DATA_DIR +from ..environment import ( + ASDF_DATA_DIR, ASDF_INSTALLED, MYPY_RUNNING, PYENV_INSTALLED, PYENV_ROOT, + SHIM_PATHS +) from ..exceptions import InvalidPythonVersion from ..utils import ( - ensure_path, - filter_pythons, - looks_like_python, - optional_instance_of, - path_is_known_executable, - unnest, - normalize_path, - parse_pyenv_version_order, - parse_asdf_version_order + Iterable, Sequence, ensure_path, expand_paths, filter_pythons, is_in_path, + looks_like_python, normalize_path, optional_instance_of, + parse_asdf_version_order, parse_pyenv_version_order, + path_is_known_executable, unnest ) +from .mixins import BaseFinder, BasePath from .python import PythonVersion -ASDF_SHIM_PATH = normalize_path(os.path.join(ASDF_DATA_DIR, "shims")) -PYENV_SHIM_PATH = normalize_path(os.path.join(PYENV_ROOT, "shims")) -SHIM_PATHS = [ASDF_SHIM_PATH, PYENV_SHIM_PATH] +if MYPY_RUNNING: + from typing import ( + Optional, Dict, DefaultDict, Iterator, List, Union, Tuple, Generator, Callable, + Type, Any, TypeVar + ) + from .mixins import 
BaseFinder + from .python import PythonFinder + from .windows import WindowsFinder + FinderType = TypeVar('FinderType', BaseFinder, PythonFinder, WindowsFinder) + ChildType = Union[PythonFinder, PathEntry] + PathType = Union[PythonFinder, PathEntry] @attr.s class SystemPath(object): global_search = attr.ib(default=True) - paths = attr.ib(default=attr.Factory(defaultdict)) - _executables = attr.ib(default=attr.Factory(list)) - _python_executables = attr.ib(default=attr.Factory(list)) - path_order = attr.ib(default=attr.Factory(list)) - python_version_dict = attr.ib(default=attr.Factory(defaultdict)) - only_python = attr.ib(default=False) - pyenv_finder = attr.ib(default=None, validator=optional_instance_of("PyenvPath")) - asdf_finder = attr.ib(default=None) - system = attr.ib(default=False) - _version_dict = attr.ib(default=attr.Factory(defaultdict)) - ignore_unsupported = attr.ib(default=False) - - __finders = attr.ib(default=attr.Factory(dict)) + paths = attr.ib(default=attr.Factory(defaultdict)) # type: DefaultDict[str, Union[PythonFinder, PathEntry]] + _executables = attr.ib(default=attr.Factory(list)) # type: List[PathEntry] + _python_executables = attr.ib(default=attr.Factory(dict)) # type: Dict[str, PathEntry] + path_order = attr.ib(default=attr.Factory(list)) # type: List[str] + python_version_dict = attr.ib() # type: DefaultDict[Tuple, List[PythonVersion]] + only_python = attr.ib(default=False, type=bool) + pyenv_finder = attr.ib(default=None, validator=optional_instance_of("PythonFinder")) # type: Optional[PythonFinder] + asdf_finder = attr.ib(default=None) # type: Optional[PythonFinder] + system = attr.ib(default=False, type=bool) + _version_dict = attr.ib(default=attr.Factory(defaultdict)) # type: DefaultDict[Tuple, List[PathEntry]] + ignore_unsupported = attr.ib(default=False, type=bool) + + __finders = attr.ib(default=attr.Factory(dict)) # type: Dict[str, Union[WindowsFinder, PythonFinder]] def _register_finder(self, finder_name, finder): + # type: 
(str, Union[WindowsFinder, PythonFinder]) -> None if finder_name not in self.__finders: self.__finders[finder_name] = finder + def clear_caches(self): + for key in ["executables", "python_executables", "version_dict", "path_entries"]: + if key in self.__dict__: + del self.__dict__[key] + self._executables = [] + self._python_executables = {} + self.python_version_dict = defaultdict(list) + self._version_dict = defaultdict(list) + + def __del__(self): + self.clear_caches() + self.path_order = [] + self.pyenv_finder = None + self.asdf_finder = None + self.paths = defaultdict(PathEntry) + + @property + def finders(self): + # type: () -> List[str] + return [k for k in self.__finders.keys()] + + @python_version_dict.default + def create_python_version_dict(self): + # type: () -> DefaultDict[Tuple, List[PythonVersion]] + return defaultdict(list) + @cached_property def executables(self): + # type: () -> List[PathEntry] self.executables = [ p for p in chain(*(child.children.values() for child in self.paths.values())) @@ -70,6 +103,7 @@ def executables(self): @cached_property def python_executables(self): + # type: () -> Dict[str, PathEntry] python_executables = {} for child in self.paths.values(): if child.pythons: @@ -82,31 +116,28 @@ def python_executables(self): @cached_property def version_dict(self): - self._version_dict = defaultdict(list) + # type: () -> DefaultDict[Tuple, List[PathEntry]] + self._version_dict = defaultdict(list) # type: DefaultDict[Tuple, List[PathEntry]] for finder_name, finder in self.__finders.items(): for version, entry in finder.versions.items(): if finder_name == "windows": if entry not in self._version_dict[version]: self._version_dict[version].append(entry) continue - if type(entry).__name__ == "VersionPath": - for path in entry.paths.values(): - if path not in self._version_dict[version] and path.is_python: - self._version_dict[version].append(path) - continue - continue - elif entry not in self._version_dict[version] and entry.is_python: 
+ if entry not in self._version_dict[version] and entry.is_python: self._version_dict[version].append(entry) for p, entry in self.python_executables.items(): version = entry.as_python if not version: continue - version = version.version_tuple + if not isinstance(version, tuple): + version = version.version_tuple if version and entry not in self._version_dict[version]: self._version_dict[version].append(entry) return self._version_dict def __attrs_post_init__(self): + # type: () -> None #: slice in pyenv if not self.__class__ == SystemPath: return @@ -124,7 +155,7 @@ def __attrs_post_init__(self): if venv and (self.system or self.global_search): p = ensure_path(venv) self.path_order = [(p / bin_dir).as_posix()] + self.path_order - self.paths[p] = PathEntry.create(path=p, is_root=True, only_python=False) + self.paths[p] = self.get_path(p.joinpath(bin_dir)) if self.system: syspath = Path(sys.executable) syspath_bin = syspath.parent @@ -136,26 +167,35 @@ def __attrs_post_init__(self): ) def _get_last_instance(self, path): + # type: (str) -> int reversed_paths = reversed(self.path_order) paths = [normalize_path(p) for p in reversed_paths] normalized_target = normalize_path(path) last_instance = next( iter(p for p in paths if normalized_target in p), None ) - try: - path_index = self.path_order.index(last_instance) - except ValueError: - return + if last_instance is None: + raise ValueError("No instance found on path for target: {0!s}".format(path)) + path_index = self.path_order.index(last_instance) return path_index def _slice_in_paths(self, start_idx, paths): - before_path = self.path_order[: start_idx + 1] - after_path = self.path_order[start_idx + 2 :] + # type: (int, List[Path]) -> None + before_path = [] # type: List[str] + after_path = [] # type: List[str] + if start_idx == 0: + after_path = self.path_order[:] + elif start_idx == -1: + before_path = self.path_order[:] + else: + before_path = self.path_order[: start_idx + 1] + after_path = 
self.path_order[start_idx + 2 :] self.path_order = ( before_path + [p.as_posix() for p in paths] + after_path ) def _remove_path(self, path): + # type: (str) -> None path_copy = [p for p in reversed(self.path_order[:])] new_order = [] target = normalize_path(path) @@ -164,7 +204,7 @@ def _remove_path(self, path): for pth in self.paths.keys() } if target in path_map: - del self.paths[path_map.get(target)] + del self.paths[path_map[target]] for current_path in path_copy: normalized = normalize_path(current_path) if normalized != target: @@ -173,41 +213,80 @@ def _remove_path(self, path): self.path_order = new_order def _setup_asdf(self): + # type: () -> None from .python import PythonFinder + os_path = os.environ["PATH"].split(os.pathsep) self.asdf_finder = PythonFinder.create( root=ASDF_DATA_DIR, ignore_unsupported=True, sort_function=parse_asdf_version_order, version_glob_path="installs/python/*") - asdf_index = self._get_last_instance(ASDF_DATA_DIR) - if not asdf_index: + asdf_index = None + try: + asdf_index = self._get_last_instance(ASDF_DATA_DIR) + except ValueError: + pyenv_index = 0 if is_in_path(next(iter(os_path), ""), PYENV_ROOT) else -1 + if asdf_index is None: # we are in a virtualenv without global pyenv on the path, so we should # not write pyenv to the path here return root_paths = [p for p in self.asdf_finder.roots] - self._slice_in_paths(asdf_index, root_paths) + self._slice_in_paths(asdf_index, [self.asdf_finder.root]) + self.paths[self.asdf_finder.root] = self.asdf_finder self.paths.update(self.asdf_finder.roots) self._remove_path(normalize_path(os.path.join(ASDF_DATA_DIR, "shims"))) self._register_finder("asdf", self.asdf_finder) + def reload_finder(self, finder_name): + # type: (str) -> None + if finder_name is None: + raise TypeError("Must pass a string as the name of the target finder") + finder_attr = "{0}_finder".format(finder_name) + setup_attr = "_setup_{0}".format(finder_name) + try: + current_finder = getattr(self, finder_attr) # type: 
Any + except AttributeError: + raise ValueError("Must pass a valid finder to reload.") + try: + setup_fn = getattr(self, setup_attr) + except AttributeError: + raise ValueError("Finder has no valid setup function: %s" % finder_name) + if current_finder is None: + # TODO: This is called 'reload', should we load a new finder for the first + # time here? lets just skip that for now to avoid unallowed finders + pass + if (finder_name == "pyenv" and not PYENV_INSTALLED) or (finder_name == "asdf" and not ASDF_INSTALLED): + # Don't allow loading of finders that aren't explicitly 'installed' as it were + pass + setattr(self, finder_attr, None) + if finder_name in self.__finders: + del self.__finders[finder_name] + setup_fn() + def _setup_pyenv(self): + # type: () -> None from .python import PythonFinder + os_path = os.environ["PATH"].split(os.pathsep) self.pyenv_finder = PythonFinder.create( - root=PYENV_ROOT, sort_function=parse_pyenv_version_order, - version_glob_path="versions/*", ignore_unsupported=self.ignore_unsupported - ) - pyenv_index = self._get_last_instance(PYENV_ROOT) - if not pyenv_index: + root=PYENV_ROOT, sort_function=parse_pyenv_version_order, version_glob_path="versions/*", ignore_unsupported=self.ignore_unsupported) + pyenv_index = None + try: + pyenv_index = self._get_last_instance(PYENV_ROOT) + except ValueError: + pyenv_index = 0 if is_in_path(next(iter(os_path), ""), PYENV_ROOT) else -1 + if pyenv_index is None: # we are in a virtualenv without global pyenv on the path, so we should # not write pyenv to the path here return - root_paths = [p for p in self.pyenv_finder.roots] - self._slice_in_paths(pyenv_index, root_paths) + root_paths = [p for p in self.pyenv_finder.roots] + self._slice_in_paths(pyenv_index, [self.pyenv_finder.root]) + self.paths[self.pyenv_finder.root] = self.pyenv_finder self.paths.update(self.pyenv_finder.roots) self._remove_path(os.path.join(PYENV_ROOT, "shims")) self._register_finder("pyenv", self.pyenv_finder) def 
_setup_windows(self): + # type: () -> None from .windows import WindowsFinder self.windows_finder = WindowsFinder.create() @@ -218,6 +297,9 @@ def _setup_windows(self): self._register_finder("windows", self.windows_finder) def get_path(self, path): + # type: (Union[str, Path]) -> PathType + if path is None: + raise TypeError("A path must be provided in order to generate a path entry.") path = ensure_path(path) _path = self.paths.get(path) if not _path: @@ -227,69 +309,90 @@ def get_path(self, path): path=path.absolute(), is_root=True, only_python=self.only_python ) self.paths[path.as_posix()] = _path + if not _path: + raise ValueError("Path not found or generated: {0!r}".format(path)) return _path def _get_paths(self): - return (self.get_path(k) for k in self.path_order) + # type: () -> Iterator + for path in self.path_order: + try: + entry = self.get_path(path) + except ValueError: + continue + else: + yield entry @cached_property def path_entries(self): - paths = self._get_paths() + # type: () -> List[Union[PathEntry, FinderType]] + paths = list(self._get_paths()) return paths def find_all(self, executable): - """Search the path for an executable. Return all copies. + # type: (str) -> List[Union[PathEntry, FinderType]] + """ + Search the path for an executable. Return all copies. :param executable: Name of the executable :type executable: str :returns: List[PathEntry] """ - sub_which = operator.methodcaller("which", name=executable) + + sub_which = operator.methodcaller("which", executable) filtered = (sub_which(self.get_path(k)) for k in self.path_order) return list(filtered) def which(self, executable): - """Search for an executable on the path. + # type: (str) -> Union[PathEntry, None] + """ + Search for an executable on the path. :param executable: Name of the executable to be located. :type executable: str :returns: :class:`~pythonfinder.models.PathEntry` object. 
""" - sub_which = operator.methodcaller("which", name=executable) + + sub_which = operator.methodcaller("which", executable) filtered = (sub_which(self.get_path(k)) for k in self.path_order) return next(iter(f for f in filtered if f is not None), None) def _filter_paths(self, finder): - return ( - pth for pth in unnest(finder(p) for p in self.path_entries if p is not None) - if pth is not None - ) + # type: (Callable) -> Iterator + for path in self._get_paths(): + if path is None: + continue + python_versions = finder(path) + if python_versions is not None: + for python in python_versions: + if python is not None: + yield python def _get_all_pythons(self, finder): - paths = {p.path.as_posix(): p for p in self._filter_paths(finder)} - paths.update(self.python_executables) - return (p for p in paths.values() if p is not None) + # type: (Callable) -> Iterator + for python in self._filter_paths(finder): + if python is not None and python.is_python: + yield python def get_pythons(self, finder): + # type: (Callable) -> Iterator sort_key = operator.attrgetter("as_python.version_sort") - return ( - k for k in sorted( - (p for p in self._filter_paths(finder) if p.is_python), - key=sort_key, - reverse=True - ) if k is not None - ) + pythons = [entry for entry in self._get_all_pythons(finder)] + for python in sorted(pythons, key=sort_key, reverse=True): + if python is not None: + yield python def find_all_python_versions( self, - major=None, - minor=None, - patch=None, - pre=None, - dev=None, - arch=None, - name=None, + major=None, # type: Optional[Union[str, int]] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + name=None, # type: Optional[str] ): + # type (...) -> List[PathEntry] """Search for a specific python version on the path. Return all copies :param major: Major python version to search for. 
@@ -305,21 +408,12 @@ def find_all_python_versions( """ sub_finder = operator.methodcaller( - "find_all_python_versions", - major=major, - minor=minor, - patch=patch, - pre=pre, - dev=dev, - arch=arch, - name=name, + "find_all_python_versions", major, minor, patch, pre, dev, arch, name ) alternate_sub_finder = None if major and not (minor or patch or pre or dev or arch or name): alternate_sub_finder = operator.methodcaller( - "find_all_python_versions", - major=None, - name=major + "find_all_python_versions", None, None, None, None, None, None, major ) if os.name == "nt" and self.windows_finder: windows_finder_version = sub_finder(self.windows_finder) @@ -332,14 +426,15 @@ def find_all_python_versions( def find_python_version( self, - major=None, - minor=None, - patch=None, - pre=None, - dev=None, - arch=None, - name=None, + major=None, # type: Optional[Union[str, int]] + minor=None, # type: Optional[Union[str, int]] + patch=None, # type: Optional[Union[str, int]] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + name=None, # type: Optional[str] ): + # type: (...) -> PathEntry """Search for a specific python version on the path. :param major: Major python version to search for. 
@@ -356,33 +451,31 @@ def find_python_version( if isinstance(major, six.string_types) and not minor and not patch: # Only proceed if this is in the format "x.y.z" or similar - if major.count(".") > 0 and major[0].isdigit(): + if major.isdigit() or (major.count(".") > 0 and major[0].isdigit()): version = major.split(".", 2) - if len(version) > 3: - major, minor, patch, rest = version - elif len(version) == 3: - major, minor, patch = version + if isinstance(version, (tuple, list)): + if len(version) > 3: + major, minor, patch, rest = version + elif len(version) == 3: + major, minor, patch = version + elif len(version) == 2: + major, minor = version + else: + major = major[0] else: - major, minor = version + major = major + name = None else: name = "{0!s}".format(major) major = None sub_finder = operator.methodcaller( "find_python_version", - major, - minor=minor, - patch=patch, - pre=pre, - dev=dev, - arch=arch, - name=name, + major, minor, patch, pre, dev, arch, name, ) alternate_sub_finder = None - if major and not (minor or patch or pre or dev or arch or name): + if name and not (minor or patch or pre or dev or arch or major): alternate_sub_finder = operator.methodcaller( - "find_all_python_versions", - major=None, - name=major + "find_all_python_versions", None, None, None, None, None, None, name ) if major and minor and patch: _tuple_pre = pre if pre is not None else False @@ -406,12 +499,13 @@ def find_python_version( @classmethod def create( cls, - path=None, - system=False, - only_python=False, - global_search=True, - ignore_unsupported=True, + path=None, # type: str + system=False, # type: bool + only_python=False, # type: bool + global_search=True, # type: bool + ignore_unsupported=True, # type: bool ): + # type: (...) -> SystemPath """Create a new :class:`pythonfinder.models.SystemPath` instance. 
:param path: Search path to prepend when searching, defaults to None @@ -423,14 +517,16 @@ def create( :rtype: :class:`pythonfinder.models.SystemPath` """ - path_entries = defaultdict(PathEntry) - paths = [] + path_entries = defaultdict(PathEntry) # type: DefaultDict[str, Union[PythonFinder, PathEntry]] + paths = [] # type: List[str] if ignore_unsupported: os.environ["PYTHONFINDER_IGNORE_UNSUPPORTED"] = fs_str("1") if global_search: - paths = os.environ.get("PATH").split(os.pathsep) + if "PATH" in os.environ: + paths = os.environ["PATH"].split(os.pathsep) if path: paths = [path] + paths + paths = [p for p in paths if not any(is_in_path(p, shim) for shim in SHIM_PATHS)] _path_objects = [ensure_path(p.strip('"')) for p in paths] paths = [p.as_posix() for p in _path_objects] path_entries.update( @@ -439,7 +535,6 @@ def create( path=p.absolute(), is_root=True, only_python=only_python ) for p in _path_objects - if not any(shim in normalize_path(str(p)) for shim in SHIM_PATHS) } ) return cls( @@ -454,18 +549,15 @@ def create( @attr.s(slots=True) class PathEntry(BasePath): - path = attr.ib(default=None, validator=optional_instance_of(Path)) - _children = attr.ib(default=attr.Factory(dict)) - is_root = attr.ib(default=True) - only_python = attr.ib(default=False) - name = attr.ib() - py_version = attr.ib() - _pythons = attr.ib(default=attr.Factory(defaultdict)) + is_root = attr.ib(default=True, type=bool) - def __str__(self): - return fs_str("{0}".format(self.path.as_posix())) + def __del__(self): + if "_children" in self.__dict__: + del self.__dict__["_children"] + BasePath.__del__(self) def _filter_children(self): + # type: () -> Iterator[Path] if self.only_python: children = filter_pythons(self.path) else: @@ -473,86 +565,47 @@ def _filter_children(self): return children def _gen_children(self): + # type: () -> Iterator + from ..environment import get_shim_paths + shim_paths = get_shim_paths() pass_name = self.name != self.path.name pass_args = {"is_root": False, 
"only_python": self.only_python} if pass_name: - pass_args["name"] = self.name + if self.name is not None and isinstance(self.name, six.string_types): + pass_args["name"] = self.name # type: ignore + elif self.path is not None and isinstance(self.path.name, six.string_types): + pass_args["name"] = self.path.name # type: ignore if not self.is_dir: - yield (self.path.as_posix(), copy.deepcopy(self)) + yield (self.path.as_posix(), self) elif self.is_root: for child in self._filter_children(): - if any(shim in normalize_path(str(child)) for shim in SHIM_PATHS): + if any(is_in_path(str(child), shim) for shim in shim_paths): continue if self.only_python: try: - entry = PathEntry.create(path=child, **pass_args) + entry = PathEntry.create(path=child, **pass_args) # type: ignore except (InvalidPythonVersion, ValueError): continue else: - entry = PathEntry.create(path=child, **pass_args) + entry = PathEntry.create(path=child, **pass_args) # type: ignore yield (child.as_posix(), entry) return @cached_property def children(self): - if not self._children: - children = {} + # type: () -> Dict[str, PathEntry] + children = getattr(self, "_children", {}) # type: Dict[str, PathEntry] + if not children: for child_key, child_val in self._gen_children(): children[child_key] = child_val self._children = children return self._children - @name.default - def get_name(self): - return self.path.name - - @py_version.default - def get_py_version(self): - from ..environment import IGNORE_UNSUPPORTED - if self.is_dir: - return None - if self.is_python: - py_version = None - try: - py_version = PythonVersion.from_path(path=self, name=self.name) - except (InvalidPythonVersion, ValueError): - py_version = None - except Exception: - if not IGNORE_UNSUPPORTED: - raise - return py_version - return - - @property - def pythons(self): - if not self._pythons: - if self.is_dir: - for path, entry in self.children.items(): - _path = ensure_path(entry.path) - if entry.is_python: - 
self._pythons[_path.as_posix()] = entry - else: - if self.is_python: - _path = ensure_path(self.path) - self._pythons[_path.as_posix()] = self - return self._pythons - - @cached_property - def as_python(self): - py_version = None - if self.py_version: - return self.py_version - if not self.is_dir and self.is_python: - try: - from .python import PythonVersion - py_version = PythonVersion.from_path(path=attr.evolve(self), name=self.name) - except (ValueError, InvalidPythonVersion): - py_version = None - return py_version @classmethod def create(cls, path, is_root=False, only_python=False, pythons=None, name=None): + # type: (Union[str, Path], bool, bool, Dict[str, PythonVersion], Optional[str]) -> PathEntry """Helper method for creating new :class:`pythonfinder.models.PathEntry` instances. :param str path: Path to the specified location. @@ -580,12 +633,12 @@ def create(cls, path, is_root=False, only_python=False, pythons=None, name=None) "only_python": only_python } if not guessed_name: - child_creation_args["name"] = name + child_creation_args["name"] = _new.name # type: ignore for pth, python in pythons.items(): if any(shim in normalize_path(str(pth)) for shim in SHIM_PATHS): continue pth = ensure_path(pth) - children[pth.as_posix()] = PathEntry( + children[pth.as_posix()] = PathEntry( # type: ignore py_version=python, path=pth, **child_creation_args @@ -593,29 +646,11 @@ def create(cls, path, is_root=False, only_python=False, pythons=None, name=None) _new._children = children return _new - @cached_property - def is_dir(self): - try: - ret_val = self.path.is_dir() - except OSError: - ret_val = False - return ret_val - - @cached_property - def is_executable(self): - return path_is_known_executable(self.path) - - @cached_property - def is_python(self): - return self.is_executable and ( - looks_like_python(self.path.name) - ) - @attr.s class VersionPath(SystemPath): - base = attr.ib(default=None, validator=optional_instance_of(Path)) - name = attr.ib(default=None) + 
base = attr.ib(default=None, validator=optional_instance_of(Path)) # type: Path + name = attr.ib(default=None) # type: str @classmethod def create(cls, path, only_python=True, pythons=None, name=None): diff --git a/pipenv/vendor/pythonfinder/models/python.py b/pipenv/vendor/pythonfinder/models/python.py index 4fcbbca643..8900660e6d 100644 --- a/pipenv/vendor/pythonfinder/models/python.py +++ b/pipenv/vendor/pythonfinder/models/python.py @@ -2,53 +2,71 @@ from __future__ import absolute_import, print_function import copy -import platform -import operator import logging +import operator +import platform +import sys from collections import defaultdict import attr +import six -from packaging.version import Version, LegacyVersion -from packaging.version import parse as parse_version -from vistir.compat import Path +from packaging.version import Version +from vistir.compat import Path, lru_cache -from ..environment import SYSTEM_ARCH, PYENV_ROOT, ASDF_DATA_DIR +from ..environment import ASDF_DATA_DIR, MYPY_RUNNING, PYENV_ROOT, SYSTEM_ARCH from ..exceptions import InvalidPythonVersion -from .mixins import BaseFinder, BasePath from ..utils import ( - _filter_none, - ensure_path, - get_python_version, - optional_instance_of, - unnest, - is_in_path, - parse_pyenv_version_order, - parse_asdf_version_order, - parse_python_version, + RE_MATCHER, _filter_none, ensure_path, get_python_version, is_in_path, + looks_like_python, optional_instance_of, parse_asdf_version_order, + parse_pyenv_version_order, parse_python_version, unnest ) +from .mixins import BaseFinder, BasePath + + +if MYPY_RUNNING: + from typing import ( + DefaultDict, Optional, Callable, Generator, Any, Union, Tuple, List, Dict, Type, + TypeVar, Iterator + ) + from .path import PathEntry + from .._vendor.pep514tools.environment import Environment + logger = logging.getLogger(__name__) @attr.s(slots=True) class PythonFinder(BaseFinder, BasePath): - root = attr.ib(default=None, validator=optional_instance_of(Path)) - 
#: ignore_unsupported should come before versions, because its value is used - #: in versions's default initializer. - ignore_unsupported = attr.ib(default=True) - #: The function to use to sort version order when returning an ordered verion set - sort_function = attr.ib(default=None) - paths = attr.ib(default=attr.Factory(list)) - roots = attr.ib(default=attr.Factory(defaultdict)) + root = attr.ib(default=None, validator=optional_instance_of(Path), type=Path) + # should come before versions, because its value is used in versions's default initializer. + #: Whether to ignore any paths which raise exceptions and are not actually python + ignore_unsupported = attr.ib(default=True, type=bool) #: Glob path for python versions off of the root directory - version_glob_path = attr.ib(default="versions/*") - versions = attr.ib() - pythons = attr.ib() + version_glob_path = attr.ib(default="versions/*", type=str) + #: The function to use to sort version order when returning an ordered verion set + sort_function = attr.ib(default=None) # type: Callable + #: The root locations used for discovery + roots = attr.ib(default=attr.Factory(defaultdict), type=defaultdict) + #: List of paths discovered during search + paths = attr.ib(type=list) + #: shim directory + shim_dir = attr.ib(default="shims", type=str) + #: Versions discovered in the specified paths + _versions = attr.ib(default=attr.Factory(defaultdict), type=defaultdict) + _pythons = attr.ib(default=attr.Factory(defaultdict), type=defaultdict) + + def __del__(self): + # type: () -> None + self._versions = defaultdict() + self._pythons = defaultdict() + self.roots = defaultdict() + self.paths = [] @property def expanded_paths(self): + # type: () -> Generator return ( path for path in unnest(p for p in self.versions.values()) if path is not None @@ -56,18 +74,22 @@ def expanded_paths(self): @property def is_pyenv(self): + # type: () -> bool return is_in_path(str(self.root), PYENV_ROOT) @property def is_asdf(self): + # type: 
() -> bool return is_in_path(str(self.root), ASDF_DATA_DIR) def get_version_order(self): + # type: () -> List[Path] version_paths = [ p for p in self.root.glob(self.version_glob_path) if not (p.parent.name == "envs" or p.name == "envs") ] versions = {v.name: v for v in version_paths} + version_order = [] # type: List[Path] if self.is_pyenv: version_order = [versions[v] for v in parse_pyenv_version_order() if v in versions] elif self.is_asdf: @@ -80,91 +102,129 @@ def get_version_order(self): version_order = version_paths return version_order + def get_bin_dir(self, base): + # type: (Union[Path, str]) -> Path + if isinstance(base, six.string_types): + base = Path(base) + return base / "bin" + @classmethod - def version_from_bin_dir(cls, base_dir, name=None): - from .path import PathEntry + def version_from_bin_dir(cls, entry): + # type: (PathEntry) -> Optional[PathEntry] py_version = None - version_path = PathEntry.create( - path=base_dir.absolute().as_posix(), - only_python=True, - name=base_dir.parent.name, - ) - py_version = next(iter(version_path.find_all_python_versions()), None) + py_version = next(iter(entry.find_all_python_versions()), None) return py_version - @versions.default - def get_versions(self): + def _iter_version_bases(self): + # type: () -> Iterator[Tuple[Path, PathEntry]] from .path import PathEntry - versions = defaultdict() - bin_ = "{base}/bin" for p in self.get_version_order(): - bin_dir = Path(bin_.format(base=p.as_posix())) - version_path = None - if bin_dir.exists(): - version_path = PathEntry.create( - path=bin_dir.absolute().as_posix(), - only_python=False, - name=p.name, - is_root=True, + bin_dir = self.get_bin_dir(p) + if bin_dir.exists() and bin_dir.is_dir(): + entry = PathEntry.create( + path=bin_dir.absolute(), only_python=False, name=p.name, + is_root=True ) + self.roots[p] = entry + yield (p, entry) + + def _iter_versions(self): + # type: () -> Iterator[Tuple[Path, PathEntry, Tuple]] + for base_path, entry in 
self._iter_version_bases(): version = None + version_entry = None try: - version = PythonVersion.parse(p.name) + version = PythonVersion.parse(entry.name) except (ValueError, InvalidPythonVersion): - entry = next(iter(version_path.find_all_python_versions()), None) - if not entry: - if self.ignore_unsupported: - continue - raise - else: - version = entry.py_version.as_dict() + version_entry = next(iter(entry.find_all_python_versions()), None) + if version is None: + if not self.ignore_unsupported: + raise + continue + if version_entry is not None: + version = version_entry.py_version.as_dict() except Exception: if not self.ignore_unsupported: raise - logger.warning( - "Unsupported Python version %r, ignoring...", p.name, exc_info=True - ) + logger.warning("Unsupported Python version %r, ignoring...", + base_path.name, exc_info=True) continue - if not version: - continue - version_tuple = ( - version.get("major"), - version.get("minor"), - version.get("patch"), - version.get("is_prerelease"), - version.get("is_devrelease"), - version.get("is_debug"), - ) - self.roots[p] = version_path - versions[version_tuple] = version_path - self.paths.append(version_path) - return versions + if version is not None: + version_tuple = ( + version.get("major"), + version.get("minor"), + version.get("patch"), + version.get("is_prerelease"), + version.get("is_devrelease"), + version.get("is_debug"), + ) + yield (base_path, entry, version_tuple) + + @property + def versions(self): + # type: () -> DefaultDict[Tuple, PathEntry] + if not self._versions: + for base_path, entry, version_tuple in self._iter_versions(): + self._versions[version_tuple] = entry + return self._versions + + def _iter_pythons(self): + # type: () -> Iterator + for path, entry, version_tuple in self._iter_versions(): + if path.as_posix() in self._pythons: + yield self._pythons[path.as_posix()] + elif version_tuple not in self.versions: + for python in entry.find_all_python_versions(): + yield python + else: + yield 
self.versions[version_tuple] + + @paths.default + def get_paths(self): + # type: () -> List[PathEntry] + _paths = [base for _, base in self._iter_version_bases()] + return _paths + + @property + def pythons(self): + # type: () -> DefaultDict[str, PathEntry] + if not self._pythons: + from .path import PathEntry + self._pythons = defaultdict(PathEntry) # type: DefaultDict[str, PathEntry] + for python in self._iter_pythons(): + python_path = python.path.as_posix() # type: ignore + self._pythons[python_path] = python + return self._pythons + + @pythons.setter + def pythons(self, value): + # type: (DefaultDict[str, PathEntry]) -> None + self._pythons = value - @pythons.default def get_pythons(self): - pythons = defaultdict() - for p in self.paths: - pythons.update(p.pythons) - return pythons + # type: () -> DefaultDict[str, PathEntry] + return self.pythons @classmethod - def create(cls, root, sort_function=None, version_glob_path=None, ignore_unsupported=True): + def create(cls, root, sort_function, version_glob_path=None, ignore_unsupported=True): # type: ignore + # type: (Type[PythonFinder], str, Callable, Optional[str], bool) -> PythonFinder root = ensure_path(root) if not version_glob_path: version_glob_path = "versions/*" - return cls(root=root, ignore_unsupported=ignore_unsupported, + return cls(root=root, path=root, ignore_unsupported=ignore_unsupported, # type: ignore sort_function=sort_function, version_glob_path=version_glob_path) def find_all_python_versions( self, - major=None, - minor=None, - patch=None, - pre=None, - dev=None, - arch=None, - name=None, + major=None, # type: Optional[Union[str, int]] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + name=None, # type: Optional[str] ): + # type: (...) -> List[PathEntry] """Search for a specific python version on the path. 
Return all copies :param major: Major python version to search for. @@ -179,36 +239,40 @@ def find_all_python_versions( :rtype: List[:class:`~pythonfinder.models.PathEntry`] """ - version_matcher = operator.methodcaller( - "matches", - major=major, - minor=minor, - patch=patch, - pre=pre, - dev=dev, - arch=arch, - name=name, + call_method = ( + "find_all_python_versions" if self.is_dir else "find_python_version" ) - py = operator.attrgetter("as_python") - pythons = ( - py_ver for py_ver in (py(p) for p in self.pythons.values() if p is not None) - if py_ver is not None + sub_finder = operator.methodcaller( + call_method, major, minor, patch, pre, dev, arch, name ) - # pythons = filter(None, [p.as_python for p in self.pythons.values()]) - matching_versions = filter(lambda py: version_matcher(py), pythons) - version_sort = operator.attrgetter("version_sort") - return sorted(matching_versions, key=version_sort, reverse=True) + if not any([major, minor, patch, name]): + pythons = [ + next(iter(py for py in base.find_all_python_versions()), None) + for _, base in self._iter_version_bases() + ] + else: + pythons = [ + sub_finder(path) for path in self.paths + ] + pythons = [p for p in pythons if p and p.is_python and p.as_python is not None] + version_sort = operator.attrgetter("as_python.version_sort") + paths = [ + p for p in sorted(list(pythons), key=version_sort, reverse=True) + if p is not None + ] + return paths def find_python_version( self, - major=None, - minor=None, - patch=None, - pre=None, - dev=None, - arch=None, - name=None, + major=None, # type: Optional[Union[str, int]] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + name=None, # type: Optional[str] ): + # type: (...) -> Optional[PathEntry] """Search or self for the specified Python version and return the first match. :param major: Major version number. 
@@ -222,40 +286,74 @@ def find_python_version( :returns: A :class:`~pythonfinder.models.PathEntry` instance matching the version requested. """ - version_matcher = operator.methodcaller( - "matches", - major=major, - minor=minor, - patch=patch, - pre=pre, - dev=dev, - arch=arch, - name=name, + sub_finder = operator.methodcaller( + "find_python_version", major, minor, patch, pre, dev, arch, name ) - pythons = filter(None, [p.as_python for p in self.pythons.values()]) - matching_versions = filter(lambda py: version_matcher(py), pythons) - version_sort = operator.attrgetter("version_sort") - return next(iter(c for c in sorted(matching_versions, key=version_sort, reverse=True)), None) + version_sort = operator.attrgetter("as_python.version_sort") + unnested = [sub_finder(self.roots[path]) for path in self.roots] + unnested = [ + p for p in unnested + if p is not None and p.is_python and p.as_python is not None + ] + paths = sorted(list(unnested), key=version_sort, reverse=True) + return next(iter(p for p in paths if p is not None), None) + + def which(self, name): + # type: (str) -> Optional[PathEntry] + """Search in this path for an executable. + + :param executable: The name of an executable to search for. + :type executable: str + :returns: :class:`~pythonfinder.models.PathEntry` instance. 
+ """ + + matches = (p.which(name) for p in self.paths) + non_empty_match = next(iter(m for m in matches if m is not None), None) + return non_empty_match @attr.s(slots=True) class PythonVersion(object): - major = attr.ib(default=0) - minor = attr.ib(default=None) - patch = attr.ib(default=0) - is_prerelease = attr.ib(default=False) - is_postrelease = attr.ib(default=False) - is_devrelease = attr.ib(default=False) - is_debug = attr.ib(default=False) - version = attr.ib(default=None) - architecture = attr.ib(default=None) - comes_from = attr.ib(default=None) - executable = attr.ib(default=None) - name = attr.ib(default=None) + major = attr.ib(default=0, type=int) + minor = attr.ib(default=None) # type: Optional[int] + patch = attr.ib(default=None) # type: Optional[int] + is_prerelease = attr.ib(default=False, type=bool) + is_postrelease = attr.ib(default=False, type=bool) + is_devrelease = attr.ib(default=False, type=bool) + is_debug = attr.ib(default=False, type=bool) + version = attr.ib(default=None) # type: Version + architecture = attr.ib(default=None) # type: Optional[str] + comes_from = attr.ib(default=None) # type: Optional[PathEntry] + executable = attr.ib(default=None) # type: Optional[str] + name = attr.ib(default=None, type=str) + + def __getattribute__(self, key): + result = super(PythonVersion, self).__getattribute__(key) + if key in ["minor", "patch"] and result is None: + executable = None # type: Optional[str] + if self.executable: + executable = self.executable + elif self.comes_from: + executable = self.comes_from.path.as_posix() + if executable is not None: + if not isinstance(executable, six.string_types): + executable = executable.as_posix() + instance_dict = self.parse_executable(executable) + for k in instance_dict.keys(): + try: + super(PythonVersion, self).__getattribute__(k) + except AttributeError: + continue + else: + setattr(self, k, instance_dict[k]) + result = instance_dict.get(key) + return result @property def version_sort(self): - 
"""version_sort tuple for sorting against other instances of the same class. + # type: () -> Tuple[Optional[int], Optional[int], int, int] + """ + A tuple for sorting against other instances of the same class. Returns a tuple of the python version but includes a point for non-dev, and a point for non-prerelease versions. So released versions will have 2 points @@ -275,7 +373,9 @@ def version_sort(self): @property def version_tuple(self): - """Provides a version tuple for using as a dictionary key. + # type: () -> Tuple[int, Optional[int], Optional[int], bool, bool, bool] + """ + Provides a version tuple for using as a dictionary key. :return: A tuple describing the python version meetadata contained. :rtype: tuple @@ -292,45 +392,52 @@ def version_tuple(self): def matches( self, - major=None, - minor=None, - patch=None, - pre=False, - dev=False, - arch=None, - debug=False, - name=None, + major=None, # type: Optional[int] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=False, # type: bool + dev=False, # type: bool + arch=None, # type: Optional[str] + debug=False, # type: bool + python_name=None, # type: Optional[str] ): + # type: (...) 
-> bool + result = False if arch: own_arch = self.get_architecture() if arch.isdigit(): arch = "{0}bit".format(arch) - return ( - (major is None or self.major == major) - and (minor is None or self.minor == minor) - and (patch is None or self.patch == patch) + if ( + (major is None or self.major and self.major == major) + and (minor is None or self.minor and self.minor == minor) + and (patch is None or self.patch and self.patch == patch) and (pre is None or self.is_prerelease == pre) and (dev is None or self.is_devrelease == dev) and (arch is None or own_arch == arch) and (debug is None or self.is_debug == debug) and ( - name is None - or (name and self.name) - and (self.name == name or self.name.startswith(name)) + python_name is None + or (python_name and self.name) + and (self.name == python_name or self.name.startswith(python_name)) ) - ) + ): + result = True + return result def as_major(self): + # type: () -> PythonVersion self_dict = attr.asdict(self, recurse=False, filter=_filter_none).copy() self_dict.update({"minor": None, "patch": None}) return self.create(**self_dict) def as_minor(self): + # type: () -> PythonVersion self_dict = attr.asdict(self, recurse=False, filter=_filter_none).copy() self_dict.update({"patch": None}) return self.create(**self_dict) def as_dict(self): + # type: () -> Dict[str, Union[int, bool, Version, None]] return { "major": self.major, "minor": self.minor, @@ -342,35 +449,67 @@ def as_dict(self): "version": self.version, } + def update_metadata(self, metadata): + # type: (Dict[str, Union[str, int, Version]]) -> None + """ + Update the metadata on the current :class:`pythonfinder.models.python.PythonVersion` + + Given a parsed version dictionary from :func:`pythonfinder.utils.parse_python_version`, + update the instance variables of the current version instance to reflect the newly + supplied values. 
+ """ + + for key in metadata: + try: + current_value = getattr(self, key) + except AttributeError: + continue + else: + setattr(self, key, metadata[key]) + @classmethod + @lru_cache(maxsize=1024) def parse(cls, version): - """Parse a valid version string into a dictionary + # type: (str) -> Dict[str, Union[str, int, Version]] + """ + Parse a valid version string into a dictionary Raises: ValueError -- Unable to parse version string ValueError -- Not a valid python version + TypeError -- NoneType or unparseable type passed in - :param version: A valid version string - :type version: str + :param str version: A valid version string :return: A dictionary with metadata about the specified python version. - :rtype: dict. + :rtype: dict """ + if version is None: + raise TypeError("Must pass a value to parse!") version_dict = parse_python_version(str(version)) if not version_dict: raise ValueError("Not a valid python version: %r" % version) return version_dict def get_architecture(self): + # type: () -> str if self.architecture: return self.architecture - arch, _ = platform.architecture(self.comes_from.path.as_posix()) + arch = None + if self.comes_from is not None: + arch, _ = platform.architecture(self.comes_from.path.as_posix()) + elif self.executable is not None: + arch, _ = platform.architecture(self.executable) + if arch is None: + arch, _ = platform.architecture(sys.executable) self.architecture = arch return self.architecture @classmethod def from_path(cls, path, name=None, ignore_unsupported=True): - """Parses a python version from a system path. + # type: (Union[str, PathEntry], Optional[str], bool) -> PythonVersion + """ + Parses a python version from a system path. 
Raises: ValueError -- Not a valid python path @@ -389,22 +528,51 @@ def from_path(cls, path, name=None, ignore_unsupported=True): path = PathEntry.create(path, is_root=False, only_python=True, name=name) from ..environment import IGNORE_UNSUPPORTED ignore_unsupported = ignore_unsupported or IGNORE_UNSUPPORTED + path_name = getattr(path, "name", path.path.name) # str if not path.is_python: if not (ignore_unsupported or IGNORE_UNSUPPORTED): raise ValueError("Not a valid python path: %s" % path.path) - py_version = get_python_version(path.path.absolute().as_posix()) - instance_dict = cls.parse(py_version.strip()) + try: + instance_dict = cls.parse(path_name) + except Exception: + instance_dict = cls.parse_executable(path.path.absolute().as_posix()) + else: + if instance_dict.get("minor") is None and looks_like_python(path.path.name): + instance_dict = cls.parse_executable(path.path.absolute().as_posix()) + if not isinstance(instance_dict.get("version"), Version) and not ignore_unsupported: - raise ValueError("Not a valid python path: %s" % path.path) - if not name: - name = path.name + raise ValueError("Not a valid python path: %s" % path) + if instance_dict.get("patch") is None: + instance_dict = cls.parse_executable(path.path.absolute().as_posix()) + if name is None: + name = path_name instance_dict.update( - {"comes_from": path, "name": name} + {"comes_from": path, "name": name, "executable": path.path.as_posix()} ) - return cls(**instance_dict) + return cls(**instance_dict) # type: ignore + + @classmethod + @lru_cache(maxsize=1024) + def parse_executable(cls, path): + # type: (str) -> Dict[str, Optional[Union[str, int, Version]]] + result_dict = {} # type: Dict[str, Optional[Union[str, int, Version]]] + result_version = None # type: Optional[str] + if path is None: + raise TypeError("Must pass a valid path to parse.") + if not isinstance(path, six.string_types): + path = path.as_posix() + try: + result_version = get_python_version(path) + except Exception: + raise 
ValueError("Not a valid python path: %r" % path) + if result_version is None: + raise ValueError("Not a valid python path: %s" % path) + result_dict = cls.parse(result_version.strip()) + return result_dict @classmethod def from_windows_launcher(cls, launcher_entry, name=None): + # type: (Environment, Optional[str]) -> PythonVersion """Create a new PythonVersion instance from a Windows Launcher Entry :param launcher_entry: A python launcher environment object. @@ -433,13 +601,13 @@ def from_windows_launcher(cls, launcher_entry, name=None): ) py_version = cls.create(**creation_dict) comes_from = PathEntry.create(exe_path, only_python=True, name=name) - comes_from.py_version = copy.deepcopy(py_version) py_version.comes_from = comes_from py_version.name = comes_from.name return py_version @classmethod def create(cls, **kwargs): + # type: (...) -> PythonVersion if "architecture" in kwargs: if kwargs["architecture"].isdigit(): kwargs["architecture"] = "{0}bit".format(kwargs["architecture"]) @@ -448,10 +616,11 @@ def create(cls, **kwargs): @attr.s class VersionMap(object): - versions = attr.ib(default=attr.Factory(defaultdict(list))) + versions = attr.ib(factory=defaultdict) # type: DefaultDict[Tuple[int, Optional[int], Optional[int], bool, bool, bool], List[PathEntry]] def add_entry(self, entry): - version = entry.as_python + # type: (...) 
-> None + version = entry.as_python # type: PythonVersion if version: entries = self.versions[version.version_tuple] paths = {p.path for p in self.versions.get(version.version_tuple, [])} @@ -459,13 +628,18 @@ def add_entry(self, entry): self.versions[version.version_tuple].append(entry) def merge(self, target): + # type: (VersionMap) -> None for version, entries in target.versions.items(): if version not in self.versions: self.versions[version] = entries else: - current_entries = {p.path for p in self.versions.get(version)} + current_entries = { + p.path for p in + self.versions[version] # type: ignore + if version in self.versions + } new_entries = {p.path for p in entries} new_entries -= current_entries - self.versions[version].append( + self.versions[version].extend( [e for e in entries if e.path in new_entries] ) diff --git a/pipenv/vendor/pythonfinder/models/windows.py b/pipenv/vendor/pythonfinder/models/windows.py index f985630fb3..5d446339a8 100644 --- a/pipenv/vendor/pythonfinder/models/windows.py +++ b/pipenv/vendor/pythonfinder/models/windows.py @@ -7,6 +7,7 @@ import attr +from ..environment import MYPY_RUNNING from ..exceptions import InvalidPythonVersion from ..utils import ensure_path from .mixins import BaseFinder @@ -14,25 +15,50 @@ from .python import PythonVersion, VersionMap +if MYPY_RUNNING: + from typing import DefaultDict, Tuple, List, Optional, Union, TypeVar, Type, Any + FinderType = TypeVar('FinderType') + + @attr.s class WindowsFinder(BaseFinder): - paths = attr.ib(default=attr.Factory(list)) - version_list = attr.ib(default=attr.Factory(list)) - versions = attr.ib() - pythons = attr.ib() + paths = attr.ib(default=attr.Factory(list), type=list) + version_list = attr.ib(default=attr.Factory(list), type=list) + _versions = attr.ib() # type: DefaultDict[Tuple, PathEntry] + _pythons = attr.ib() # type: DefaultDict[str, PathEntry] def find_all_python_versions( self, - major=None, - minor=None, - patch=None, - pre=None, - dev=None, - arch=None, 
- name=None, + major=None, # type: Optional[Union[str, int]] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + name=None, # type: Optional[str] ): + # type (...) -> List[PathEntry] version_matcher = operator.methodcaller( - "matches", + "matches", major, minor, patch, pre, dev, arch, python_name=name + ) + pythons = [ + py for py in self.version_list if version_matcher(py) + ] + version_sort = operator.attrgetter("version_sort") + return [c.comes_from for c in sorted(pythons, key=version_sort, reverse=True)] + + def find_python_version( + self, + major=None, # type: Optional[Union[str, int]] + minor=None, # type: Optional[int] + patch=None, # type: Optional[int] + pre=None, # type: Optional[bool] + dev=None, # type: Optional[bool] + arch=None, # type: Optional[str] + name=None, # type: Optional[str] + ): + # type: (...) -> Optional[PathEntry] + return next(iter(v for v in self.find_all_python_versions( major=major, minor=minor, patch=patch, @@ -40,42 +66,13 @@ def find_all_python_versions( dev=dev, arch=arch, name=name, + )), None ) - py_filter = filter( - None, filter(lambda c: version_matcher(c), self.version_list) - ) - version_sort = operator.attrgetter("version_sort") - return [c.comes_from for c in sorted(py_filter, key=version_sort, reverse=True)] - def find_python_version( - self, - major=None, - minor=None, - patch=None, - pre=None, - dev=None, - arch=None, - name=None, - ): - return next( - ( - v - for v in self.find_all_python_versions( - major=major, - minor=minor, - patch=patch, - pre=pre, - dev=dev, - arch=arch, - name=None, - ) - ), - None, - ) - - @versions.default + @_versions.default def get_versions(self): - versions = defaultdict(PathEntry) + # type: () -> DefaultDict[Tuple, PathEntry] + versions = defaultdict(PathEntry) # type: DefaultDict[Tuple, PathEntry] from pythonfinder._vendor.pep514tools import environment as 
pep514env env_versions = pep514env.findall() @@ -92,25 +89,48 @@ def get_versions(self): py_version = PythonVersion.from_windows_launcher(version_object) except InvalidPythonVersion: continue + if py_version is None: + continue self.version_list.append(py_version) + python_path = py_version.comes_from.path if py_version.comes_from else py_version.executable + python_kwargs = {python_path: py_version} if python_path is not None else {} base_dir = PathEntry.create( path, is_root=True, only_python=True, - pythons={py_version.comes_from.path: py_version}, + pythons=python_kwargs, ) versions[py_version.version_tuple[:5]] = base_dir self.paths.append(base_dir) return versions - @pythons.default + @property + def versions(self): + # type: () -> DefaultDict[Tuple, PathEntry] + if not self._versions: + self._versions = self.get_versions() + return self._versions + + @_pythons.default def get_pythons(self): - pythons = defaultdict() + # type: () -> DefaultDict[str, PathEntry] + pythons = defaultdict() # type: DefaultDict[str, PathEntry] for version in self.version_list: _path = ensure_path(version.comes_from.path) pythons[_path.as_posix()] = version.comes_from return pythons + @property + def pythons(self): + # type: () -> DefaultDict[str, PathEntry] + return self._pythons + + @pythons.setter + def pythons(self, value): + # type: (DefaultDict[str, PathEntry]) -> None + self._pythons = value + @classmethod - def create(cls): + def create(cls, *args, **kwargs): + # type: (Type[FinderType], Any, Any) -> FinderType return cls() diff --git a/pipenv/vendor/pythonfinder/pythonfinder.py b/pipenv/vendor/pythonfinder/pythonfinder.py index 011754eafc..63f63c74fa 100644 --- a/pipenv/vendor/pythonfinder/pythonfinder.py +++ b/pipenv/vendor/pythonfinder/pythonfinder.py @@ -1,26 +1,46 @@ # -*- coding=utf-8 -*- -from __future__ import print_function, absolute_import +from __future__ import absolute_import, print_function + +import operator import os + import six -import operator -from 
.models import SystemPath + +from click import secho from vistir.compat import lru_cache +from . import environment +from .exceptions import InvalidPythonVersion +from .models import path +from .utils import Iterable, filter_pythons, version_re + + +if environment.MYPY_RUNNING: + from typing import Optional, Dict, Any, Union, List, Iterator + from .models.path import Path, PathEntry + from .models.windows import WindowsFinder + from .models.path import SystemPath + class Finder(object): - def __init__(self, path=None, system=False, global_search=True, ignore_unsupported=True): - """ - Finder A cross-platform Finder for locating python and other executables. - Searches for python and other specified binaries starting in `path`, if supplied, - but searching the bin path of `sys.executable` if `system=True`, and then - searching in the `os.environ['PATH']` if `global_search=True`. When `global_search` - is `False`, this search operation is restricted to the allowed locations of - `path` and `system`. + """ + A cross-platform Finder for locating python and other executables. + + Searches for python and other specified binaries starting in *path*, if supplied, + but searching the bin path of ``sys.executable`` if *system* is ``True``, and then + searching in the ``os.environ['PATH']`` if *global_search* is ``True``. When *global_search* + is ``False``, this search operation is restricted to the allowed locations of + *path* and *system*. + """ + + def __init__(self, path=None, system=False, global_search=True, ignore_unsupported=True): + # type: (Optional[str], bool, bool, bool) -> None + """Create a new :class:`~pythonfinder.pythonfinder.Finder` instance. 
:param path: A bin-directory search location, defaults to None :param path: str, optional - :param system: Whether to include the bin-dir of `sys.executable`, defaults to False + :param system: Whether to include the bin-dir of ``sys.executable``, defaults to False :param system: bool, optional :param global_search: Whether to search the global path from os.environ, defaults to True :param global_search: bool, optional @@ -29,34 +49,63 @@ def __init__(self, path=None, system=False, global_search=True, ignore_unsupport :returns: a :class:`~pythonfinder.pythonfinder.Finder` object. """ - self.path_prepend = path - self.global_search = global_search - self.system = system - self.ignore_unsupported = ignore_unsupported - self._system_path = None - self._windows_finder = None + self.path_prepend = path # type: Optional[str] + self.global_search = global_search # type: bool + self.system = system # type: bool + self.ignore_unsupported = ignore_unsupported # type: bool + self._system_path = None # type: Optional[SystemPath] + self._windows_finder = None # type: Optional[WindowsFinder] def __hash__(self): + # type: () -> int return hash( (self.path_prepend, self.system, self.global_search, self.ignore_unsupported) ) def __eq__(self, other): + # type: (Any) -> bool return self.__hash__() == other.__hash__() + def create_system_path(self): + # type: () -> SystemPath + return path.SystemPath.create( + path=self.path_prepend, system=self.system, global_search=self.global_search, + ignore_unsupported=self.ignore_unsupported + ) + + def reload_system_path(self): + # type: () -> None + """ + Rebuilds the base system path and all of the contained finders within it. + + This will re-apply any changes to the environment or any version changes on the system. 
+ """ + + if self._system_path is not None: + self._system_path.clear_caches() + self._system_path = None + six.moves.reload_module(path) + self._system_path = self.create_system_path() + + def rehash(self): + # type: () -> None + if not self._system_path: + self._system_path = self.create_system_path() + self.find_all_python_versions.cache_clear() + self.find_python_version.cache_clear() + self.reload_system_path() + filter_pythons.cache_clear() + @property def system_path(self): - if not self._system_path: - self._system_path = SystemPath.create( - path=self.path_prepend, - system=self.system, - global_search=self.global_search, - ignore_unsupported=self.ignore_unsupported, - ) + # type: () -> SystemPath + if self._system_path is None: + self._system_path = self.create_system_path() return self._system_path @property def windows_finder(self): + # type: () -> Optional[WindowsFinder] if os.name == "nt" and not self._windows_finder: from .models import WindowsFinder @@ -64,13 +113,36 @@ def windows_finder(self): return self._windows_finder def which(self, exe): + # type: (str) -> Optional[PathEntry] return self.system_path.which(exe) @lru_cache(maxsize=1024) def find_python_version( self, major=None, minor=None, patch=None, pre=None, dev=None, arch=None, name=None ): + # type: (Optional[Union[str, int]], Optional[int], Optional[int], Optional[bool], Optional[bool], Optional[str], Optional[str]) -> PathEntry + """ + Find the python version which corresponds most closely to the version requested. + + :param Union[str, int] major: The major version to look for, or the full version, or the name of the target version. + :param Optional[int] minor: The minor version. If provided, disables string-based lookups from the major version field. + :param Optional[int] patch: The patch version. + :param Optional[bool] pre: If provided, specifies whether to search pre-releases. + :param Optional[bool] dev: If provided, whether to search dev-releases. 
+ :param Optional[str] arch: If provided, which architecture to search. + :param Optional[str] name: *Name* of the target python, e.g. ``anaconda3-5.3.0`` + :return: A new *PathEntry* pointer at a matching python version, if one can be located. + :rtype: :class:`pythonfinder.models.path.PathEntry` + """ + from .models import PythonVersion + minor = int(minor) if minor is not None else minor + patch = int(patch) if patch is not None else patch + + version_dict = { + "minor": minor, + "patch": patch + } # type: Dict[str, Union[str, int, Any]] if ( isinstance(major, six.string_types) @@ -79,7 +151,7 @@ def find_python_version( and dev is None and patch is None ): - if arch is None and "-" in major: + if arch is None and "-" in major and major[0].isdigit(): orig_string = "{0!s}".format(major) major, _, arch = major.rpartition("-") if arch.startswith("x"): @@ -91,20 +163,43 @@ def find_python_version( arch = None else: arch = "{0}bit".format(arch) - try: - version_dict = PythonVersion.parse(major) - except ValueError: - if name is None: - name = "{0!s}".format(major) - major = None - version_dict = {} - major = version_dict.get("major", major) - minor = version_dict.get("minor", minor) - patch = version_dict.get("patch", patch) - pre = version_dict.get("is_prerelease", pre) if pre is None else pre - dev = version_dict.get("is_devrelease", dev) if dev is None else dev - arch = version_dict.get("architecture", arch) if arch is None else arch - if os.name == "nt": + try: + version_dict = PythonVersion.parse(major) + except (ValueError, InvalidPythonVersion): + if name is None: + name = "{0!s}".format(major) + major = None + version_dict = {} + elif major[0].isalpha(): + name = "%s" % major + major = None + else: + if "." 
in major and all(part.isdigit() for part in major.split(".")[:2]): + match = version_re.match(major) + version_dict = match.groupdict() + version_dict["is_prerelease"] = bool(version_dict.get("prerel", False)) + version_dict["is_devrelease"] = bool(version_dict.get("dev", False)) + else: + version_dict = { + "major": major, + "minor": minor, + "patch": patch, + "pre": pre, + "dev": dev, + "arch": arch + } + if version_dict.get("minor") is not None: + minor = int(version_dict["minor"]) + if version_dict.get("patch") is not None: + patch = int(version_dict["patch"]) + if version_dict.get("major") is not None: + major = int(version_dict["major"]) + _pre = version_dict.get("is_prerelease", pre) + pre = bool(_pre) if _pre is not None else pre + _dev = version_dict.get("is_devrelease", dev) + dev = bool(_dev) if _dev is not None else dev + arch = version_dict.get("architecture", None) if arch is None else arch # type: ignore + if os.name == "nt" and self.windows_finder is not None: match = self.windows_finder.find_python_version( major=major, minor=minor, patch=patch, pre=pre, dev=dev, arch=arch, name=name ) @@ -118,28 +213,26 @@ def find_python_version( def find_all_python_versions( self, major=None, minor=None, patch=None, pre=None, dev=None, arch=None, name=None ): + # type: (Optional[Union[str, int]], Optional[int], Optional[int], Optional[bool], Optional[bool], Optional[str], Optional[str]) -> List[PathEntry] version_sort = operator.attrgetter("as_python.version_sort") python_version_dict = getattr(self.system_path, "python_version_dict") if python_version_dict: - paths = filter( - None, - [ + paths = ( path for version in python_version_dict.values() for path in version - if path.as_python - ], + if path is not None and path.as_python ) - paths = sorted(paths, key=version_sort, reverse=True) - return paths + path_list = sorted(paths, key=version_sort, reverse=True) + return path_list versions = self.system_path.find_all_python_versions( major=major, minor=minor, 
patch=patch, pre=pre, dev=dev, arch=arch, name=name ) - if not isinstance(versions, list): - versions = [versions] - paths = sorted(versions, key=version_sort, reverse=True) - path_map = {} - for path in paths: + if not isinstance(versions, Iterable): + versions = [versions,] + path_list = sorted(versions, key=version_sort, reverse=True) + path_map = {} # type: Dict[str, PathEntry] + for path in path_list: try: resolved_path = path.path.resolve() except OSError: diff --git a/pipenv/vendor/pythonfinder/utils.py b/pipenv/vendor/pythonfinder/utils.py index 184419194f..24ffdea096 100644 --- a/pipenv/vendor/pythonfinder/utils.py +++ b/pipenv/vendor/pythonfinder/utils.py @@ -1,34 +1,39 @@ # -*- coding=utf-8 -*- from __future__ import absolute_import, print_function +import io import itertools import os +import re from fnmatch import fnmatch import attr -import io -import re import six - import vistir from packaging.version import LegacyVersion, Version -from .environment import PYENV_ROOT, ASDF_DATA_DIR, MYPY_RUNNING +from .environment import MYPY_RUNNING, PYENV_ROOT from .exceptions import InvalidPythonVersion -six.add_move(six.MovedAttribute("Iterable", "collections", "collections.abc")) -from six.moves import Iterable + +six.add_move(six.MovedAttribute("Iterable", "collections", "collections.abc")) # type: ignore # noqa +six.add_move(six.MovedAttribute("Sequence", "collections", "collections.abc")) # type: ignore # noqa +from six.moves import Iterable # type: ignore # noqa +from six.moves import Sequence # type: ignore # noqa try: from functools import lru_cache except ImportError: - from backports.functools_lru_cache import lru_cache + from backports.functools_lru_cache import lru_cache # type: ignore # noqa if MYPY_RUNNING: - from typing import Any, Union, List, Callable, Iterable, Set, Tuple, Dict, Optional - from attr.validators import _OptionalValidator + from typing import ( + Any, Union, List, Callable, Iterable, Set, Tuple, Dict, Optional, Iterator + ) + from 
def looks_like_python(name):
    # type: (str) -> bool
    """Heuristically decide whether *name* looks like a python interpreter binary.

    Requires a known implementation-name prefix, a hit on the implementation
    regex, and a match against one of the filename rules.
    """
    lowered = name.lower()
    if not any(lowered.startswith(impl) for impl in PYTHON_IMPLEMENTATIONS):
        return False
    if RE_MATCHER.match(name):
        return any(fnmatch(name, rule) for rule in MATCH_RULES)
    return False
# TODO: Reimplement in vistir
def is_in_path(path, parent):
    """Return True when *path* lies underneath (or equals) *parent*,
    comparing normalized string forms."""
    normalized_child = normalize_path(str(path))
    normalized_parent = normalize_path(str(parent))
    return normalized_child.startswith(normalized_parent)
class TomlError(RuntimeError):
    """Error raised while reading or writing TOML, carrying source location."""

    def __init__(self, message, line, col, filename):
        # Forward all fields to RuntimeError so ``args`` stays populated.
        super(TomlError, self).__init__(message, line, col, filename)
        self.message = message
        self.line = line
        self.col = col
        self.filename = filename

    def __str__(self):
        return '{}({}, {}): {}'.format(self.filename, self.line, self.col, self.message)

    def __repr__(self):
        return 'TomlError({!r}, {!r}, {!r}, {!r})'.format(
            self.message, self.line, self.col, self.filename
        )
s.replace('\r\n', '\n') + + root = object_pairs_hook() + tables = object_pairs_hook() + scope = root + + src = _Source(s, filename=filename) + ast = _p_toml(src, object_pairs_hook=object_pairs_hook) + + def error(msg): + raise TomlError(msg, pos[0], pos[1], filename) + + def process_value(v, object_pairs_hook): + kind, text, value, pos = v + if kind == 'str' and value.startswith('\n'): + value = value[1:] + if kind == 'array': + if value and any(k != value[0][0] for k, t, v, p in value[1:]): + error('array-type-mismatch') + value = [process_value(item, object_pairs_hook=object_pairs_hook) for item in value] + elif kind == 'table': + value = object_pairs_hook([(k, process_value(value[k], object_pairs_hook=object_pairs_hook)) for k in value]) + return translate(kind, text, value) + + for kind, value, pos in ast: + if kind == 'kv': + k, v = value + if k in scope: + error('duplicate_keys. Key "{0}" was used more than once.'.format(k)) + scope[k] = process_value(v, object_pairs_hook=object_pairs_hook) + else: + is_table_array = (kind == 'table_array') + cur = tables + for name in value[:-1]: + if isinstance(cur.get(name), list): + d, cur = cur[name][-1] + else: + d, cur = cur.setdefault(name, (None, object_pairs_hook())) + + scope = object_pairs_hook() + name = value[-1] + if name not in cur: + if is_table_array: + cur[name] = [(scope, object_pairs_hook())] + else: + cur[name] = (scope, object_pairs_hook()) + elif isinstance(cur[name], list): + if not is_table_array: + error('table_type_mismatch') + cur[name].append((scope, object_pairs_hook())) + else: + if is_table_array: + error('table_type_mismatch') + old_scope, next_table = cur[name] + if old_scope is not None: + error('duplicate_tables') + cur[name] = (scope, next_table) + + def merge_tables(scope, tables): + if scope is None: + scope = object_pairs_hook() + for k in tables: + if k in scope: + error('key_table_conflict') + v = tables[k] + if isinstance(v, list): + scope[k] = [merge_tables(sc, tbl) for sc, tbl in 
v] + else: + scope[k] = merge_tables(v[0], v[1]) + return scope + + return merge_tables(root, tables) + +class _Source: + def __init__(self, s, filename=None): + self.s = s + self._pos = (1, 1) + self._last = None + self._filename = filename + self.backtrack_stack = [] + + def last(self): + return self._last + + def pos(self): + return self._pos + + def fail(self): + return self._expect(None) + + def consume_dot(self): + if self.s: + self._last = self.s[0] + self.s = self[1:] + self._advance(self._last) + return self._last + return None + + def expect_dot(self): + return self._expect(self.consume_dot()) + + def consume_eof(self): + if not self.s: + self._last = '' + return True + return False + + def expect_eof(self): + return self._expect(self.consume_eof()) + + def consume(self, s): + if self.s.startswith(s): + self.s = self.s[len(s):] + self._last = s + self._advance(s) + return True + return False + + def expect(self, s): + return self._expect(self.consume(s)) + + def consume_re(self, re): + m = re.match(self.s) + if m: + self.s = self.s[len(m.group(0)):] + self._last = m + self._advance(m.group(0)) + return m + return None + + def expect_re(self, re): + return self._expect(self.consume_re(re)) + + def __enter__(self): + self.backtrack_stack.append((self.s, self._pos)) + + def __exit__(self, type, value, traceback): + if type is None: + self.backtrack_stack.pop() + else: + self.s, self._pos = self.backtrack_stack.pop() + return type == TomlError + + def commit(self): + self.backtrack_stack[-1] = (self.s, self._pos) + + def _expect(self, r): + if not r: + raise TomlError('msg', self._pos[0], self._pos[1], self._filename) + return r + + def _advance(self, s): + suffix_pos = s.rfind('\n') + if suffix_pos == -1: + self._pos = (self._pos[0], self._pos[1] + len(s)) + else: + self._pos = (self._pos[0] + s.count('\n'), len(s) - suffix_pos) + +_ews_re = re.compile(r'(?:[ \t]|#[^\n]*\n|#[^\n]*\Z|\n)*') +def _p_ews(s): + s.expect_re(_ews_re) + +_ws_re = re.compile(r'[ 
def _p_key(s):
    # Parse a TOML key in any of its three forms.
    # Try a basic (double-quoted) key first; on failure, the ``with s``
    # context rolls the source position back (_Source.__exit__ suppresses
    # the TomlError) and control falls through to the alternatives below.
    with s:
        s.expect('"')
        r = _p_basicstr_content(s, _basicstr_re)
        s.expect('"')
        return r
    # Reached only when the double-quoted parse failed and was rolled back.
    # Literal (single-quoted) key, possibly the multiline ''' form.
    if s.consume('\''):
        if s.consume('\'\''):
            r = s.expect_re(_litstr_ml_re).group(0)
            s.expect('\'\'\'')
        else:
            r = s.expect_re(_litstr_re).group(0)
            s.expect('\'')
        return r
    # Bare key: letters, digits, dashes, underscores.
    return s.expect_re(_key_re).group(0)
def _p_stmt(s, object_pairs_hook):
    """Parse one top-level statement: a ``[table]`` header, a
    ``[[table array]]`` header, or a ``key = value`` pair."""
    pos = s.pos()
    if s.consume('['):
        # A second '[' marks an array-of-tables header.
        is_array = s.consume('[')
        keys = []
        while True:
            _p_ws(s)
            keys.append(_p_key(s))
            _p_ws(s)
            if not s.consume('.'):
                break
        s.expect(']')
        if is_array:
            s.expect(']')
        return 'table_array' if is_array else 'table', keys, pos

    key = _p_key(s)
    _p_ws(s)
    s.expect('=')
    _p_ws(s)
    value = _p_value(s, object_pairs_hook=object_pairs_hook)
    return 'kv', (key, value), pos
def translate_to_test(v):
    """Convert a parsed TOML value into toml-test's tagged-JSON form.

    Dicts recurse; lists of tables stay plain lists, other lists become
    ``{'type': 'array', ...}``; scalars become ``{'type': ..., 'value': str}``.
    """
    if isinstance(v, dict):
        # Renamed loop vars: the original comprehension shadowed ``v``.
        return {key: translate_to_test(val) for key, val in v.items()}
    if isinstance(v, list):
        items = [translate_to_test(x) for x in v]
        if v and isinstance(v[0], dict):
            return items
        return {'type': 'array', 'value': items}
    if isinstance(v, datetime.datetime):
        return {'type': 'datetime', 'value': format_rfc3339(v)}
    # bool must be tested before the int types: bool subclasses int.
    if isinstance(v, bool):
        return {'type': 'bool', 'value': 'true' if v else 'false'}
    if isinstance(v, _int_types):
        return {'type': 'integer', 'value': str(v)}
    if isinstance(v, float):
        return {'type': 'float', 'value': '{:.17}'.format(v)}
    if isinstance(v, _string_types):
        return {'type': 'string', 'value': v}
    raise RuntimeError('unexpected value: {!r}'.format(v))
int(offs.total_seconds()) // 60 if offs is not None else 0 + + if offs == 0: + suffix = 'Z' + else: + if offs > 0: + suffix = '+' + else: + suffix = '-' + offs = -offs + suffix = '{0}{1:02}:{2:02}'.format(suffix, offs // 60, offs % 60) + + if v.microsecond: + return v.strftime('%Y-%m-%dT%H:%M:%S.%f') + suffix + else: + return v.strftime('%Y-%m-%dT%H:%M:%S') + suffix + +class _TimeZone(datetime.tzinfo): + def __init__(self, offset): + self._offset = offset + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return None + + def tzname(self, dt): + m = self._offset.total_seconds() // 60 + if m < 0: + res = '-' + m = -m + else: + res = '+' + h = m // 60 + m = m - h * 60 + return '{}{:.02}{:.02}'.format(res, h, m) diff --git a/pipenv/vendor/pytoml/writer.py b/pipenv/vendor/pytoml/writer.py new file mode 100644 index 0000000000..73b5089c24 --- /dev/null +++ b/pipenv/vendor/pytoml/writer.py @@ -0,0 +1,106 @@ +from __future__ import unicode_literals +import io, datetime, math, string, sys + +from .utils import format_rfc3339 + +if sys.version_info[0] == 3: + long = int + unicode = str + + +def dumps(obj, sort_keys=False): + fout = io.StringIO() + dump(obj, fout, sort_keys=sort_keys) + return fout.getvalue() + + +_escapes = {'\n': 'n', '\r': 'r', '\\': '\\', '\t': 't', '\b': 'b', '\f': 'f', '"': '"'} + + +def _escape_string(s): + res = [] + start = 0 + + def flush(): + if start != i: + res.append(s[start:i]) + return i + 1 + + i = 0 + while i < len(s): + c = s[i] + if c in '"\\\n\r\t\b\f': + start = flush() + res.append('\\' + _escapes[c]) + elif ord(c) < 0x20: + start = flush() + res.append('\\u%04x' % ord(c)) + i += 1 + + flush() + return '"' + ''.join(res) + '"' + + +_key_chars = string.digits + string.ascii_letters + '-_' +def _escape_id(s): + if any(c not in _key_chars for c in s): + return _escape_string(s) + return s + + +def _format_value(v): + if isinstance(v, bool): + return 'true' if v else 'false' + if isinstance(v, int) or isinstance(v, 
long): + return unicode(v) + if isinstance(v, float): + if math.isnan(v) or math.isinf(v): + raise ValueError("{0} is not a valid TOML value".format(v)) + else: + return repr(v) + elif isinstance(v, unicode) or isinstance(v, bytes): + return _escape_string(v) + elif isinstance(v, datetime.datetime): + return format_rfc3339(v) + elif isinstance(v, list): + return '[{0}]'.format(', '.join(_format_value(obj) for obj in v)) + elif isinstance(v, dict): + return '{{{0}}}'.format(', '.join('{} = {}'.format(_escape_id(k), _format_value(obj)) for k, obj in v.items())) + else: + raise RuntimeError(v) + + +def dump(obj, fout, sort_keys=False): + tables = [((), obj, False)] + + while tables: + name, table, is_array = tables.pop() + if name: + section_name = '.'.join(_escape_id(c) for c in name) + if is_array: + fout.write('[[{0}]]\n'.format(section_name)) + else: + fout.write('[{0}]\n'.format(section_name)) + + table_keys = sorted(table.keys()) if sort_keys else table.keys() + new_tables = [] + has_kv = False + for k in table_keys: + v = table[k] + if isinstance(v, dict): + new_tables.append((name + (k,), v, False)) + elif isinstance(v, list) and v and all(isinstance(o, dict) for o in v): + new_tables.extend((name + (k,), d, True) for d in v) + elif v is None: + # based on mojombo's comment: https://github.com/toml-lang/toml/issues/146#issuecomment-25019344 + fout.write( + '#{} = null # To use: uncomment and replace null with value\n'.format(_escape_id(k))) + has_kv = True + else: + fout.write('{0} = {1}\n'.format(_escape_id(k), _format_value(v))) + has_kv = True + + tables.extend(reversed(new_tables)) + + if (name or has_kv) and tables: + fout.write('\n') diff --git a/pipenv/vendor/requests/__version__.py b/pipenv/vendor/requests/__version__.py index 803773a0fd..f5b5d03671 100644 --- a/pipenv/vendor/requests/__version__.py +++ b/pipenv/vendor/requests/__version__.py @@ -5,8 +5,8 @@ __title__ = 'requests' __description__ = 'Python HTTP for Humans.' 
__url__ = 'http://python-requests.org' -__version__ = '2.20.1' -__build__ = 0x022001 +__version__ = '2.21.0' +__build__ = 0x022100 __author__ = 'Kenneth Reitz' __author_email__ = 'me@kennethreitz.org' __license__ = 'Apache 2.0' diff --git a/pipenv/vendor/requests/models.py b/pipenv/vendor/requests/models.py index 3dded57eff..62dcd0b7c8 100644 --- a/pipenv/vendor/requests/models.py +++ b/pipenv/vendor/requests/models.py @@ -781,7 +781,7 @@ def generate(): return chunks - def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None): + def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): """Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. diff --git a/pipenv/vendor/requirementslib/__init__.py b/pipenv/vendor/requirementslib/__init__.py index aca59917db..c71569714e 100644 --- a/pipenv/vendor/requirementslib/__init__.py +++ b/pipenv/vendor/requirementslib/__init__.py @@ -1,5 +1,6 @@ # -*- coding=utf-8 -*- -__version__ = '1.3.2' +from __future__ import absolute_import, print_function +__version__ = '1.4.1.dev0' import logging import warnings diff --git a/pipenv/vendor/requirementslib/models/lockfile.py b/pipenv/vendor/requirementslib/models/lockfile.py index 54b2761c25..42248868af 100644 --- a/pipenv/vendor/requirementslib/models/lockfile.py +++ b/pipenv/vendor/requirementslib/models/lockfile.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import +from __future__ import absolute_import, print_function import copy import os diff --git a/pipenv/vendor/requirementslib/models/pipfile.py b/pipenv/vendor/requirementslib/models/pipfile.py index 84a4a26dad..021b5d537a 100644 --- a/pipenv/vendor/requirementslib/models/pipfile.py +++ b/pipenv/vendor/requirementslib/models/pipfile.py @@ -18,16 +18,16 @@ from ..utils import is_editable, is_vcs, merge_items from .project import 
ProjectFile from .requirements import Requirement -from .utils import optional_instance_of +from .utils import optional_instance_of, get_url_name from ..environment import MYPY_RUNNING if MYPY_RUNNING: - from typing import Union, Any, Dict, Iterable, Sequence, Mapping, List, NoReturn - package_type = Dict[str, Dict[str, Union[List[str], str]]] - source_type = Dict[str, Union[str, bool]] + from typing import Union, Any, Dict, Iterable, Sequence, Mapping, List, NoReturn, Text + package_type = Dict[Text, Dict[Text, Union[List[Text], Text]]] + source_type = Dict[Text, Union[Text, bool]] sources_type = Iterable[source_type] - meta_type = Dict[str, Union[int, Dict[str, str], sources_type]] - lockfile_type = Dict[str, Union[package_type, meta_type]] + meta_type = Dict[Text, Union[int, Dict[Text, Text], sources_type]] + lockfile_type = Dict[Text, Union[package_type, meta_type]] # Let's start by patching plette to make sure we can validate data without being broken @@ -45,7 +45,7 @@ def patch_plette(): global VALIDATORS def validate(cls, data): - # type: (Any, Dict[str, Any]) -> None + # type: (Any, Dict[Text, Any]) -> None if not cerberus: # Skip validation if Cerberus is not available. 
return schema = cls.__SCHEMA__ @@ -87,9 +87,10 @@ def reorder_source_keys(data): sources = data["source"] # type: sources_type for i, entry in enumerate(sources): table = tomlkit.table() # type: Mapping - table["name"] = entry["name"] - table["url"] = entry["url"] - table["verify_ssl"] = entry["verify_ssl"] + source_entry = PipfileLoader.populate_source(entry.copy()) + table["name"] = source_entry["name"] + table["url"] = source_entry["url"] + table["verify_ssl"] = source_entry["verify_ssl"] data["source"][i] = table return data @@ -97,7 +98,7 @@ def reorder_source_keys(data): class PipfileLoader(plette.pipfiles.Pipfile): @classmethod def validate(cls, data): - # type: (Dict[str, Any]) -> None + # type: (Dict[Text, Any]) -> None for key, klass in plette.pipfiles.PIPFILE_SECTIONS.items(): if key not in data or key == "source": continue @@ -106,9 +107,21 @@ def validate(cls, data): except Exception: pass + @classmethod + def populate_source(cls, source): + """Derive missing values of source from the existing fields.""" + # Only URL pararemter is mandatory, let the KeyError be thrown. 
+ if "name" not in source: + source["name"] = get_url_name(source["url"]) + if "verify_ssl" not in source: + source["verify_ssl"] = "https://" in source["url"] + if not isinstance(source["verify_ssl"], bool): + source["verify_ssl"] = source["verify_ssl"].lower() == "true" + return source + @classmethod def load(cls, f, encoding=None): - # type: (Any, str) -> PipfileLoader + # type: (Any, Text) -> PipfileLoader content = f.read() if encoding is not None: content = content.decode(encoding) @@ -132,7 +145,7 @@ def load(cls, f, encoding=None): return instance def __getattribute__(self, key): - # type: (str) -> Any + # type: (Text) -> Any if key == "source": return self._data[key] return super(PipfileLoader, self).__getattribute__(key) @@ -169,8 +182,8 @@ def pipfile(self): return self._pipfile def get_deps(self, dev=False, only=True): - # type: (bool, bool) -> Dict[str, Dict[str, Union[List[str], str]]] - deps = {} # type: Dict[str, Dict[str, Union[List[str], str]]] + # type: (bool, bool) -> Dict[Text, Dict[Text, Union[List[Text], Text]]] + deps = {} # type: Dict[Text, Dict[Text, Union[List[Text], Text]]] if dev: deps.update(self.pipfile._data["dev-packages"]) if only: @@ -178,11 +191,11 @@ def get_deps(self, dev=False, only=True): return merge_items([deps, self.pipfile._data["packages"]]) def get(self, k): - # type: (str) -> Any + # type: (Text) -> Any return self.__getitem__(k) def __contains__(self, k): - # type: (str) -> bool + # type: (Text) -> bool check_pipfile = k in self.extended_keys or self.pipfile.__contains__(k) if check_pipfile: return True @@ -234,10 +247,10 @@ def allow_prereleases(self): @classmethod def read_projectfile(cls, path): - # type: (str) -> ProjectFile + # type: (Text) -> ProjectFile """Read the specified project file and provide an interface for writing/updating. - :param str path: Path to the target file. + :param Text path: Path to the target file. 
:return: A project file with the model and location for interaction :rtype: :class:`~requirementslib.models.project.ProjectFile` """ @@ -250,10 +263,10 @@ def read_projectfile(cls, path): @classmethod def load_projectfile(cls, path, create=False): - # type: (str, bool) -> ProjectFile + # type: (Text, bool) -> ProjectFile """Given a path, load or create the necessary pipfile. - :param str path: Path to the project root or pipfile + :param Text path: Path to the project root or pipfile :param bool create: Whether to create the pipfile if not found, defaults to True :raises OSError: Thrown if the project root directory doesn't exist :raises FileNotFoundError: Thrown if the pipfile doesn't exist and ``create=False`` @@ -275,10 +288,10 @@ def load_projectfile(cls, path, create=False): @classmethod def load(cls, path, create=False): - # type: (str, bool) -> Pipfile + # type: (Text, bool) -> Pipfile """Given a path, load or create the necessary pipfile. - :param str path: Path to the project root or pipfile + :param Text path: Path to the project root or pipfile :param bool create: Whether to create the pipfile if not found, defaults to True :raises OSError: Thrown if the project root directory doesn't exist :raises FileNotFoundError: Thrown if the pipfile doesn't exist and ``create=False`` @@ -334,10 +347,10 @@ def _read_pyproject(self): @property def build_requires(self): - # type: () -> List[str] + # type: () -> List[Text] return self.build_system.get("requires", []) @property def build_backend(self): - # type: () -> str + # type: () -> Text return self.build_system.get("build-backend", None) diff --git a/pipenv/vendor/requirementslib/models/project.py b/pipenv/vendor/requirementslib/models/project.py index f6e037d651..28afcf0ba5 100644 --- a/pipenv/vendor/requirementslib/models/project.py +++ b/pipenv/vendor/requirementslib/models/project.py @@ -1,6 +1,6 @@ # -*- coding=utf-8 -*- -from __future__ import absolute_import, unicode_literals +from __future__ import 
absolute_import, unicode_literals, print_function import collections import io diff --git a/pipenv/vendor/requirementslib/models/requirements.py b/pipenv/vendor/requirementslib/models/requirements.py index af3d07edd4..d6c1381938 100644 --- a/pipenv/vendor/requirementslib/models/requirements.py +++ b/pipenv/vendor/requirementslib/models/requirements.py @@ -1,31 +1,40 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import +from __future__ import absolute_import, print_function import collections import copy import hashlib import os +import sys +from distutils.sysconfig import get_python_lib from contextlib import contextmanager +from functools import partial import attr import pip_shims +import six +import vistir from first import first +from cached_property import cached_property from packaging.markers import Marker from packaging.requirements import Requirement as PackagingRequirement from packaging.specifiers import Specifier, SpecifierSet, LegacySpecifier, InvalidSpecifier from packaging.utils import canonicalize_name from six.moves.urllib import parse as urllib_parse from six.moves.urllib.parse import unquote -from vistir.compat import FileNotFoundError, Path +from vistir.compat import Path, FileNotFoundError, lru_cache +from vistir.contextmanagers import temp_path from vistir.misc import dedup from vistir.path import ( create_tracked_tempdir, get_converted_relative_path, is_file_url, is_valid_url, + normalize_path, + mkdir_p ) from ..exceptions import RequirementError @@ -33,14 +42,14 @@ VCS_LIST, is_installable_file, is_vcs, - ensure_setup_py, add_ssh_scheme_to_git_uri, strip_ssh_from_git_uri, + get_setup_paths ) -from .setup_info import SetupInfo +from .setup_info import SetupInfo, _prepare_wheel_building_kwargs from .utils import ( HASH_STRING, - build_vcs_link, + build_vcs_uri, extras_to_string, filter_none, format_requirement, @@ -51,35 +60,1021 @@ parse_extras, specs_to_string, split_markers_from_line, + split_ref_from_uri, 
split_vcs_method_from_uri, validate_path, validate_specifiers, validate_vcs, normalize_name, create_link, - get_pyproject + get_pyproject, + convert_direct_url_to_url, + URL_RE, + DIRECT_URL_RE, + get_default_pyproject_backend ) +from ..environment import MYPY_RUNNING -@attr.s(slots=True) +if MYPY_RUNNING: + from typing import Optional, TypeVar, List, Dict, Union, Any, Tuple, Generator, Set, Text + from pip_shims.shims import Link, InstallRequirement + RequirementType = TypeVar('RequirementType', covariant=True, bound=PackagingRequirement) + from six.moves.urllib.parse import SplitResult + from .vcs import VCSRepository + NON_STRING_ITERABLE = Union[List, Set, Tuple] + + +SPECIFIERS_BY_LENGTH = sorted(list(Specifier._operators.keys()), key=len, reverse=True) + + +run = partial(vistir.misc.run, combine_stderr=False, return_object=True, nospin=True) + + +class Line(object): + def __init__(self, line, extras=None): + # type: (Text, Optional[NON_STRING_ITERABLE]) -> None + self.editable = False # type: bool + if line.startswith("-e "): + line = line[len("-e "):] + self.editable = True + self.extras = () # type: Tuple[Text] + if extras is not None: + self.extras = tuple(sorted(set(extras))) + self.line = line # type: Text + self.hashes = [] # type: List[Text] + self.markers = None # type: Optional[Text] + self.vcs = None # type: Optional[Text] + self.path = None # type: Optional[Text] + self.relpath = None # type: Optional[Text] + self.uri = None # type: Optional[Text] + self._link = None # type: Optional[Link] + self.is_local = False # type: bool + self._name = None # type: Optional[Text] + self._specifier = None # type: Optional[Text] + self.parsed_marker = None # type: Optional[Marker] + self.preferred_scheme = None # type: Optional[Text] + self._requirement = None # type: Optional[PackagingRequirement] + self.is_direct_url = False # type: bool + self._parsed_url = None # type: Optional[urllib_parse.ParseResult] + self._setup_cfg = None # type: Optional[Text] + 
self._setup_py = None # type: Optional[Text] + self._pyproject_toml = None # type: Optional[Text] + self._pyproject_requires = None # type: Optional[List[Text]] + self._pyproject_backend = None # type: Optional[Text] + self._wheel_kwargs = None # type: Dict[Text, Text] + self._vcsrepo = None # type: Optional[VCSRepository] + self._setup_info = None # type: Optional[SetupInfo] + self._ref = None # type: Optional[Text] + self._ireq = None # type: Optional[InstallRequirement] + self._src_root = None # type: Optional[Text] + self.dist = None # type: Any + super(Line, self).__init__() + self.parse() + + def __hash__(self): + return hash(( + self.editable, self.line, self.markers, tuple(self.extras), + tuple(self.hashes), self.vcs, self.ireq) + ) + + def __repr__(self): + try: + return ( + "".format( + self=self + )) + except Exception: + return "".format(self.__dict__.values()) + + @classmethod + def split_hashes(cls, line): + # type: (Text) -> Tuple[Text, List[Text]] + if "--hash" not in line: + return line, [] + split_line = line.split() + line_parts = [] # type: List[Text] + hashes = [] # type: List[Text] + for part in split_line: + if part.startswith("--hash"): + param, _, value = part.partition("=") + hashes.append(value) + else: + line_parts.append(part) + line = " ".join(line_parts) + return line, hashes + + @property + def line_with_prefix(self): + # type: () -> Text + line = self.line + extras_str = extras_to_string(self.extras) + if self.is_direct_url: + line = self.link.url + # if self.link.egg_info and self.extras: + # line = "{0}{1}".format(line, extras_str) + elif extras_str: + if self.is_vcs: + line = self.link.url + if "git+file:/" in line and "git+file:///" not in line: + line = line.replace("git+file:/", "git+file:///") + else: + line = "{0}{1}".format(line, extras_str) + if self.editable: + return "-e {0}".format(line) + return line + + @property + def line_for_ireq(self): + # type: () -> Text + line = "" + if self.is_file or self.is_url and not 
self.is_vcs: + scheme = self.preferred_scheme if self.preferred_scheme is not None else "uri" + local_line = next(iter([ + os.path.dirname(os.path.abspath(f)) for f in [ + self.setup_py, self.setup_cfg, self.pyproject_toml + ] if f is not None + ]), None) + if local_line and self.extras: + local_line = "{0}{1}".format(local_line, extras_to_string(self.extras)) + line = local_line if local_line is not None else self.line + if scheme == "path": + if not line and self.base_path is not None: + line = os.path.abspath(self.base_path) + else: + if DIRECT_URL_RE.match(self.line): + self._requirement = init_requirement(self.line) + line = convert_direct_url_to_url(self.line) + else: + line = self.link.url + + if self.editable: + if not line: + if self.is_path or self.is_file: + if not self.path: + line = pip_shims.shims.url_to_path(self.url) + else: + line = self.path + if self.extras: + line = "{0}{1}".format(line, extras_to_string(self.extras)) + else: + line = self.link.url + elif self.is_vcs and not self.editable: + line = add_ssh_scheme_to_git_uri(self.line) + if not line: + line = self.line + return line + + @property + def base_path(self): + # type: () -> Optional[Text] + if not self.link and not self.path: + self.parse_link() + if not self.path: + pass + path = normalize_path(self.path) + if os.path.exists(path) and os.path.isdir(path): + path = path + elif os.path.exists(path) and os.path.isfile(path): + path = os.path.dirname(path) + else: + path = None + return path + + @property + def setup_py(self): + # type: () -> Optional[Text] + if self._setup_py is None: + self.populate_setup_paths() + return self._setup_py + + @property + def setup_cfg(self): + # type: () -> Optional[Text] + if self._setup_cfg is None: + self.populate_setup_paths() + return self._setup_cfg + + @property + def pyproject_toml(self): + # type: () -> Optional[Text] + if self._pyproject_toml is None: + self.populate_setup_paths() + return self._pyproject_toml + + @property + def 
specifier(self): + # type: () -> Optional[Text] + options = [self._specifier] + for req in (self.ireq, self.requirement): + if req is not None and getattr(req, "specifier", None): + options.append(req.specifier) + specifier = next(iter(spec for spec in options if spec is not None), None) + if specifier is not None: + specifier = specs_to_string(specifier) + elif specifier is None and not self.is_named and self._setup_info is not None: + if self._setup_info.version: + specifier = "=={0}".format(self._setup_info.version) + if specifier: + self._specifier = specifier + return self._specifier + + @specifier.setter + def specifier(self, spec): + # type: (str) -> None + if not spec.startswith("=="): + spec = "=={0}".format(spec) + self._specifier = spec + self.specifiers = SpecifierSet(spec) + + @property + def specifiers(self): + # type: () -> Optional[SpecifierSet] + ireq_needs_specifier = False + req_needs_specifier = False + if self.ireq is None or self.ireq.req is None or not self.ireq.req.specifier: + ireq_needs_specifier = True + if self.requirement is None or not self.requirement.specifier: + req_needs_specifier = True + if any([ireq_needs_specifier, req_needs_specifier]): + # TODO: Should we include versions for VCS dependencies? IS there a reason not + # to? 
For now we are using hashes as the equivalent to pin + # note: we need versions for direct dependencies at the very least + if self.is_file or self.is_url or self.is_path or (self.is_vcs and not self.editable): + if self.specifier is not None: + specifier = self.specifier + if not isinstance(specifier, SpecifierSet): + specifier = SpecifierSet(specifier) + self.specifiers = specifier + return specifier + if self.ireq is not None and self.ireq.req is not None: + return self.ireq.req.specifier + elif self.requirement is not None: + return self.requirement.specifier + return None + + @specifiers.setter + def specifiers(self, specifiers): + # type: (Union[Text, SpecifierSet]) -> None + if type(specifiers) is not SpecifierSet: + if type(specifiers) in six.string_types: + specifiers = SpecifierSet(specifiers) + else: + raise TypeError("Must pass a string or a SpecifierSet") + specs = self.get_requirement_specs(specifiers) + if self.ireq is not None and self.ireq.req is not None: + self._ireq.req.specifier = specifiers + self._ireq.req.specs = specs + if self.requirement is not None: + self.requirement.specifier = specifiers + self.requirement.specs = specs + + @classmethod + def get_requirement_specs(cls, specifierset): + # type: (SpecifierSet) -> List[Tuple[Text, Text]] + specs = [] + spec = next(iter(specifierset._specs), None) + if spec: + specs.append(spec._spec) + return specs + + @property + def requirement(self): + # type: () -> Optional[PackagingRequirement] + if self._requirement is None: + self.parse_requirement() + if self._requirement is None and self._name is not None: + self._requirement = init_requirement(canonicalize_name(self.name)) + if self.is_file or self.is_url and self._requirement is not None: + self._requirement.url = self.url + if self._requirement and self._requirement.specifier and not self._requirement.specs: + specs = self.get_requirement_specs(self._requirement.specifier) + self._requirement.specs = specs + return self._requirement + + def 
populate_setup_paths(self): + # type: () -> None + if not self.link and not self.path: + self.parse_link() + if not self.path: + return + base_path = self.base_path + if base_path is None: + return + setup_paths = get_setup_paths(self.base_path, subdirectory=self.subdirectory) # type: Dict[Text, Optional[Text]] + self._setup_py = setup_paths.get("setup_py") + self._setup_cfg = setup_paths.get("setup_cfg") + self._pyproject_toml = setup_paths.get("pyproject_toml") + + @property + def pyproject_requires(self): + # type: () -> Optional[List[Text]] + if self._pyproject_requires is None and self.pyproject_toml is not None: + pyproject_requires, pyproject_backend = get_pyproject(self.path) + self._pyproject_requires = pyproject_requires + self._pyproject_backend = pyproject_backend + return self._pyproject_requires + + @property + def pyproject_backend(self): + # type: () -> Optional[Text] + if self._pyproject_requires is None and self.pyproject_toml is not None: + pyproject_requires, pyproject_backend = get_pyproject(self.path) + if not pyproject_backend and self.setup_cfg is not None: + setup_dict = SetupInfo.get_setup_cfg(self.setup_cfg) + pyproject_backend = get_default_pyproject_backend() + pyproject_requires = setup_dict.get("build_requires", ["setuptools", "wheel"]) + + self._pyproject_requires = pyproject_requires + self._pyproject_backend = pyproject_backend + return self._pyproject_backend + + def parse_hashes(self): + # type: () -> None + """ + Parse hashes from *self.line* and set them on the current object. 
+ :returns: Nothing + :rtype: None + """ + + line, hashes = self.split_hashes(self.line) + self.hashes = hashes + self.line = line + + def parse_extras(self): + # type: () -> None + """ + Parse extras from *self.line* and set them on the current object + :returns: Nothing + :rtype: None + """ + + extras = None + if "@" in self.line or self.is_vcs or self.is_url: + line = "{0}".format(self.line) + match = DIRECT_URL_RE.match(line) + if match is None: + match = URL_RE.match(line) + else: + self.is_direct_url = True + if match is not None: + match_dict = match.groupdict() + name = match_dict.get("name") + extras = match_dict.get("extras") + scheme = match_dict.get("scheme") + host = match_dict.get("host") + path = match_dict.get("path") + ref = match_dict.get("ref") + subdir = match_dict.get("subdirectory") + pathsep = match_dict.get("pathsep", "/") + url = scheme + if host: + url = "{0}{1}".format(url, host) + if path: + url = "{0}{1}{2}".format(url, pathsep, path) + if self.is_vcs and ref: + url = "{0}@{1}".format(url, ref) + if name: + url = "{0}#egg={1}".format(url, name) + if extras: + url = "{0}{1}".format(url, extras) + elif is_file_url(url) and extras and not name and self.editable: + url = "{0}{1}{2}".format(pathsep, path, extras) + if subdir: + url = "{0}&subdirectory={1}".format(url, subdir) + elif extras and not path: + url = "{0}{1}".format(url, extras) + self.line = add_ssh_scheme_to_git_uri(url) + if name: + self._name = name + # line = add_ssh_scheme_to_git_uri(self.line) + # parsed = urllib_parse.urlparse(line) + # if not parsed.scheme and "@" in line: + # matched = URL_RE.match(line) + # if matched is None: + # matched = NAME_RE.match(line) + # if matched: + # name = matched.groupdict().get("name") + # if name is not None: + # self._name = name + # extras = matched.groupdict().get("extras") + # else: + # name, _, line = self.line.partition("@") + # name = name.strip() + # line = line.strip() + # matched = NAME_RE.match(name) + # match_dict = 
matched.groupdict() + # name = match_dict.get("name") + # extras = match_dict.get("extras") + # if is_vcs(line) or is_valid_url(line): + # self.is_direct_url = True + # # name, extras = pip_shims.shims._strip_extras(name) + # self._name = name + # self.line = line + else: + self.line, extras = pip_shims.shims._strip_extras(self.line) + else: + self.line, extras = pip_shims.shims._strip_extras(self.line) + if extras is not None: + extras = set(parse_extras(extras)) + if self._name: + self._name, name_extras = pip_shims.shims._strip_extras(self._name) + if name_extras: + name_extras = set(parse_extras(name_extras)) + if extras: + extras |= name_extras + else: + extras = name_extras + if extras is not None: + self.extras = tuple(sorted(extras)) + + def get_url(self): + # type: () -> Text + """Sets ``self.name`` if given a **PEP-508** style URL""" + + line = self.line + if self.vcs is not None and self.line.startswith("{0}+".format(self.vcs)): + _, _, _parseable = self.line.partition("+") + parsed = urllib_parse.urlparse(add_ssh_scheme_to_git_uri(_parseable)) + line, _ = split_ref_from_uri(line) + else: + parsed = urllib_parse.urlparse(add_ssh_scheme_to_git_uri(line)) + if "@" in self.line and parsed.scheme == "": + name, _, url = self.line.partition("@") + if self._name is None: + url = url.strip() + self._name = name.strip() + if is_valid_url(url): + self.is_direct_url = True + line = url.strip() + parsed = urllib_parse.urlparse(line) + url_path = parsed.path + if "@" in url_path: + url_path, _, _ = url_path.rpartition("@") + parsed = parsed._replace(path=url_path) + self._parsed_url = parsed + return line + + @property + def name(self): + # type: () -> Optional[Text] + if self._name is None: + self.parse_name() + if self._name is None and not self.is_named and not self.is_wheel: + if self.setup_info: + self._name = self.setup_info.name + return self._name + + @name.setter + def name(self, name): + # type: (Text) -> None + self._name = name + if self._setup_info: + 
self._setup_info.name = name + if self.requirement: + self._requirement.name = name + if self.ireq and self.ireq.req: + self._ireq.req.name = name + + @property + def url(self): + # type: () -> Optional[Text] + if self.uri is not None: + url = add_ssh_scheme_to_git_uri(self.uri) + else: + url = getattr(self.link, "url_without_fragment", None) + if url is not None: + url = add_ssh_scheme_to_git_uri(unquote(url)) + if url is not None and self._parsed_url is None: + if self.vcs is not None: + _, _, _parseable = url.partition("+") + self._parsed_url = urllib_parse.urlparse(_parseable) + if self.is_vcs: + # strip the ref from the url + url, _ = split_ref_from_uri(url) + return url + + @property + def link(self): + # type: () -> Link + if self._link is None: + self.parse_link() + return self._link + + @property + def subdirectory(self): + # type: () -> Optional[Text] + if self.link is not None: + return self.link.subdirectory_fragment + return "" + + @property + def is_wheel(self): + # type: () -> bool + if self.link is None: + return False + return self.link.is_wheel + + @property + def is_artifact(self): + # type: () -> bool + if self.link is None: + return False + return self.link.is_artifact + + @property + def is_vcs(self): + # type: () -> bool + # Installable local files and installable non-vcs urls are handled + # as files, generally speaking + if is_vcs(self.line) or is_vcs(self.get_url()): + return True + return False + + @property + def is_url(self): + # type: () -> bool + url = self.get_url() + if (is_valid_url(url) or is_file_url(url)): + return True + return False + + @property + def is_path(self): + # type: () -> bool + if self.path and ( + self.path.startswith(".") or os.path.isabs(self.path) or + os.path.exists(self.path) + ) and is_installable_file(self.path): + return True + elif (os.path.exists(self.line) and is_installable_file(self.line)) or ( + os.path.exists(self.get_url()) and is_installable_file(self.get_url()) + ): + return True + return False + 
+ @property + def is_file_url(self): + # type: () -> bool + url = self.get_url() + parsed_url_scheme = self._parsed_url.scheme if self._parsed_url else "" + if url and is_file_url(self.get_url()) or parsed_url_scheme == "file": + return True + return False + + @property + def is_file(self): + # type: () -> bool + if self.is_path or ( + is_file_url(self.get_url()) and is_installable_file(self.get_url()) + ) or ( + self._parsed_url and self._parsed_url.scheme == "file" and + is_installable_file(urllib_parse.urlunparse(self._parsed_url)) + ): + return True + return False + + @property + def is_named(self): + # type: () -> bool + return not (self.is_file_url or self.is_url or self.is_file or self.is_vcs) + + @property + def ref(self): + # type: () -> Optional[Text] + if self._ref is None and self.relpath is not None: + self.relpath, self._ref = split_ref_from_uri(self.relpath) + return self._ref + + @property + def ireq(self): + # type: () -> Optional[pip_shims.InstallRequirement] + if self._ireq is None: + self.parse_ireq() + return self._ireq + + @property + def is_installable(self): + # type: () -> bool + possible_paths = (self.line, self.get_url(), self.path, self.base_path) + return any(is_installable_file(p) for p in possible_paths if p is not None) + + @property + def wheel_kwargs(self): + if not self._wheel_kwargs: + self._wheel_kwargs = _prepare_wheel_building_kwargs(self.ireq) + return self._wheel_kwargs + + def get_setup_info(self): + # type: () -> SetupInfo + setup_info = SetupInfo.from_ireq(self.ireq) + if not setup_info.name: + setup_info.get_info() + return setup_info + + @property + def setup_info(self): + # type: () -> Optional[SetupInfo] + if self._setup_info is None and not self.is_named and not self.is_wheel: + if self._setup_info: + if not self._setup_info.name: + self._setup_info.get_info() + else: + # make two attempts at this before failing to allow for stale data + try: + self.setup_info = self.get_setup_info() + except FileNotFoundError: + 
try: + self.setup_info = self.get_setup_info() + except FileNotFoundError: + raise + return self._setup_info + + @setup_info.setter + def setup_info(self, setup_info): + # type: (SetupInfo) -> None + self._setup_info = setup_info + if setup_info.version: + self.specifier = setup_info.version + if setup_info.name and not self.name: + self.name = setup_info.name + + def _get_vcsrepo(self): + # type: () -> Optional[VCSRepository] + from .vcs import VCSRepository + checkout_directory = self.wheel_kwargs["src_dir"] # type: ignore + if self.name is not None: + checkout_directory = os.path.join(checkout_directory, self.name) # type: ignore + vcsrepo = VCSRepository( + url=self.link.url, + name=self.name, + ref=self.ref if self.ref else None, + checkout_directory=checkout_directory, + vcs_type=self.vcs, + subdirectory=self.subdirectory, + ) + if not ( + self.link.scheme.startswith("file") and + self.editable + ): + vcsrepo.obtain() + return vcsrepo + + @property + def vcsrepo(self): + # type: () -> Optional[VCSRepository] + if self._vcsrepo is None and self.is_vcs: + self._vcsrepo = self._get_vcsrepo() + return self._vcsrepo + + @vcsrepo.setter + def vcsrepo(self, repo): + # type (VCSRepository) -> None + self._vcsrepo = repo + ireq = self.ireq + wheel_kwargs = self.wheel_kwargs.copy() + wheel_kwargs["src_dir"] = repo.checkout_directory + ireq.source_dir = wheel_kwargs["src_dir"] + build_dir = ireq.build_location(wheel_kwargs["build_dir"]) + ireq._temp_build_dir.path = wheel_kwargs["build_dir"] + with temp_path(): + sys.path = [repo.checkout_directory, "", ".", get_python_lib(plat_specific=0)] + setupinfo = SetupInfo.create( + repo.checkout_directory, ireq=ireq, subdirectory=self.subdirectory, + kwargs=wheel_kwargs + ) + self._setup_info = setupinfo + self._setup_info.reload() + + def get_ireq(self): + # type: () -> InstallRequirement + line = self.line_for_ireq + if self.editable: + ireq = pip_shims.shims.install_req_from_editable(line) + else: + ireq = 
pip_shims.shims.install_req_from_line(line) + if self.is_named: + ireq = pip_shims.shims.install_req_from_line(self.line) + if self.is_file or self.is_url: + ireq.link = self.link + if self.extras and not ireq.extras: + ireq.extras = set(self.extras) + if self.parsed_marker is not None and not ireq.markers: + ireq.markers = self.parsed_marker + if not ireq.req and self._requirement is not None: + ireq.req = copy.deepcopy(self._requirement) + return ireq + + def parse_ireq(self): + # type: () -> None + if self._ireq is None: + self._ireq = self.get_ireq() + if self._ireq is not None: + if self.requirement is not None and self._ireq.req is None: + self._ireq.req = self.requirement + + def _parse_wheel(self): + # type: () -> Optional[Text] + if not self.is_wheel: + pass + from pip_shims.shims import Wheel + _wheel = Wheel(self.link.filename) + name = _wheel.name + version = _wheel.version + self._specifier = "=={0}".format(version) + return name + + def _parse_name_from_link(self): + # type: () -> Optional[Text] + + if self.link is None: + return None + if getattr(self.link, "egg_fragment", None): + return self.link.egg_fragment + elif self.is_wheel: + return self._parse_wheel() + return None + + def _parse_name_from_line(self): + # type: () -> Optional[Text] + + if not self.is_named: + pass + try: + self._requirement = init_requirement(self.line) + except Exception: + raise RequirementError("Failed parsing requirement from {0!r}".format(self.line)) + name = self._requirement.name + if not self._specifier and self._requirement and self._requirement.specifier: + self._specifier = specs_to_string(self._requirement.specifier) + if self._requirement.extras and not self.extras: + self.extras = self._requirement.extras + if not name: + name = self.line + specifier_match = next( + iter(spec for spec in SPECIFIERS_BY_LENGTH if spec in self.line), None + ) + if specifier_match is not None: + name, specifier_match, version = name.partition(specifier_match) + self._specifier = 
"{0}{1}".format(specifier_match, version) + return name + + def parse_name(self): + # type: () -> None + if self._name is None: + name = None + if self.link is not None: + name = self._parse_name_from_link() + if name is None and ( + (self.is_url or self.is_artifact or self.is_vcs) and self._parsed_url + ): + if self._parsed_url.fragment: + _, _, name = self._parsed_url.fragment.partition("egg=") + if "&" in name: + # subdirectory fragments might also be in here + name, _, _ = name.partition("&") + if self.is_named: + name = self._parse_name_from_line() + if name is not None: + name, extras = pip_shims.shims._strip_extras(name) + if extras is not None and not self.extras: + self.extras = tuple(sorted(set(parse_extras(extras)))) + self._name = name + + def _parse_requirement_from_vcs(self): + # type: () -> Optional[PackagingRequirement] + if ( + self.uri != unquote(self.url) + and "git+ssh://" in self.url + and (self.uri is not None and "git+git@" in self.uri) + ): + self._requirement.line = self.uri + self._requirement.url = self.url + self._requirement.link = create_link(build_vcs_uri( + vcs=self.vcs, + uri=self.url, + ref=self.ref, + subdirectory=self.subdirectory, + extras=self.extras, + name=self.name + )) + # else: + # req.link = self.link + if self.ref: + if self._vcsrepo is not None: + self._requirement.revision = self._vcsrepo.get_commit_hash() + else: + self._requirement.revision = self.ref + return self._requirement + + def parse_requirement(self): + # type: () -> None + if self._name is None: + self.parse_name() + if not self._name and not self.is_vcs and not self.is_named: + if self.setup_info and self.setup_info.name: + self._name = self.setup_info.name + name, extras, url = self.requirement_info + if name: + self._requirement = init_requirement(name) # type: PackagingRequirement + if extras: + self._requirement.extras = set(extras) + if url: + self._requirement.url = url + if self.is_direct_url: + url = self.link.url + if self.link: + 
self._requirement.link = self.link + self._requirement.editable = self.editable + if self.path and self.link and self.link.scheme.startswith("file"): + self._requirement.local_file = True + self._requirement.path = self.path + if self.is_vcs: + self._requirement.vcs = self.vcs + self._requirement.line = self.link.url + self._parse_requirement_from_vcs() + else: + self._requirement.line = self.line + if self.parsed_marker is not None: + self._requirement.marker = self.parsed_marker + if self.specifiers: + self._requirement.specifier = self.specifiers + specs = [] + spec = next(iter(s for s in self.specifiers._specs), None) + if spec: + specs.append(spec._spec) + self._requirement.spec = spec + else: + if self.is_vcs: + raise ValueError( + "pipenv requires an #egg fragment for version controlled " + "dependencies. Please install remote dependency " + "in the form {0}#egg=.".format(url) + ) + + def parse_link(self): + # type: () -> None + if self.is_file or self.is_url or self.is_vcs: + vcs, prefer, relpath, path, uri, link = FileRequirement.get_link_from_line(self.line) + ref = None + if link is not None and "@" in unquote(link.path) and uri is not None: + uri, _, ref = unquote(uri).rpartition("@") + if relpath is not None and "@" in relpath: + relpath, _, ref = relpath.rpartition("@") + if path is not None and "@" in path: + path, _ = split_ref_from_uri(path) + link_url = link.url_without_fragment + if "@" in link_url: + link_url, _ = split_ref_from_uri(link_url) + self._ref = ref + self.vcs = vcs + self.preferred_scheme = prefer + self.relpath = relpath + self.path = path + self.uri = uri + if link.egg_fragment: + name, extras = pip_shims.shims._strip_extras(link.egg_fragment) + self.extras = tuple(sorted(set(parse_extras(extras)))) + self._name = name + else: + # set this so we can call `self.name` without a recursion error + self._link = link + if (self.is_direct_url or vcs) and self.name is not None and vcs is not None: + self._link = create_link( + 
build_vcs_uri(vcs=vcs, uri=link_url, ref=ref, + extras=self.extras, name=self.name, + subdirectory=link.subdirectory_fragment + ) + ) + else: + self._link = link + + def parse_markers(self): + # type: () -> None + if self.markers: + markers = PackagingRequirement("fakepkg; {0}".format(self.markers)).marker + self.parsed_marker = markers + + @property + def requirement_info(self): + # type: () -> Tuple(Optional[Text], Tuple[Optional[Text]], Optional[Text]) + """ + Generates a 3-tuple of the requisite *name*, *extras* and *url* to generate a + :class:`~packaging.requirements.Requirement` out of. + + :return: A Tuple containing an optional name, a Tuple of extras names, and an optional URL. + :rtype: Tuple[Optional[Text], Tuple[Optional[Text]], Optional[Text]] + """ + + # Direct URLs can be converted to packaging requirements directly, but + # only if they are `file://` (with only two slashes) + name = None + extras = () + url = None + # if self.is_direct_url: + if self._name: + name = canonicalize_name(self._name) + if self.is_file or self.is_url or self.is_path or self.is_file_url or self.is_vcs: + url = "" + if self.is_vcs: + url = self.url if self.url else self.uri + if self.is_direct_url: + url = self.link.url_without_fragment + else: + if self.link: + url = self.link.url_without_fragment + elif self.url: + url = self.url + if self.ref: + url = "{0}@{1}".format(url, self.ref) + else: + url = self.uri + if self.link and name is None: + self._name = self.link.egg_fragment + if self._name: + name = canonicalize_name(self._name) + # return "{0}{1}@ {2}".format( + # normalize_name(self.name), extras_to_string(self.extras), url + # ) + return (name, extras, url) + + @property + def line_is_installable(self): + # type: () -> bool + """ + This is a safeguard against decoy requirements when a user installs a package + whose name coincides with the name of a folder in the cwd, e.g. install *alembic* + when there is a folder called *alembic* in the working directory. 
+ + In this case we first need to check that the given requirement is a valid + URL, VCS requirement, or installable filesystem path before deciding to treat it as + a file requirement over a named requirement. + """ + line = self.line + if is_file_url(line): + link = create_link(line) + line = link.url_without_fragment + line, _ = split_ref_from_uri(line) + if (is_vcs(line) or (is_valid_url(line) and ( + not is_file_url(line) or is_installable_file(line))) + or is_installable_file(line)): + return True + return False + + def parse(self): + # type: () -> None + self.parse_hashes() + self.line, self.markers = split_markers_from_line(self.line) + self.parse_extras() + self.line = self.line.strip('"').strip("'").strip() + if self.line.startswith("git+file:/") and not self.line.startswith("git+file:///"): + self.line = self.line.replace("git+file:/", "git+file:///") + self.parse_markers() + if self.is_file_url: + if self.line_is_installable: + self.populate_setup_paths() + else: + raise RequirementError( + "Supplied requirement is not installable: {0!r}".format(self.line) + ) + self.parse_link() + # self.parse_requirement() + # self.parse_ireq() + + +@attr.s(slots=True, hash=True) class NamedRequirement(object): - name = attr.ib() - version = attr.ib(validator=attr.validators.optional(validate_specifiers)) - req = attr.ib() - extras = attr.ib(default=attr.Factory(list)) - editable = attr.ib(default=False) + name = attr.ib() # type: Text + version = attr.ib() # type: Optional[Text] + req = attr.ib() # type: PackagingRequirement + extras = attr.ib(default=attr.Factory(list)) # type: Tuple[Text] + editable = attr.ib(default=False) # type: bool + _parsed_line = attr.ib(default=None) # type: Optional[Line] @req.default def get_requirement(self): + # type: () -> RequirementType req = init_requirement( "{0}{1}".format(canonicalize_name(self.name), self.version) ) return req + @property + def parsed_line(self): + # type: () -> Optional[Line] + if self._parsed_line is None: + 
self._parsed_line = Line(self.line_part) + return self._parsed_line + @classmethod - def from_line(cls, line): + def from_line(cls, line, parsed_line=None): + # type: (Text, Optional[Line]) -> NamedRequirement req = init_requirement(line) - specifiers = None + specifiers = None # type: Optional[Text] if req.specifier: specifiers = specs_to_string(req.specifier) req.line = line @@ -90,39 +1085,52 @@ def from_line(cls, line): if not name: name = getattr(req, "key", line) req.name = name - extras = None + creation_kwargs = { + "name": name, + "version": specifiers, + "req": req, + "parsed_line": parsed_line, + "extras": None + } + extras = None # type: Optional[Tuple[Text]] if req.extras: extras = list(req.extras) - return cls(name=name, version=specifiers, req=req, extras=extras) + creation_kwargs["extras"] = extras + return cls(**creation_kwargs) @classmethod def from_pipfile(cls, name, pipfile): - creation_args = {} + # type: (Text, Dict[Text, Union[Text, Optional[Text], Optional[List[Text]]]]) -> NamedRequirement + creation_args = {} # type: Dict[Text, Union[Optional[Text], Optional[List[Text]]]] if hasattr(pipfile, "keys"): attr_fields = [field.name for field in attr.fields(cls)] creation_args = {k: v for k, v in pipfile.items() if k in attr_fields} creation_args["name"] = name - version = get_version(pipfile) + version = get_version(pipfile) # type: Optional[Text] extras = creation_args.get("extras", None) creation_args["version"] = version req = init_requirement("{0}{1}".format(name, version)) if extras: req.extras += tuple(extras) creation_args["req"] = req - return cls(**creation_args) + return cls(**creation_args) # type: ignore @property def line_part(self): + # type: () -> Text # FIXME: This should actually be canonicalized but for now we have to # simply lowercase it and replace underscores, since full canonicalization # also replaces dots and that doesn't actually work when querying the index - return "{0}".format(normalize_name(self.name)) + return 
normalize_name(self.name) @property def pipfile_part(self): - pipfile_dict = attr.asdict(self, filter=filter_none).copy() + # type: () -> Dict[Text, Any] + pipfile_dict = attr.asdict(self, filter=filter_none).copy() # type: ignore if "version" not in pipfile_dict: pipfile_dict["version"] = "*" + if "_parsed_line" in pipfile_dict: + pipfile_dict.pop("_parsed_line") name = pipfile_dict.pop("name") return {name: pipfile_dict} @@ -132,40 +1140,42 @@ def pipfile_part(self): ) -@attr.s(slots=True) +@attr.s(slots=True, cmp=True, hash=True) class FileRequirement(object): """File requirements for tar.gz installable files or wheels or setup.py containing directories.""" #: Path to the relevant `setup.py` location - setup_path = attr.ib(default=None) + setup_path = attr.ib(default=None, cmp=True) # type: Optional[Text] #: path to hit - without any of the VCS prefixes (like git+ / http+ / etc) - path = attr.ib(default=None, validator=attr.validators.optional(validate_path)) + path = attr.ib(default=None, cmp=True) # type: Optional[Text] #: Whether the package is editable - editable = attr.ib(default=False) + editable = attr.ib(default=False, cmp=True) # type: bool #: Extras if applicable - extras = attr.ib(default=attr.Factory(list)) - _uri_scheme = attr.ib(default=None) + extras = attr.ib(default=attr.Factory(tuple), cmp=True) # type: Tuple[Text] + _uri_scheme = attr.ib(default=None, cmp=True) # type: Optional[Text] #: URI of the package - uri = attr.ib() + uri = attr.ib(cmp=True) # type: Optional[Text] #: Link object representing the package to clone - link = attr.ib() + link = attr.ib(cmp=True) # type: Optional[Link] #: PyProject Requirements - pyproject_requires = attr.ib(default=attr.Factory(list)) + pyproject_requires = attr.ib(default=attr.Factory(tuple), cmp=True) # type: Tuple #: PyProject Build System - pyproject_backend = attr.ib(default=None) + pyproject_backend = attr.ib(default=None, cmp=True) # type: Optional[Text] #: PyProject Path - pyproject_path = 
attr.ib(default=None) - _has_hashed_name = attr.ib(default=False) + pyproject_path = attr.ib(default=None, cmp=True) # type: Optional[Text] + #: Setup metadata e.g. dependencies + _setup_info = attr.ib(default=None, cmp=True) # type: Optional[SetupInfo] + _has_hashed_name = attr.ib(default=False, cmp=True) # type: bool + _parsed_line = attr.ib(default=None, cmp=False, hash=True) # type: Optional[Line] #: Package name - name = attr.ib() + name = attr.ib(cmp=True) # type: Optional[Text] #: A :class:`~pkg_resources.Requirement` isntance - req = attr.ib() - #: Setup metadata e.g. dependencies - setup_info = attr.ib(default=None) + req = attr.ib(cmp=True) # type: Optional[PackagingRequirement] @classmethod def get_link_from_line(cls, line): + # type: (Text) -> LinkInfo """Parse link information from given requirement line. Return a 6-tuple: @@ -199,15 +1209,16 @@ def get_link_from_line(cls, line): # Git allows `git@github.com...` lines that are not really URIs. # Add "ssh://" so we can parse correctly, and restore afterwards. - fixed_line = add_ssh_scheme_to_git_uri(line) - added_ssh_scheme = fixed_line != line + fixed_line = add_ssh_scheme_to_git_uri(line) # type: Text + added_ssh_scheme = fixed_line != line # type: bool # We can assume a lot of things if this is a local filesystem path. if "://" not in fixed_line: - p = Path(fixed_line).absolute() - path = p.as_posix() - uri = p.as_uri() - link = create_link(uri) + p = Path(fixed_line).absolute() # type: Path + path = p.as_posix() # type: Optional[Text] + uri = p.as_uri() # type: Text + link = create_link(uri) # type: Link + relpath = None # type: Optional[Text] try: relpath = get_converted_relative_path(path) except ValueError: @@ -216,19 +1227,17 @@ def get_link_from_line(cls, line): # This is an URI. We'll need to perform some elaborated parsing. 
- parsed_url = urllib_parse.urlsplit(fixed_line) - original_url = parsed_url._replace() - if added_ssh_scheme and ":" in parsed_url.netloc: - original_netloc, original_path_start = parsed_url.netloc.rsplit(":", 1) - uri_path = "/{0}{1}".format(original_path_start, parsed_url.path) - parsed_url = original_url._replace(netloc=original_netloc, path=uri_path) + parsed_url = urllib_parse.urlsplit(fixed_line) # type: SplitResult + original_url = parsed_url._replace() # type: SplitResult # Split the VCS part out if needed. - original_scheme = parsed_url.scheme + original_scheme = parsed_url.scheme # type: Text + vcs_type = None # type: Optional[Text] if "+" in original_scheme: - vcs_type, scheme = original_scheme.split("+", 1) + scheme = None # type: Optional[Text] + vcs_type, _, scheme = original_scheme.partition("+") parsed_url = parsed_url._replace(scheme=scheme) - prefer = "uri" + prefer = "uri" # type: Text else: vcs_type = None prefer = "file" @@ -268,41 +1277,90 @@ def get_link_from_line(cls, line): @property def setup_py_dir(self): + # type: () -> Optional[Text] if self.setup_path: return os.path.dirname(os.path.abspath(self.setup_path)) + return None @property def dependencies(self): - build_deps = [] - setup_deps = [] - deps = {} + # type: () -> Tuple[Dict[Text, PackagingRequirement], List[Union[Text, PackagingRequirement]], List[Text]] + build_deps = [] # type: List[Union[Text, PackagingRequirement]] + setup_deps = [] # type: List[Text] + deps = {} # type: Dict[Text, PackagingRequirement] if self.setup_info: setup_info = self.setup_info.as_dict() deps.update(setup_info.get("requires", {})) setup_deps.extend(setup_info.get("setup_requires", [])) build_deps.extend(setup_info.get("build_requires", [])) if self.pyproject_requires: - build_deps.extend(self.pyproject_requires) + build_deps.extend(list(self.pyproject_requires)) + setup_deps = list(set(setup_deps)) + build_deps = list(set(build_deps)) return deps, setup_deps, build_deps + def __attrs_post_init__(self): 
+ # type: () -> None + if self.name is None and self.parsed_line: + if self.parsed_line.setup_info: + self._setup_info = self.parsed_line.setup_info + if self.parsed_line.setup_info.name: + self.name = self.parsed_line.setup_info.name + if self.req is None and self._parsed_line.requirement is not None: + self.req = self._parsed_line.requirement + if self._parsed_line and self._parsed_line.ireq and not self._parsed_line.ireq.req: + if self.req is not None: + self._parsed_line._ireq.req = self.req + + @property + def setup_info(self): + # type: () -> SetupInfo + from .setup_info import SetupInfo + if self._setup_info is None and self.parsed_line: + if self.parsed_line.setup_info: + if not self._parsed_line.setup_info.name: + self._parsed_line._setup_info.get_info() + self._setup_info = self.parsed_line.setup_info + elif self.parsed_line.ireq and not self.parsed_line.is_wheel: + self._setup_info = SetupInfo.from_ireq(self.parsed_line.ireq) + else: + if self.link and not self.link.is_wheel: + self._setup_info = Line(self.line_part).setup_info + self._setup_info.get_info() + return self._setup_info + + @setup_info.setter + def setup_info(self, setup_info): + # type: (SetupInfo) -> None + self._setup_info = setup_info + if self._parsed_line: + self._parsed_line._setup_info = setup_info + @uri.default def get_uri(self): + # type: () -> Text if self.path and not self.uri: self._uri_scheme = "path" return pip_shims.shims.path_to_url(os.path.abspath(self.path)) - elif getattr(self, "req", None) and getattr(self.req, "url"): + elif getattr(self, "req", None) and self.req is not None and getattr(self.req, "url"): return self.req.url + elif self.link is not None: + return self.link.url_without_fragment + return "" @name.default def get_name(self): + # type: () -> Text loc = self.path or self.uri if loc and not self._uri_scheme: self._uri_scheme = "path" if self.path else "file" name = None - if getattr(self, "req", None) and getattr(self.req, "name") and self.req.name is not 
None: - if self.is_direct_url: + hashed_loc = hashlib.sha256(loc.encode("utf-8")).hexdigest() + hashed_name = hashed_loc[-7:] + if getattr(self, "req", None) and self.req is not None and getattr(self.req, "name") and self.req.name is not None: + if self.is_direct_url and self.req.name != hashed_name: return self.req.name - if self.link and self.link.egg_fragment and not self._has_hashed_name: + if self.link and self.link.egg_fragment and self.link.egg_fragment != hashed_name: return self.link.egg_fragment elif self.link and self.link.is_wheel: from pip_shims import Wheel @@ -311,26 +1369,36 @@ def get_name(self): elif self.link and ((self.link.scheme == "file" or self.editable) or ( self.path and self.setup_path and os.path.isfile(str(self.setup_path)) )): + _ireq = None if self.editable: - line = pip_shims.shims.path_to_url(self.setup_py_dir) + if self.setup_path: + line = pip_shims.shims.path_to_url(self.setup_py_dir) + else: + line = pip_shims.shims.path_to_url(os.path.abspath(self.path)) if self.extras: line = "{0}[{1}]".format(line, ",".join(self.extras)) _ireq = pip_shims.shims.install_req_from_editable(line) else: - line = Path(self.setup_py_dir).as_posix() + if self.setup_path: + line = Path(self.setup_py_dir).as_posix() + else: + line = Path(os.path.abspath(self.path)).as_posix() if self.extras: line = "{0}[{1}]".format(line, ",".join(self.extras)) _ireq = pip_shims.shims.install_req_from_line(line) - if getattr(self, "req", None): + if getattr(self, "req", None) is not None: _ireq.req = copy.deepcopy(self.req) - else: - if self.extras: - _ireq.extras = set(self.extras) + if self.extras and _ireq and not _ireq.extras: + _ireq.extras = set(self.extras) from .setup_info import SetupInfo subdir = getattr(self, "subdirectory", None) - setupinfo = SetupInfo.from_ireq(_ireq, subdir=subdir) + if self.setup_info is not None: + setupinfo = self.setup_info + else: + setupinfo = SetupInfo.from_ireq(_ireq, subdir=subdir) if setupinfo: - self.setup_info = setupinfo + 
self._setup_info = setupinfo + self.setup_info.get_info() setupinfo_dict = setupinfo.as_dict() setup_name = setupinfo_dict.get("name", None) if setup_name: @@ -339,23 +1407,22 @@ def get_name(self): build_requires = setupinfo_dict.get("build_requires") build_backend = setupinfo_dict.get("build_backend") if build_requires and not self.pyproject_requires: - self.pyproject_requires = build_requires + self.pyproject_requires = tuple(build_requires) if build_backend and not self.pyproject_backend: self.pyproject_backend = build_backend - hashed_loc = hashlib.sha256(loc.encode("utf-8")).hexdigest() - hashed_name = hashed_loc[-7:] if not name or name.lower() == "unknown": self._has_hashed_name = True name = hashed_name - else: - self._has_hashed_name = False name_in_link = getattr(self.link, "egg_fragment", "") if self.link else "" - if not self._has_hashed_name and name_in_link != name: + if not self._has_hashed_name and name_in_link != name and self.link is not None: self.link = create_link("{0}#egg={1}".format(self.link.url, name)) - return name + if name is not None: + return name + return "" @link.default def get_link(self): + # type: () -> pip_shims.shims.Link target = "{0}".format(self.uri) if hasattr(self, "name") and not self._has_hashed_name: target = "{0}#egg={1}".format(target, self.name) @@ -364,9 +1431,31 @@ def get_link(self): @req.default def get_requirement(self): + # type: () -> RequirementType + if self.name is None: + if self._parsed_line is not None and self._parsed_line.name is not None: + self.name = self._parsed_line.name + else: + raise ValueError( + "Failed to generate a requirement: missing name for {0!r}".format(self) + ) + if self._parsed_line: + try: + # initialize specifiers to make sure we capture them + self._parsed_line.specifiers + except Exception: + pass + req = copy.deepcopy(self._parsed_line.requirement) + return req + req = init_requirement(normalize_name(self.name)) req.editable = False - req.line = self.link.url_without_fragment + 
if self.link is not None: + req.line = self.link.url_without_fragment + elif self.uri is not None: + req.line = self.uri + else: + req.line = self.name if self.path and self.link and self.link.scheme.startswith("file"): req.local_file = True req.path = self.path @@ -383,8 +1472,33 @@ def get_requirement(self): req.link = self.link return req + @property + def parsed_line(self): + # type: () -> Optional[Line] + if self._parsed_line is None: + self._parsed_line = Line(self.line_part) + return self._parsed_line + + @property + def is_local(self): + # type: () -> bool + uri = getattr(self, "uri", None) + if uri is None: + if getattr(self, "path", None) and self.path is not None: + uri = pip_shims.shims.path_to_url(os.path.abspath(self.path)) + elif getattr(self, "req", None) and self.req is not None and ( + getattr(self.req, "url") and self.req.url is not None + ): + uri = self.req.url + if uri and is_file_url(uri): + return True + return False + @property def is_remote_artifact(self): + # type: () -> bool + if self.link is None: + return False return ( any( self.link.scheme.startswith(scheme) @@ -396,25 +1510,44 @@ def is_remote_artifact(self): @property def is_direct_url(self): + # type: () -> bool + if self._parsed_line is not None and self._parsed_line.is_direct_url: + return True return self.is_remote_artifact @property def formatted_path(self): + # type: () -> Optional[Text] if self.path: path = self.path if not isinstance(path, Path): path = Path(path) return path.as_posix() - return + return None @classmethod def create( - cls, path=None, uri=None, editable=False, extras=None, link=None, vcs_type=None, - name=None, req=None, line=None, uri_scheme=None, setup_path=None, relpath=None + cls, + path=None, # type: Optional[Text] + uri=None, # type: Text + editable=False, # type: bool + extras=None, # type: Optional[Tuple[Text]] + link=None, # type: Link + vcs_type=None, # type: Optional[Any] + name=None, # type: Optional[Text] + req=None, # type: Optional[Any] + 
line=None, # type: Optional[Text] + uri_scheme=None, # type: Text + setup_path=None, # type: Optional[Any] + relpath=None, # type: Optional[Any] + parsed_line=None, # type: Optional[Line] ): + # type: (...) -> FileRequirement + if parsed_line is None and line is not None: + parsed_line = Line(line) if relpath and not path: path = relpath - if not path and uri and link.scheme == "file": + if not path and uri and link is not None and link.scheme == "file": path = os.path.abspath(pip_shims.shims.url_to_path(unquote(uri))) try: path = get_converted_relative_path(path) @@ -427,24 +1560,25 @@ def create( if path and not uri: uri = unquote(pip_shims.shims.path_to_url(os.path.abspath(path))) if not link: - link = create_link(uri) + link = cls.get_link_from_line(uri).link if not uri: uri = unquote(link.url_without_fragment) if not extras: - extras = [] + extras = () pyproject_path = None - if path is not None: - pyproject_requires = get_pyproject(os.path.abspath(path)) - pyproject_backend = None pyproject_requires = None + pyproject_backend = None + if path is not None: + pyproject_requires = get_pyproject(path) if pyproject_requires is not None: pyproject_requires, pyproject_backend = pyproject_requires + pyproject_requires = tuple(pyproject_requires) if path: - pyproject_path = Path(path).joinpath("pyproject.toml") - if not pyproject_path.exists(): - pyproject_path = None - if not setup_path and path is not None: - setup_path = Path(path).joinpath("setup.py") + setup_paths = get_setup_paths(path) + if setup_paths["pyproject_toml"] is not None: + pyproject_path = Path(setup_paths["pyproject_toml"]) + if setup_paths["setup_py"] is not None: + setup_path = Path(setup_paths["setup_py"]).as_posix() if setup_path and isinstance(setup_path, Path): setup_path = setup_path.as_posix() creation_kwargs = { @@ -456,56 +1590,89 @@ def create( "link": link, "uri": uri, "pyproject_requires": pyproject_requires, - "pyproject_backend": pyproject_backend + "pyproject_backend": 
pyproject_backend, + "path": path or relpath, + "parsed_line": parsed_line } if vcs_type: - creation_kwargs["vcs_type"] = vcs_type - _line = None - if not name: - _line = unquote(link.url_without_fragment) if link.url else uri + creation_kwargs["vcs"] = vcs_type + if name: + creation_kwargs["name"] = name + _line = None # type: Optional[Text] + ireq = None # type: Optional[InstallRequirement] + setup_info = None # type: Optional[SetupInfo] + if parsed_line: + if parsed_line.name: + name = parsed_line.name + if parsed_line.setup_info: + name = parsed_line.setup_info.as_dict().get("name", name) + if not name or not parsed_line: + if link is not None and link.url_without_fragment is not None: + _line = unquote(link.url_without_fragment) + if name: + _line = "{0}#egg={1}".format(_line, name) + if extras and extras_to_string(extras) not in _line: + _line = "{0}[{1}]".format(_line, ",".join(sorted(set(extras)))) + elif uri is not None: + _line = unquote(uri) + else: + _line = unquote(line) if editable: - if extras: + if extras and extras_to_string(extras) not in _line and ( + (link and link.scheme == "file") or (uri and uri.startswith("file")) + or (not uri and not link) + ): _line = "{0}[{1}]".format(_line, ",".join(sorted(set(extras)))) - ireq = pip_shims.shims.install_req_from_editable(_line) + if ireq is None: + ireq = pip_shims.shims.install_req_from_editable(_line) else: _line = path if (uri_scheme and uri_scheme == "path") else _line - if extras: + if extras and extras_to_string(extras) not in _line: _line = "{0}[{1}]".format(_line, ",".join(sorted(set(extras)))) - ireq = pip_shims.shims.install_req_from_line(_line) - if extras and not ireq.extras: + if ireq is None: + ireq = pip_shims.shims.install_req_from_line(_line) + if editable: + _line = "-e {0}".format(editable) + parsed_line = Line(_line) + if ireq is None: + ireq = parsed_line.ireq + if extras and ireq is not None and not ireq.extras: ireq.extras = set(extras) - setup_info = SetupInfo.from_ireq(ireq) + 
if setup_info is None: + setup_info = SetupInfo.from_ireq(ireq) setupinfo_dict = setup_info.as_dict() setup_name = setupinfo_dict.get("name", None) - if setup_name: + if setup_name is not None: name = setup_name - build_requires = setupinfo_dict.get("build_requires", []) - build_backend = setupinfo_dict.get("build_backend", []) + build_requires = setupinfo_dict.get("build_requires", ()) + build_backend = setupinfo_dict.get("build_backend", "") if not creation_kwargs.get("pyproject_requires") and build_requires: - creation_kwargs["pyproject_requires"] = build_requires + creation_kwargs["pyproject_requires"] = tuple(build_requires) if not creation_kwargs.get("pyproject_backend") and build_backend: creation_kwargs["pyproject_backend"] = build_backend - creation_kwargs["setup_info"] = setup_info + if setup_info is None and parsed_line and parsed_line.setup_info: + setup_info = parsed_line.setup_info + creation_kwargs["setup_info"] = setup_info if path or relpath: creation_kwargs["path"] = relpath if relpath else path - if req: + if req is not None: creation_kwargs["req"] = req - if creation_kwargs.get("req") and line and not getattr(creation_kwargs["req"], "line", None): - creation_kwargs["req"].line = line + creation_req = creation_kwargs.get("req") + if creation_kwargs.get("req") is not None: + creation_req_line = getattr(creation_req, "line", None) + if creation_req_line is None and line is not None: + creation_kwargs["req"].line = line # type: ignore + if parsed_line and parsed_line.name: + if name and len(parsed_line.name) != 7 and len(name) == 7: + name = parsed_line.name if name: creation_kwargs["name"] = name - cls_inst = cls(**creation_kwargs) - if not _line: - if editable and uri_scheme == "path": - _line = relpath if relpath else path - else: - _line = unquote(cls_inst.link.url_without_fragment) or cls_inst.uri - _line = "{0}#egg={1}".format(line, cls_inst.name) if not cls_inst._has_hashed_name else _line - cls_inst.req.line = line if line else _line + 
cls_inst = cls(**creation_kwargs) # type: ignore return cls_inst @classmethod - def from_line(cls, line, extras=None): + def from_line(cls, line, extras=None, parsed_line=None): + # type: (Text, Optional[Tuple[Text]], Optional[Line]) -> FileRequirement line = line.strip('"').strip("'") link = None path = None @@ -515,7 +1682,9 @@ def from_line(cls, line, extras=None): name = None req = None if not extras: - extras = [] + extras = () + else: + extras = tuple(extras) if not any([is_installable_file(line), is_valid_url(line), is_file_url(line)]): try: req = init_requirement(line) @@ -535,8 +1704,13 @@ def from_line(cls, line, extras=None): "setup_path": setup_path, "uri_scheme": prefer, "line": line, - "extras": extras + "extras": extras, + # "name": name, } + if req is not None: + arg_dict["req"] = req + if parsed_line is not None: + arg_dict["parsed_line"] = parsed_line if link and link.is_wheel: from pip_shims import Wheel @@ -549,6 +1723,7 @@ def from_line(cls, line, extras=None): @classmethod def from_pipfile(cls, name, pipfile): + # type: (Text, Dict[Text, Any]) -> FileRequirement # Parse the values out. After this dance we should have two variables: # path - Local filesystem path. # uri - Absolute URI that is parsable with urlsplit. 
@@ -580,7 +1755,7 @@ def from_pipfile(cls, name, pipfile): if not uri: uri = pip_shims.shims.path_to_url(path) - link = create_link(uri) + link = cls.get_link_from_line(uri).link arg_dict = { "name": name, "path": path, @@ -588,116 +1763,157 @@ def from_pipfile(cls, name, pipfile): "editable": pipfile.get("editable", False), "link": link, "uri_scheme": uri_scheme, + "extras": pipfile.get("extras", None), } - if link.scheme != "file" and not pipfile.get("editable", False): - arg_dict["line"] = "{0}@ {1}".format(name, link.url_without_fragment) + + extras = pipfile.get("extras", ()) + if extras: + extras = tuple(extras) + line = "" + if pipfile.get("editable", False) and uri_scheme == "path": + line = "{0}".format(path) + if extras: + line = "{0}{1}".format(line, extras_to_string(extras)) + else: + if name: + if extras: + line_name = "{0}{1}".format(name, extras_to_string(extras)) + else: + line_name = "{0}".format(name) + line = "{0}#egg={1}".format(unquote(link.url_without_fragment), line_name) + else: + line = unquote(link.url) + if extras: + line = "{0}{1}".format(line, extras_to_string(extras)) + if "subdirectory" in pipfile: + arg_dict["subdirectory"] = pipfile["subdirectory"] + line = "{0}&subdirectory={1}".format(pipfile["subdirectory"]) + if pipfile.get("editable", False): + line = "-e {0}".format(line) + arg_dict["line"] = line return cls.create(**arg_dict) @property def line_part(self): + # type: () -> Text + link_url = None # type: Optional[Text] + seed = None # type: Optional[Text] + if self.link is not None: + link_url = unquote(self.link.url_without_fragment) if self._uri_scheme and self._uri_scheme == "path": # We may need any one of these for passing to pip - seed = self.path or unquote(self.link.url_without_fragment) or self.uri + seed = self.path or link_url or self.uri elif (self._uri_scheme and self._uri_scheme == "file") or ( (self.link.is_artifact or self.link.is_wheel) and self.link.url ): - seed = unquote(self.link.url_without_fragment) or 
self.uri + seed = link_url or self.uri # add egg fragments to remote artifacts (valid urls only) - if not self._has_hashed_name and self.is_remote_artifact: + if not self._has_hashed_name and self.is_remote_artifact and seed is not None: seed += "#egg={0}".format(self.name) editable = "-e " if self.editable else "" + if seed is None: + raise ValueError("Could not calculate url for {0!r}".format(self)) return "{0}{1}".format(editable, seed) @property def pipfile_part(self): + # type: () -> Dict[Text, Dict[Text, Any]] excludes = [ - "_base_line", "_has_hashed_name", "setup_path", "pyproject_path", - "pyproject_requires", "pyproject_backend", "setup_info" + "_base_line", "_has_hashed_name", "setup_path", "pyproject_path", "_uri_scheme", + "pyproject_requires", "pyproject_backend", "_setup_info", "_parsed_line" ] - filter_func = lambda k, v: bool(v) is True and k.name not in excludes + filter_func = lambda k, v: bool(v) is True and k.name not in excludes # noqa pipfile_dict = attr.asdict(self, filter=filter_func).copy() - name = pipfile_dict.pop("name") + name = pipfile_dict.pop("name", None) + if name is None: + if self.name: + name = self.name + elif self.parsed_line and self.parsed_line.name: + name = self.name = self.parsed_line.name + elif self.setup_info and self.setup_info.name: + name = self.name = self.setup_info.name if "_uri_scheme" in pipfile_dict: pipfile_dict.pop("_uri_scheme") # For local paths and remote installable artifacts (zipfiles, etc) collision_keys = {"file", "uri", "path"} + collision_order = ["file", "uri", "path"] # type: List[Text] + key_match = next(iter(k for k in collision_order if k in pipfile_dict.keys())) if self._uri_scheme: dict_key = self._uri_scheme target_key = ( dict_key if dict_key in pipfile_dict - else next( - (k for k in ("file", "uri", "path") if k in pipfile_dict), None - ) + else key_match ) - if target_key: + if target_key is not None: winning_value = pipfile_dict.pop(target_key) - collisions = (k for k in collision_keys 
if k in pipfile_dict) + collisions = [k for k in collision_keys if k in pipfile_dict] for key in collisions: pipfile_dict.pop(key) pipfile_dict[dict_key] = winning_value elif ( self.is_remote_artifact - or self.link.is_artifact + or (self.link is not None and self.link.is_artifact) and (self._uri_scheme and self._uri_scheme == "file") ): dict_key = "file" # Look for uri first because file is a uri format and this is designed # to make sure we add file keys to the pipfile as a replacement of uri - target_key = next( - (k for k in ("file", "uri", "path") if k in pipfile_dict), None - ) - winning_value = pipfile_dict.pop(target_key) + if key_match is not None: + winning_value = pipfile_dict.pop(key_match) key_to_remove = (k for k in collision_keys if k in pipfile_dict) for key in key_to_remove: pipfile_dict.pop(key) pipfile_dict[dict_key] = winning_value else: - collisions = [key for key in ["path", "file", "uri"] if key in pipfile_dict] + collisions = [key for key in collision_order if key in pipfile_dict.keys()] if len(collisions) > 1: for k in collisions[1:]: pipfile_dict.pop(k) return {name: pipfile_dict} -@attr.s(slots=True) +@attr.s(slots=True, hash=True) class VCSRequirement(FileRequirement): #: Whether the repository is editable - editable = attr.ib(default=None) + editable = attr.ib(default=None) # type: Optional[bool] #: URI for the repository - uri = attr.ib(default=None) + uri = attr.ib(default=None) # type: Optional[Text] #: path to the repository, if it's local - path = attr.ib(default=None, validator=attr.validators.optional(validate_path)) + path = attr.ib(default=None, validator=attr.validators.optional(validate_path)) # type: Optional[Text] #: vcs type, i.e. 
git/hg/svn - vcs = attr.ib(validator=attr.validators.optional(validate_vcs), default=None) + vcs = attr.ib(validator=attr.validators.optional(validate_vcs), default=None) # type: Optional[Text] #: vcs reference name (branch / commit / tag) - ref = attr.ib(default=None) + ref = attr.ib(default=None) # type: Optional[Text] #: Subdirectory to use for installation if applicable - subdirectory = attr.ib(default=None) - _repo = attr.ib(default=None) - _base_line = attr.ib(default=None) - name = attr.ib() - link = attr.ib() - req = attr.ib() + subdirectory = attr.ib(default=None) # type: Optional[Text] + _repo = attr.ib(default=None) # type: Optional[VCSRepository] + _base_line = attr.ib(default=None) # type: Optional[Text] + name = attr.ib() # type: Text + link = attr.ib() # type: Optional[pip_shims.shims.Link] + req = attr.ib() # type: Optional[RequirementType] def __attrs_post_init__(self): + # type: () -> None if not self.uri: if self.path: self.uri = pip_shims.shims.path_to_url(self.path) - split = urllib_parse.urlsplit(self.uri) - scheme, rest = split[0], split[1:] - vcs_type = "" - if "+" in scheme: - vcs_type, scheme = scheme.split("+", 1) - vcs_type = "{0}+".format(vcs_type) - new_uri = urllib_parse.urlunsplit((scheme,) + rest[:-1] + ("",)) - new_uri = "{0}{1}".format(vcs_type, new_uri) - self.uri = new_uri + if self.uri is not None: + split = urllib_parse.urlsplit(self.uri) + scheme, rest = split[0], split[1:] + vcs_type = "" + if "+" in scheme: + vcs_type, scheme = scheme.split("+", 1) + vcs_type = "{0}+".format(vcs_type) + new_uri = urllib_parse.urlunsplit((scheme,) + rest[:-1] + ("",)) + new_uri = "{0}{1}".format(vcs_type, new_uri) + self.uri = new_uri @link.default def get_link(self): + # type: () -> pip_shims.shims.Link uri = self.uri if self.uri else pip_shims.shims.path_to_url(self.path) - return build_vcs_link( + vcs_uri = build_vcs_uri( self.vcs, add_ssh_scheme_to_git_uri(uri), name=self.name, @@ -705,9 +1921,11 @@ def get_link(self): 
subdirectory=self.subdirectory, extras=self.extras, ) + return self.get_link_from_line(vcs_uri).link @name.default def get_name(self): + # type: () -> Optional[Text] return ( self.link.egg_fragment or self.req.name if getattr(self, "req", None) @@ -716,15 +1934,43 @@ def get_name(self): @property def vcs_uri(self): + # type: () -> Optional[Text] uri = self.uri if not any(uri.startswith("{0}+".format(vcs)) for vcs in VCS_LIST): uri = "{0}+{1}".format(self.vcs, uri) return uri + @property + def setup_info(self): + if self._parsed_line and self._parsed_line.setup_info: + if not self._parsed_line.setup_info.name: + self._parsed_line._setup_info.get_info() + return self._parsed_line.setup_info + if self._repo: + from .setup_info import SetupInfo + self._setup_info = SetupInfo.from_ireq(Line(self._repo.checkout_directory).ireq) + self._setup_info.get_info() + return self._setup_info + ireq = self.parsed_line.ireq + from .setup_info import SetupInfo + self._setup_info = SetupInfo.from_ireq(ireq) + return self._setup_info + + @setup_info.setter + def setup_info(self, setup_info): + self._setup_info = setup_info + if self._parsed_line: + self._parsed_line.setup_info = setup_info + @req.default def get_requirement(self): + # type: () -> PackagingRequirement name = self.name or self.link.egg_fragment - url = self.uri or self.link.url_without_fragment + url = None + if self.uri: + url = self.uri + elif self.link is not None: + url = self.link.url_without_fragment if not name: raise ValueError( "pipenv requires an #egg fragment for version controlled " @@ -733,9 +1979,19 @@ def get_requirement(self): ) req = init_requirement(canonicalize_name(self.name)) req.editable = self.editable - if not getattr(req, "url") and self.uri: - req.url = self.uri - req.line = self.link.url + if not getattr(req, "url"): + if url is not None: + url = add_ssh_scheme_to_git_uri(url) + elif self.uri is not None: + url = self.parse_link_from_line(self.uri).link.url_without_fragment + if 
url.startswith("git+file:/") and not url.startswith("git+file:///"): + url = url.replace("git+file:/", "git+file:///") + if url: + req.url = url + line = url if url else self.vcs_uri + if self.editable: + line = "-e {0}".format(line) + req.line = line if self.ref: req.revision = self.ref if self.extras: @@ -751,22 +2007,26 @@ def get_requirement(self): and "git+git@" in self.uri ): req.line = self.uri - req.url = self.uri + url = self.link.url_without_fragment + if url.startswith("git+file:/") and not url.startswith("git+file:///"): + url = url.replace("git+file:/", "git+file:///") + req.url = url return req - @property - def is_local(self): - if is_file_url(self.uri): - return True - return False - @property def repo(self): + # type: () -> VCSRepository if self._repo is None: - self._repo = self.get_vcs_repo() + if self._parsed_line and self._parsed_line.vcsrepo: + self._repo = self._parsed_line.vcsrepo + else: + self._repo = self.get_vcs_repo() + if self._parsed_line: + self._parsed_line.vcsrepo = self._repo return self._repo def get_checkout_dir(self, src_dir=None): + # type: (Optional[Text]) -> Text src_dir = os.environ.get("PIP_SRC", None) if not src_dir else src_dir checkout_dir = None if self.is_local: @@ -776,22 +2036,20 @@ def get_checkout_dir(self, src_dir=None): if path and os.path.exists(path): checkout_dir = os.path.abspath(path) return checkout_dir + if src_dir is not None: + checkout_dir = os.path.join(os.path.abspath(src_dir), self.name) + mkdir_p(src_dir) + return checkout_dir return os.path.join(create_tracked_tempdir(prefix="requirementslib"), self.name) - def get_vcs_repo(self, src_dir=None): + def get_vcs_repo(self, src_dir=None, checkout_dir=None): + # type: (Optional[Text], Optional[Text]) -> VCSRepository from .vcs import VCSRepository - checkout_dir = self.get_checkout_dir(src_dir=src_dir) - link = build_vcs_link( - self.vcs, - self.uri, - name=self.name, - ref=self.ref, - subdirectory=self.subdirectory, - extras=self.extras, - ) + if 
checkout_dir is None: + checkout_dir = self.get_checkout_dir(src_dir=src_dir) vcsrepo = VCSRepository( - url=link.url, + url=self.link.url, name=self.name, ref=self.ref if self.ref else None, checkout_directory=checkout_dir, @@ -811,16 +2069,18 @@ def get_vcs_repo(self, src_dir=None): pyproject_info = get_pyproject(checkout_dir) if pyproject_info is not None: pyproject_requires, pyproject_backend = pyproject_info - self.pyproject_requires = pyproject_requires + self.pyproject_requires = tuple(pyproject_requires) self.pyproject_backend = pyproject_backend return vcsrepo def get_commit_hash(self): + # type: () -> Text hash_ = None hash_ = self.repo.get_commit_hash() return hash_ def update_repo(self, src_dir=None, ref=None): + # type: (Optional[Text], Optional[Text]) -> Text if ref: self.ref = ref else: @@ -835,23 +2095,48 @@ def update_repo(self, src_dir=None, ref=None): @contextmanager def locked_vcs_repo(self, src_dir=None): + # type: (Optional[Text]) -> Generator[VCSRepository, None, None] if not src_dir: src_dir = create_tracked_tempdir(prefix="requirementslib-", suffix="-src") vcsrepo = self.get_vcs_repo(src_dir=src_dir) - self.req.revision = vcsrepo.get_commit_hash() + if not self.req: + if self.parsed_line is not None: + self.req = self.parsed_line.requirement + else: + self.req = self.get_requirement() + revision = self.req.revision = vcsrepo.get_commit_hash() # Remove potential ref in the end of uri after ref is parsed if "@" in self.link.show_url and "@" in self.uri: - uri, ref = self.uri.rsplit("@", 1) - checkout = self.req.revision - if checkout and ref in checkout: + uri, ref = split_ref_from_uri(self.uri) + checkout = revision + if checkout and ref and ref in checkout: self.uri = uri - - yield vcsrepo + orig_repo = self._repo self._repo = vcsrepo + if self._parsed_line: + self._parsed_line.vcsrepo = vcsrepo + if self._setup_info: + _old_setup_info = self._setup_info + self._setup_info = attr.evolve( + self._setup_info, requirements=(), 
_extras_requirements=(), + build_requires=(), setup_requires=(), version=None, metadata=None + ) + if self.parsed_line: + self._parsed_line.vcsrepo = vcsrepo + # self._parsed_line._specifier = "=={0}".format(self.setup_info.version) + # self._parsed_line.specifiers = self._parsed_line._specifier + if self.req: + self.req.specifier = SpecifierSet("=={0}".format(self.setup_info.version)) + try: + yield self._repo + except Exception: + self._repo = orig_repo + raise @classmethod def from_pipfile(cls, name, pipfile): + # type: (Text, Dict[Text, Union[List[Text], Text, bool]]) -> VCSRequirement creation_args = {} pipfile_keys = [ k @@ -894,28 +2179,69 @@ def from_pipfile(cls, name, pipfile): else: creation_args[key] = pipfile.get(key) creation_args["name"] = name - return cls(**creation_args) + cls_inst = cls(**creation_args) + return cls_inst @classmethod - def from_line(cls, line, editable=None, extras=None): + def from_line(cls, line, editable=None, extras=None, parsed_line=None): + # type: (Text, Optional[bool], Optional[Tuple[Text]], Optional[Line]) -> VCSRequirement relpath = None + if parsed_line is None: + parsed_line = Line(line) + if editable: + parsed_line.editable = editable + if extras: + parsed_line.extras = extras if line.startswith("-e "): editable = True line = line.split(" ", 1)[1] + if "@" in line: + parsed = urllib_parse.urlparse(add_ssh_scheme_to_git_uri(line)) + if not parsed.scheme: + possible_name, _, line = line.partition("@") + possible_name = possible_name.strip() + line = line.strip() + possible_name, extras = pip_shims.shims._strip_extras(possible_name) + name = possible_name + line = "{0}#egg={1}".format(line, name) vcs_type, prefer, relpath, path, uri, link = cls.get_link_from_line(line) if not extras and link.egg_fragment: name, extras = pip_shims.shims._strip_extras(link.egg_fragment) - if extras: - extras = parse_extras(extras) else: - name = link.egg_fragment + name, _ = pip_shims.shims._strip_extras(link.egg_fragment) + if extras: + 
extras = parse_extras(extras) + else: + line, extras = pip_shims.shims._strip_extras(line) + if extras: + extras = tuple(extras) subdirectory = link.subdirectory_fragment ref = None - if "@" in link.path and "@" in uri: - uri, _, ref = uri.rpartition("@") + if uri: + uri, ref = split_ref_from_uri(uri) + if path is not None and "@" in path: + path, _ref = split_ref_from_uri(path) + if ref is None: + ref = _ref if relpath and "@" in relpath: - relpath, ref = relpath.rsplit("@", 1) - return cls( + relpath, ref = split_ref_from_uri(relpath) + + creation_args = { + "name": name if name else parsed_line.name, + "path": relpath or path, + "editable": editable, + "extras": extras, + "link": link, + "vcs_type": vcs_type, + "line": line, + "uri": uri, + "uri_scheme": prefer, + "parsed_line": parsed_line + } + if relpath: + creation_args["relpath"] = relpath + # return cls.create(**creation_args) + cls_inst = cls( name=name, ref=ref, vcs=vcs_type, @@ -926,10 +2252,17 @@ def from_line(cls, line, editable=None, extras=None): uri=uri, extras=extras, base_line=line, + parsed_line=parsed_line ) + if cls_inst.req and ( + cls_inst._parsed_line.ireq and not cls_inst.parsed_line.ireq.req + ): + cls_inst._parsed_line._ireq.req = cls_inst.req + return cls_inst @property def line_part(self): + # type: () -> Text """requirements.txt compatible line part sans-extras""" if self.is_local: base_link = self.link @@ -941,21 +2274,26 @@ def line_part(self): else "{0}" ) base = final_format.format(self.vcs_uri) - elif self._base_line: + elif self._parsed_line is not None and self._parsed_line.is_direct_url: + return self._parsed_line.line_with_prefix + elif getattr(self, "_base_line", None): base = self._base_line else: - base = self.link.url - if base and self.extras and not extras_to_string(self.extras) in base: + base = getattr(self, "link", self.get_link()).url + if base and self.extras and extras_to_string(self.extras) not in base: if self.subdirectory: base = 
"{0}".format(self.get_link().url) else: base = "{0}{1}".format(base, extras_to_string(sorted(self.extras))) - if self.editable: + if "git+file:/" in base and "git+file:///" not in base: + base = base.replace("git+file:/", "git+file:///") + if self.editable and not base.startswith("-e "): base = "-e {0}".format(base) return base @staticmethod def _choose_vcs_source(pipfile): + # type: (Dict[Text, Union[List[Text], Text, bool]]) -> Dict[Text, Union[List[Text], Text, bool]] src_keys = [k for k in pipfile.keys() if k in ["path", "uri", "file"]] if src_keys: chosen_key = first(src_keys) @@ -968,41 +2306,79 @@ def _choose_vcs_source(pipfile): @property def pipfile_part(self): + # type: () -> Dict[Text, Dict[Text, Union[List[Text], Text, bool]]] excludes = [ "_repo", "_base_line", "setup_path", "_has_hashed_name", "pyproject_path", - "pyproject_requires", "pyproject_backend", "setup_info" + "pyproject_requires", "pyproject_backend", "_setup_info", "_parsed_line", + "_uri_scheme" ] - filter_func = lambda k, v: bool(v) is True and k.name not in excludes + filter_func = lambda k, v: bool(v) is True and k.name not in excludes # noqa pipfile_dict = attr.asdict(self, filter=filter_func).copy() + name = pipfile_dict.pop("name", None) + if name is None: + if self.name: + name = self.name + elif self.parsed_line and self.parsed_line.name: + name = self.name = self.parsed_line.name + elif self.setup_info and self.setup_info.name: + name = self.name = self.setup_info.name if "vcs" in pipfile_dict: pipfile_dict = self._choose_vcs_source(pipfile_dict) - name, _ = pip_shims.shims._strip_extras(pipfile_dict.pop("name")) + name, _ = pip_shims.shims._strip_extras(name) return {name: pipfile_dict} -@attr.s +@attr.s(cmp=True, hash=True) class Requirement(object): - name = attr.ib() - vcs = attr.ib(default=None, validator=attr.validators.optional(validate_vcs)) - req = attr.ib(default=None) - markers = attr.ib(default=None) - specifiers = 
attr.ib(validator=attr.validators.optional(validate_specifiers)) - index = attr.ib(default=None) - editable = attr.ib(default=None) - hashes = attr.ib(default=attr.Factory(list), converter=list) - extras = attr.ib(default=attr.Factory(list)) - abstract_dep = attr.ib(default=None) - _ireq = None - - @name.default + _name = attr.ib(cmp=True) # type: Text + vcs = attr.ib(default=None, validator=attr.validators.optional(validate_vcs), cmp=True) # type: Optional[Text] + req = attr.ib(default=None, cmp=True) # type: Optional[Union[VCSRequirement, FileRequirement, NamedRequirement]] + markers = attr.ib(default=None, cmp=True) # type: Optional[Text] + _specifiers = attr.ib(validator=attr.validators.optional(validate_specifiers), cmp=True) # type: Optional[Text] + index = attr.ib(default=None, cmp=True) # type: Optional[Text] + editable = attr.ib(default=None, cmp=True) # type: Optional[bool] + hashes = attr.ib(factory=frozenset, converter=frozenset, cmp=True) # type: Optional[Tuple[Text]] + extras = attr.ib(default=attr.Factory(tuple), cmp=True) # type: Optional[Tuple[Text]] + abstract_dep = attr.ib(default=None, cmp=False) # type: Optional[AbstractDependency] + _line_instance = attr.ib(default=None, cmp=False) # type: Optional[Line] + _ireq = attr.ib(default=None, cmp=False) # type: Optional[pip_shims.InstallRequirement] + + def __hash__(self): + return hash(self.as_line()) + + @_name.default def get_name(self): + # type: () -> Optional[Text] return self.req.name + @property + def name(self): + # type: () -> Optional[Text] + if self._name is not None: + return self._name + name = None + if self.req and self.req.name: + name = self.req.name + elif self.req and self.is_file_or_url and self.req.setup_info: + name = self.req.setup_info.name + self._name = name + return name + @property def requirement(self): + # type: () -> Optional[PackagingRequirement] return self.req.req + def add_hashes(self, hashes): + # type: (Union[List, Set, Tuple]) -> Requirement + if 
isinstance(hashes, six.string_types): + new_hashes = set(self.hashes).add(hashes) + else: + new_hashes = set(self.hashes) | set(hashes) + return attr.evolve(self, hashes=frozenset(new_hashes)) + def get_hashes_as_pip(self, as_list=False): + # type: (bool) -> Union[Text, List[Text]] if self.hashes: if as_list: return [HASH_STRING.format(h) for h in self.hashes] @@ -1011,10 +2387,12 @@ def get_hashes_as_pip(self, as_list=False): @property def hashes_as_pip(self): + # type: () -> Union[Text, List[Text]] self.get_hashes_as_pip() @property def markers_as_pip(self): + # type: () -> Text if self.markers: return " ; {0}".format(self.markers).replace('"', "'") @@ -1022,6 +2400,7 @@ def markers_as_pip(self): @property def extras_as_pip(self): + # type: () -> Text if self.extras: return "[{0}]".format( ",".join(sorted([extra.lower() for extra in self.extras])) @@ -1029,8 +2408,9 @@ def extras_as_pip(self): return "" - @property + @cached_property def commit_hash(self): + # type: () -> Optional[Text] if not self.is_vcs: return None commit_hash = None @@ -1038,125 +2418,189 @@ def commit_hash(self): commit_hash = repo.get_commit_hash() return commit_hash - @specifiers.default + @_specifiers.default def get_specifiers(self): - if self.req and self.req.req.specifier: + # type: () -> Text + if self.req and self.req.req and self.req.req.specifier: return specs_to_string(self.req.req.specifier) - return + return "" + + def update_name_from_path(self, path): + from .setup_info import get_metadata + metadata = get_metadata(path) + name = self.name + if metadata is not None: + name = metadata.get("name") + if name is not None: + if self.req.name is None: + self.req.name = name + if self.req.req and self.req.req.name is None: + self.req.req.name = name + if self._line_instance._name is None: + self._line_instance.name = name + if self.req._parsed_line._name is None: + self.req._parsed_line.name = name + if self.req._setup_info and self.req._setup_info.name is None: + 
self.req._setup_info.name = name + + @property + def line_instance(self): + # type: () -> Optional[Line] + if self._line_instance is None: + if self.req._parsed_line is not None: + self._line_instance = self.req.parsed_line + else: + include_extras = True + include_specifiers = True + if self.is_vcs: + include_extras = False + if self.is_file_or_url or self.is_vcs or not self._specifiers: + include_specifiers = False + + parts = [ + self.req.line_part, + self.extras_as_pip if include_extras else "", + self._specifiers if include_specifiers else "", + self.markers_as_pip, + ] + line = "".join(parts) + if line is None: + return None + self._line_instance = Line(line) + return self._line_instance + + @line_instance.setter + def line_instance(self, line_instance): + # type: (Line) -> None + if self.req and not self.req._parsed_line: + self.req._parsed_line = line_instance + self._line_instance = line_instance + + @property + def specifiers(self): + # type: () -> Optional[Text] + if self._specifiers: + return self._specifiers + else: + specs = self.get_specifiers() + if specs: + self._specifiers = specs + return specs + if self.is_named and not self._specifiers: + self._specifiers = self.req.version + elif not self.editable and not self.is_named: + if self.line_instance and self.line_instance.setup_info and self.line_instance.setup_info.version: + self._specifiers = "=={0}".format(self.req.setup_info.version) + elif self.req.parsed_line.specifiers and not self._specifiers: + self._specifiers = specs_to_string(self.req.parsed_line.specifiers) + elif self.line_instance.specifiers and not self._specifiers: + self._specifiers = specs_to_string(self.line_instance.specifiers) + elif not self._specifiers and (self.is_file_or_url or self.is_vcs): + try: + setupinfo_dict = self.run_requires() + except Exception: + setupinfo_dict = None + if setupinfo_dict is not None: + self._specifiers = "=={0}".format(setupinfo_dict.get("version")) + if self._specifiers: + specset = 
SpecifierSet(self._specifiers) + if self.line_instance and not self.line_instance.specifiers: + self.line_instance.specifiers = specset + if self.req and self.req.parsed_line and not self.req.parsed_line.specifiers: + self.req._parsed_line.specifiers = specset + if self.req and self.req.req and not self.req.req.specifier: + self.req.req.specifier = specset + return self._specifiers @property def is_vcs(self): + # type: () -> bool return isinstance(self.req, VCSRequirement) + @property + def build_backend(self): + # type: () -> Optional[Text] + if self.is_vcs or (self.is_file_or_url and self.req.is_local): + setup_info = self.run_requires() + build_backend = setup_info.get("build_backend") + return build_backend + return "setuptools.build_meta" + + @property + def uses_pep517(self): + # type: () -> bool + if self.build_backend: + return True + return False + @property def is_file_or_url(self): + # type: () -> bool return isinstance(self.req, FileRequirement) @property def is_named(self): + # type: () -> bool return isinstance(self.req, NamedRequirement) @property + def is_wheel(self): + # type: () -> bool + if not self.is_named and self.req.link is not None and self.req.link.is_wheel: + return True + return False + + @cached_property def normalized_name(self): + # type: () -> Text return canonicalize_name(self.name) def copy(self): return attr.evolve(self) @classmethod + @lru_cache() def from_line(cls, line): + # type: (Text) -> Requirement if isinstance(line, pip_shims.shims.InstallRequirement): line = format_requirement(line) - hashes = None - if "--hash=" in line: - hashes = line.split(" --hash=") - line, hashes = hashes[0], hashes[1:] - editable = line.startswith("-e ") - line = line.split(" ", 1)[1] if editable else line - line, markers = split_markers_from_line(line) - line, extras = pip_shims.shims._strip_extras(line) - if extras: - extras = parse_extras(extras) - line = line.strip('"').strip("'").strip() - line_with_prefix = "-e {0}".format(line) if editable 
else line - vcs = None - # Installable local files and installable non-vcs urls are handled - # as files, generally speaking - line_is_vcs = is_vcs(line) - # check for pep-508 compatible requirements - name, _, possible_url = line.partition("@") - if is_installable_file(line) or ( - (is_valid_url(possible_url) or is_file_url(line) or is_valid_url(line)) and - not (line_is_vcs or is_vcs(possible_url)) - ): - r = FileRequirement.from_line(line_with_prefix, extras=extras) - elif line_is_vcs: - r = VCSRequirement.from_line(line_with_prefix, extras=extras) - vcs = r.vcs + parsed_line = Line(line) + r = None # type: Optional[Union[VCSRequirement, FileRequirement, NamedRequirement]] + if ((parsed_line.is_file and parsed_line.is_installable) or parsed_line.is_url) and not parsed_line.is_vcs: + r = file_req_from_parsed_line(parsed_line) + elif parsed_line.is_vcs: + r = vcs_req_from_parsed_line(parsed_line) elif line == "." and not is_installable_file(line): raise RequirementError( "Error parsing requirement %s -- are you sure it is installable?" 
% line ) else: - specs = "!=<>~" - spec_matches = set(specs) & set(line) - version = None - name = line - if spec_matches: - spec_idx = min((line.index(match) for match in spec_matches)) - name = line[:spec_idx] - version = line[spec_idx:] - if not extras: - name, extras = pip_shims.shims._strip_extras(name) - if extras: - extras = parse_extras(extras) - if version: - name = "{0}{1}".format(name, version) - r = NamedRequirement.from_line(line) + r = named_req_from_parsed_line(parsed_line) req_markers = None - if markers: - req_markers = PackagingRequirement("fakepkg; {0}".format(markers)) - r.req.marker = getattr(req_markers, "marker", None) if req_markers else None - r.req.local_file = getattr(r.req, "local_file", False) - name = getattr(r.req, "name", None) - if not name: - name = getattr(r.req, "project_name", None) - r.req.name = name - if not name: - name = getattr(r.req, "key", None) - if name: - r.req.name = name + if parsed_line.markers: + req_markers = PackagingRequirement("fakepkg; {0}".format(parsed_line.markers)) + if r is not None and r.req is not None: + r.req.marker = getattr(req_markers, "marker", None) if req_markers else None args = { "name": r.name, - "vcs": vcs, + "vcs": parsed_line.vcs, "req": r, - "markers": markers, - "editable": editable, + "markers": parsed_line.markers, + "editable": parsed_line.editable, + "line_instance": parsed_line } - if extras: - extras = sorted(dedup([extra.lower() for extra in extras])) + if parsed_line.extras: + extras = tuple(sorted(dedup([extra.lower() for extra in parsed_line.extras]))) args["extras"] = extras - r.req.extras = extras - r.extras = extras - elif r.extras: - args["extras"] = sorted(dedup([extra.lower() for extra in r.extras])) - if hashes: - args["hashes"] = hashes + if r is not None: + r.extras = extras + elif r is not None and r.extras is not None: + args["extras"] = tuple(sorted(dedup([extra.lower() for extra in r.extras]))) # type: ignore + if r.req is not None: + r.req.extras = args["extras"] 
+ if parsed_line.hashes: + args["hashes"] = tuple(parsed_line.hashes) # type: ignore cls_inst = cls(**args) - if not cls_inst.is_named and not cls_inst.editable and not name: - if cls_inst.is_vcs: - ireq = pip_shims.shims.install_req_from_req(cls_inst.as_line(include_hashes=False)) - info = SetupInfo.from_ireq(ireq) - if info is not None: - info_dict = info.as_dict() - cls_inst.req.setup_info = info - else: - info_dict = {} - else: - info_dict = cls_inst.run_requires() - found_name = info_dict.get("name", old_name) - if old_name != found_name: - cls_inst.req.req.line.replace(old_name, found_name) return cls_inst @classmethod @@ -1190,41 +2634,27 @@ def from_pipfile(cls, name, pipfile): if markers: markers = str(markers) req_markers = PackagingRequirement("fakepkg; {0}".format(markers)) - r.req.marker = getattr(req_markers, "marker", None) - r.req.specifier = SpecifierSet(_pipfile["version"]) + if r.req is not None: + r.req.marker = req_markers.marker extras = _pipfile.get("extras") - r.req.extras = ( - sorted(dedup([extra.lower() for extra in extras])) if extras else [] - ) + if r.req: + if r.req.specifier: + r.req.specifier = SpecifierSet(_pipfile["version"]) + r.req.extras = ( + tuple(sorted(dedup([extra.lower() for extra in extras]))) if extras else () + ) args = { "name": r.name, "vcs": vcs, "req": r, "markers": markers, - "extras": _pipfile.get("extras"), + "extras": tuple(_pipfile.get("extras", ())), "editable": _pipfile.get("editable", False), "index": _pipfile.get("index"), } if any(key in _pipfile for key in ["hash", "hashes"]): args["hashes"] = _pipfile.get("hashes", [pipfile.get("hash")]) cls_inst = cls(**args) - if cls_inst.is_named: - cls_inst.req.req.line = cls_inst.as_line() - old_name = cls_inst.req.req.name or cls_inst.req.name - if not cls_inst.is_named and not cls_inst.editable and not name: - if cls_inst.is_vcs: - ireq = pip_shims.shims.install_req_from_req(cls_inst.as_line(include_hashes=False)) - info = SetupInfo.from_ireq(ireq) - if info is 
not None: - info_dict = info.as_dict() - cls_inst.req.setup_info = info - else: - info_dict = {} - else: - info_dict = cls_inst.run_requires() - found_name = info_dict.get("name", old_name) - if old_name != found_name: - cls_inst.req.req.line.replace(old_name, found_name) return cls_inst def as_line( @@ -1281,13 +2711,15 @@ def as_line( return line def get_markers(self): + # type: () -> Marker markers = self.markers if markers: fake_pkg = PackagingRequirement("fakepkg; {0}".format(markers)) - markers = fake_pkg.markers + markers = fake_pkg.marker return markers def get_specifier(self): + # type: () -> Union[SpecifierSet, LegacySpecifier] try: return Specifier(self.specifiers) except InvalidSpecifier: @@ -1315,7 +2747,9 @@ def constraint_line(self): @property def is_direct_url(self): - return self.is_file_or_url and self.req.is_direct_url + return self.is_file_or_url and self.req.is_direct_url or ( + self.line_instance.is_direct_url or self.req.parsed_line.is_direct_url + ) def as_pipfile(self): good_keys = ( @@ -1334,10 +2768,18 @@ def as_pipfile(self): name = self.name if "markers" in req_dict and req_dict["markers"]: req_dict["markers"] = req_dict["markers"].replace('"', "'") + if not self.req.name: + name_carriers = (self.req, self, self.line_instance, self.req.parsed_line) + name_options = [ + getattr(carrier, "name", None) + for carrier in name_carriers if carrier is not None + ] + req_name = next(iter(n for n in name_options if n is not None), None) + self.req.name = req_name + req_name, dict_from_subreq = self.req.pipfile_part.popitem() base_dict = { - k: v - for k, v in self.req.pipfile_part[name].items() - if k not in ["req", "link"] + k: v for k, v in dict_from_subreq.items() + if k not in ["req", "link", "_setup_info"] } base_dict.update(req_dict) conflicting_keys = ("file", "path", "uri") @@ -1354,24 +2796,33 @@ def as_pipfile(self): except AttributeError: hashes.append(_hash) base_dict["hashes"] = sorted(hashes) + if "extras" in base_dict: + 
base_dict["extras"] = list(base_dict["extras"]) if len(base_dict.keys()) == 1 and "version" in base_dict: base_dict = base_dict.get("version") return {name: base_dict} def as_ireq(self): - ireq_line = self.as_line(include_hashes=False) - if self.editable or self.req.editable: - if ireq_line.startswith("-e "): - ireq_line = ireq_line[len("-e ") :] - with ensure_setup_py(self.req.setup_path): - ireq = pip_shims.shims.install_req_from_editable(ireq_line) - else: - ireq = pip_shims.shims.install_req_from_line(ireq_line) + if self.line_instance and self.line_instance.ireq: + return self.line_instance.ireq + elif getattr(self.req, "_parsed_line", None) and self.req._parsed_line.ireq: + return self.req._parsed_line.ireq + kwargs = { + "include_hashes": False, + } + if (self.is_file_or_url and self.req.is_local) or self.is_vcs: + kwargs["include_markers"] = False + ireq_line = self.as_line(**kwargs) + ireq = Line(ireq_line).ireq if not getattr(ireq, "req", None): ireq.req = self.req.req + if (self.is_file_or_url and self.req.is_local) or self.is_vcs: + if getattr(ireq, "req", None) and getattr(ireq.req, "marker", None): + ireq.req.marker = None else: ireq.req.extras = self.req.req.extras - ireq.req.marker = self.req.req.marker + if not ((self.is_file_or_url and self.req.is_local) or self.is_vcs): + ireq.req.marker = self.req.req.marker return ireq @property @@ -1455,6 +2906,8 @@ def find_all_matches(self, sources=None, finder=None): def run_requires(self, sources=None, finder=None): if self.req and self.req.setup_info is not None: info_dict = self.req.setup_info.as_dict() + elif self.line_instance and self.line_instance.setup_info is not None: + info_dict = self.line_instance.setup_info.as_dict() else: from .setup_info import SetupInfo if not finder: @@ -1465,7 +2918,7 @@ def run_requires(self, sources=None, finder=None): return {} info_dict = info.get_info() if self.req and not self.req.setup_info: - self.req.setup_info = info + self.req._setup_info = info if 
self.req._has_hashed_name and info_dict.get("name"): self.req.name = self.name = info_dict["name"] if self.req.req.name != info_dict["name"]: @@ -1480,3 +2933,69 @@ def merge_markers(self, markers): new_markers = Marker(" or ".join([str(m) for m in sorted(_markers)])) self.markers = str(new_markers) self.req.req.marker = new_markers + + +def file_req_from_parsed_line(parsed_line): + # type: (Line) -> FileRequirement + path = parsed_line.relpath if parsed_line.relpath else parsed_line.path + return FileRequirement( + setup_path=parsed_line.setup_py, + path=path, + editable=parsed_line.editable, + extras=parsed_line.extras, + uri_scheme=parsed_line.preferred_scheme, + link=parsed_line.link, + uri=parsed_line.uri, + pyproject_requires=tuple(parsed_line.pyproject_requires) if parsed_line.pyproject_requires else None, + pyproject_backend=parsed_line.pyproject_backend, + pyproject_path=Path(parsed_line.pyproject_toml) if parsed_line.pyproject_toml else None, + parsed_line=parsed_line, + name=parsed_line.name, + req=parsed_line.requirement + ) + + +def vcs_req_from_parsed_line(parsed_line): + # type: (Line) -> VCSRequirement + line = "{0}".format(parsed_line.line) + if parsed_line.editable: + line = "-e {0}".format(line) + link = create_link(build_vcs_uri( + vcs=parsed_line.vcs, + uri=parsed_line.url, + name=parsed_line.name, + ref=parsed_line.ref, + subdirectory=parsed_line.subdirectory, + extras=parsed_line.extras + )) + return VCSRequirement( + setup_path=parsed_line.setup_py, + path=parsed_line.path, + editable=parsed_line.editable, + vcs=parsed_line.vcs, + ref=parsed_line.ref, + subdirectory=parsed_line.subdirectory, + extras=parsed_line.extras, + uri_scheme=parsed_line.preferred_scheme, + link=link, + uri=parsed_line.uri, + pyproject_requires=tuple(parsed_line.pyproject_requires) if parsed_line.pyproject_requires else None, + pyproject_backend=parsed_line.pyproject_backend, + pyproject_path=Path(parsed_line.pyproject_toml) if parsed_line.pyproject_toml else None, + 
parsed_line=parsed_line, + name=parsed_line.name, + req=parsed_line.requirement, + base_line=line, + ) + + +def named_req_from_parsed_line(parsed_line): + # type: (Line) -> NamedRequirement + return NamedRequirement( + name=parsed_line.name, + version=parsed_line.specifier, + req=parsed_line.requirement, + extras=parsed_line.extras, + editable=parsed_line.editable, + parsed_line=parsed_line + ) diff --git a/pipenv/vendor/requirementslib/models/setup_info.py b/pipenv/vendor/requirementslib/models/setup_info.py index ec631e67b5..b0b55d4775 100644 --- a/pipenv/vendor/requirementslib/models/setup_info.py +++ b/pipenv/vendor/requirementslib/models/setup_info.py @@ -1,28 +1,47 @@ # -*- coding=utf-8 -*- +from __future__ import absolute_import, print_function + +import atexit import contextlib import os +import shutil import sys import attr -import packaging.version import packaging.specifiers import packaging.utils +import packaging.version +import pep517.envbuild +import pep517.wrappers import six +from appdirs import user_cache_dir +from cached_property import cached_property +from distlib.wheel import Wheel +from packaging.markers import Marker +from six.moves import configparser +from six.moves.urllib.parse import unquote, urlparse, urlunparse + +from vistir.compat import Iterable, Path, lru_cache +from vistir.contextmanagers import cd, temp_path +from vistir.misc import run +from vistir.path import create_tracked_tempdir, ensure_mkdir_p, mkdir_p, rmtree + +from ..environment import MYPY_RUNNING +from ..exceptions import RequirementError +from .utils import ( + get_name_variants, + get_pyproject, + init_requirement, + split_vcs_method_from_uri, + strip_extras_markers_from_requirement, + get_default_pyproject_backend +) try: from setuptools.dist import distutils except ImportError: import distutils -from appdirs import user_cache_dir -from six.moves import configparser -from six.moves.urllib.parse import unquote -from vistir.compat import Path, Iterable -from 
vistir.contextmanagers import cd -from vistir.misc import run -from vistir.path import create_tracked_tempdir, ensure_mkdir_p, mkdir_p - -from .utils import init_requirement, get_pyproject try: from os import scandir @@ -30,6 +49,18 @@ from scandir import scandir +if MYPY_RUNNING: + from typing import Any, Dict, List, Generator, Optional, Union, Tuple, TypeVar, Text, Set + from pip_shims.shims import InstallRequirement, PackageFinder + from pkg_resources import ( + PathMetadata, DistInfoDistribution, Requirement as PkgResourcesRequirement + ) + from packaging.requirements import Requirement as PackagingRequirement + TRequirement = TypeVar("TRequirement") + RequirementType = TypeVar('RequirementType', covariant=True, bound=PackagingRequirement) + MarkerType = TypeVar('MarkerType', covariant=True, bound=Marker) + + CACHE_DIR = os.environ.get("PIPENV_CACHE_DIR", user_cache_dir("pipenv")) # The following are necessary for people who like to use "if __name__" conditionals @@ -38,8 +69,35 @@ _setup_distribution = None +def pep517_subprocess_runner(cmd, cwd=None, extra_environ=None): + # type: (List[Text], Optional[Text], Optional[Dict[Text, Text]]) -> None + """The default method of calling the wrapper subprocess.""" + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + + run(cmd, cwd=cwd, env=env, block=True, combine_stderr=True, return_object=False, + write_to_stdout=False, nospin=True) + + +class BuildEnv(pep517.envbuild.BuildEnvironment): + def pip_install(self, reqs): + cmd = [sys.executable, '-m', 'pip', 'install', '--ignore-installed', '--prefix', + self.path] + list(reqs) + run(cmd, block=True, combine_stderr=True, return_object=False, + write_to_stdout=False, nospin=True) + + +class HookCaller(pep517.wrappers.Pep517HookCaller): + def __init__(self, source_dir, build_backend): + self.source_dir = os.path.abspath(source_dir) + self.build_backend = build_backend + self._subprocess_runner = pep517_subprocess_runner + + 
@contextlib.contextmanager def _suppress_distutils_logs(): + # type: () -> Generator[None, None, None] """Hack to hide noise generated by `setup.py develop`. There isn't a good way to suppress them now, so let's monky-patch. @@ -57,18 +115,45 @@ def _log(log, level, msg, args): distutils.log.Log._log = f +def build_pep517(source_dir, build_dir, config_settings=None, dist_type="wheel"): + if config_settings is None: + config_settings = {} + requires, backend = get_pyproject(source_dir) + hookcaller = HookCaller(source_dir, backend) + if dist_type == "sdist": + get_requires_fn = hookcaller.get_requires_for_build_sdist + build_fn = hookcaller.build_sdist + else: + get_requires_fn = hookcaller.get_requires_for_build_wheel + build_fn = hookcaller.build_wheel + + with BuildEnv() as env: + env.pip_install(requires) + reqs = get_requires_fn(config_settings) + env.pip_install(reqs) + return build_fn(build_dir, config_settings) + + @ensure_mkdir_p(mode=0o775) -def _get_src_dir(): +def _get_src_dir(root): + # type: (Text) -> Text src = os.environ.get("PIP_SRC") if src: return src virtual_env = os.environ.get("VIRTUAL_ENV") - if virtual_env: + if virtual_env is not None: return os.path.join(virtual_env, "src") - return os.path.join(os.getcwd(), "src") # Match pip's behavior. 
+ if root is not None: + # Intentionally don't match pip's behavior here -- this is a temporary copy + src_dir = create_tracked_tempdir(prefix="requirementslib-", suffix="-src") + else: + src_dir = os.path.join(root, "src") + return src_dir +@lru_cache() def ensure_reqs(reqs): + # type: (List[Union[Text, PkgResourcesRequirement]]) -> List[PkgResourcesRequirement] import pkg_resources if not isinstance(reqs, Iterable): raise TypeError("Expecting an Iterable, got %r" % reqs) @@ -78,31 +163,33 @@ def ensure_reqs(reqs): continue if isinstance(req, six.string_types): req = pkg_resources.Requirement.parse("{0}".format(str(req))) + # req = strip_extras_markers_from_requirement(req) new_reqs.append(req) return new_reqs -def _prepare_wheel_building_kwargs(ireq): - download_dir = os.path.join(CACHE_DIR, "pkgs") +def _prepare_wheel_building_kwargs(ireq=None, src_root=None, src_dir=None, editable=False): + # type: (Optional[InstallRequirement], Optional[Text], Optional[Text], bool) -> Dict[Text, Text] + download_dir = os.path.join(CACHE_DIR, "pkgs") # type: Text mkdir_p(download_dir) - wheel_download_dir = os.path.join(CACHE_DIR, "wheels") + wheel_download_dir = os.path.join(CACHE_DIR, "wheels") # type: Text mkdir_p(wheel_download_dir) - if ireq.source_dir is not None: - src_dir = ireq.source_dir - elif ireq.editable: - src_dir = _get_src_dir() - else: - src_dir = create_tracked_tempdir(prefix="reqlib-src") + if src_dir is None: + if editable and src_root is not None: + src_dir = src_root + elif ireq is None and src_root is not None: + src_dir = _get_src_dir(root=src_root) # type: Text + elif ireq is not None and ireq.editable and src_root is not None: + src_dir = _get_src_dir(root=src_root) + else: + src_dir = create_tracked_tempdir(prefix="reqlib-src") - # This logic matches pip's behavior, although I don't fully understand the - # intention. I guess the idea is to build editables in-place, otherwise out - # of the source tree? 
- if ireq.editable: - build_dir = src_dir - else: - build_dir = create_tracked_tempdir(prefix="reqlib-build") + # Let's always resolve in isolation + if src_dir is None: + src_dir = create_tracked_tempdir(prefix="reqlib-src") + build_dir = create_tracked_tempdir(prefix="reqlib-build") return { "build_dir": build_dir, @@ -112,95 +199,275 @@ def _prepare_wheel_building_kwargs(ireq): } -def iter_egginfos(path, pkg_name=None): +def iter_metadata(path, pkg_name=None, metadata_type="egg-info"): + # type: (Text, Optional[Text], Text) -> Generator + if pkg_name is not None: + pkg_variants = get_name_variants(pkg_name) + non_matching_dirs = [] for entry in scandir(path): if entry.is_dir(): - if not entry.name.endswith("egg-info"): - for dir_entry in iter_egginfos(entry.path, pkg_name=pkg_name): - yield dir_entry - elif pkg_name is None or entry.name.startswith(pkg_name.replace("-", "_")): - yield entry + entry_name, ext = os.path.splitext(entry.name) + if ext.endswith(metadata_type): + if pkg_name is None or entry_name.lower() in pkg_variants: + yield entry + elif not entry.name.endswith(metadata_type): + non_matching_dirs.append(entry) + for entry in non_matching_dirs: + for dir_entry in iter_metadata(entry.path, pkg_name=pkg_name, metadata_type=metadata_type): + yield dir_entry def find_egginfo(target, pkg_name=None): - egg_dirs = (egg_dir for egg_dir in iter_egginfos(target, pkg_name=pkg_name)) + # type: (Text, Optional[Text]) -> Generator + egg_dirs = ( + egg_dir for egg_dir in iter_metadata(target, pkg_name=pkg_name) + if egg_dir is not None + ) if pkg_name: - yield next(iter(egg_dirs), None) + yield next(iter(eggdir for eggdir in egg_dirs if eggdir is not None), None) else: for egg_dir in egg_dirs: yield egg_dir -def get_metadata(path, pkg_name=None): +def find_distinfo(target, pkg_name=None): + # type: (Text, Optional[Text]) -> Generator + dist_dirs = ( + dist_dir for dist_dir in iter_metadata(target, pkg_name=pkg_name, metadata_type="dist-info") + if dist_dir is not 
None + ) if pkg_name: - pkg_name = packaging.utils.canonicalize_name(pkg_name) + yield next(iter(dist for dist in dist_dirs if dist is not None), None) + else: + for dist_dir in dist_dirs: + yield dist_dir + + +def get_metadata(path, pkg_name=None, metadata_type=None): + # type: (Text, Optional[Text], Optional[Text]) -> Dict[Text, Union[Text, List[RequirementType], Dict[Text, RequirementType]]] + metadata_dirs = [] + wheel_allowed = metadata_type == "wheel" or metadata_type is None + egg_allowed = metadata_type == "egg" or metadata_type is None egg_dir = next(iter(find_egginfo(path, pkg_name=pkg_name)), None) - if egg_dir is not None: + dist_dir = next(iter(find_distinfo(path, pkg_name=pkg_name)), None) + if dist_dir and wheel_allowed: + metadata_dirs.append(dist_dir) + if egg_dir and egg_allowed: + metadata_dirs.append(egg_dir) + matched_dir = next(iter(d for d in metadata_dirs if d is not None), None) + metadata_dir = None + base_dir = None + if matched_dir is not None: import pkg_resources + metadata_dir = os.path.abspath(matched_dir.path) + base_dir = os.path.dirname(metadata_dir) + dist = None + distinfo_dist = None + egg_dist = None + if wheel_allowed and dist_dir is not None: + distinfo_dist = next(iter(pkg_resources.find_distributions(base_dir)), None) + if egg_allowed and egg_dir is not None: + path_metadata = pkg_resources.PathMetadata(base_dir, metadata_dir) + egg_dist = next( + iter(pkg_resources.distributions_from_metadata(path_metadata.egg_info)), + None, + ) + dist = next(iter(d for d in (distinfo_dist, egg_dist) if d is not None), None) + if dist is not None: + return get_metadata_from_dist(dist) + return {} + + +@lru_cache() +def get_extra_name_from_marker(marker): + # type: (MarkerType) -> Optional[Text] + if not marker: + raise ValueError("Invalid value for marker: {0!r}".format(marker)) + if not getattr(marker, "_markers", None): + raise TypeError("Expecting a marker instance, received {0!r}".format(marker)) + for elem in marker._markers: + if 
isinstance(elem, tuple) and elem[0].value == "extra": + return elem[2].value + return None + + +def get_metadata_from_wheel(wheel_path): + # type: (Text) -> Dict[Any, Any] + if not isinstance(wheel_path, six.string_types): + raise TypeError("Expected string instance, received {0!r}".format(wheel_path)) + try: + dist = Wheel(wheel_path) + except Exception: + pass + metadata = dist.metadata + name = metadata.name + version = metadata.version + requires = [] + extras_keys = getattr(metadata, "extras", None) + extras = {} + for req in getattr(metadata, "run_requires", []): + parsed_req = init_requirement(req) + parsed_marker = parsed_req.marker + if parsed_marker: + extra = get_extra_name_from_marker(parsed_marker) + if extra is None: + requires.append(parsed_req) + continue + if extra not in extras: + extras[extra] = [] + parsed_req = strip_extras_markers_from_requirement(parsed_req) + extras[extra].append(parsed_req) + else: + requires.append(parsed_req) + return { + "name": name, + "version": version, + "requires": requires, + "extras": extras + } - egg_dir = os.path.abspath(egg_dir.path) - base_dir = os.path.dirname(egg_dir) - path_metadata = pkg_resources.PathMetadata(base_dir, egg_dir) - dist = next( - iter(pkg_resources.distributions_from_metadata(path_metadata.egg_info)), - None, - ) - if dist: - try: - requires = dist.requires() - except exception: - requires = [] - try: - dep_map = dist._build_dep_map() - except Exception: - dep_map = {} - deps = [] - extras = {} - for k in dep_map.keys(): - if k is None: - deps.extend(dep_map.get(k)) - continue - else: - extra = None - _deps = dep_map.get(k) - if k.startswith(":python_version"): - marker = k.replace(":", "; ") - else: - marker = "" - extra = "{0}".format(k) - _deps = ["{0}{1}".format(str(req), marker) for req in _deps] - _deps = ensure_reqs(_deps) - if extra: - extras[extra] = _deps - else: - deps.extend(_deps) - return { - "name": dist.project_name, - "version": dist.version, - "requires": requires, - 
"extras": extras - } +def get_metadata_from_dist(dist): + # type: (Union[PathMetadata, DistInfoDistribution]) -> Dict[Text, Union[Text, List[RequirementType], Dict[Text, RequirementType]]] + try: + requires = dist.requires() + except Exception: + requires = [] + try: + dep_map = dist._build_dep_map() + except Exception: + dep_map = {} + deps = [] + extras = {} + for k in dep_map.keys(): + if k is None: + deps.extend(dep_map.get(k)) + continue + else: + extra = None + _deps = dep_map.get(k) + if k.startswith(":python_version"): + marker = k.replace(":", "; ") + else: + marker = "" + extra = "{0}".format(k) + _deps = ["{0}{1}".format(str(req), marker) for req in _deps] + _deps = ensure_reqs(tuple(_deps)) + if extra: + extras[extra] = _deps + else: + deps.extend(_deps) + return { + "name": dist.project_name, + "version": dist.version, + "requires": requires, + "extras": extras + } + + +@attr.s(slots=True, frozen=True) +class BaseRequirement(object): + name = attr.ib(default="", cmp=True) # type: Text + requirement = attr.ib(default=None, cmp=True) # type: Optional[PkgResourcesRequirement] + + def __str__(self): + # type: () -> Text + return "{0}".format(str(self.requirement)) + + def as_dict(self): + # type: () -> Dict[Text, Optional[PkgResourcesRequirement]] + return {self.name: self.requirement} + + def as_tuple(self): + # type: () -> Tuple[Text, Optional[PkgResourcesRequirement]] + return (self.name, self.requirement) -@attr.s(slots=True) + @classmethod + @lru_cache() + def from_string(cls, line): + # type: (Text) -> BaseRequirement + line = line.strip() + req = init_requirement(line) + return cls.from_req(req) + + @classmethod + @lru_cache() + def from_req(cls, req): + # type: (PkgResourcesRequirement) -> BaseRequirement + name = None + key = getattr(req, "key", None) + name = getattr(req, "name", None) + project_name = getattr(req, "project_name", None) + if key is not None: + name = key + if name is None: + name = project_name + return cls(name=name, 
requirement=req) + + +@attr.s(slots=True, frozen=True) +class Extra(object): + name = attr.ib(default=None, cmp=True) # type: Text + requirements = attr.ib(factory=frozenset, cmp=True, type=frozenset) + + def __str__(self): + # type: () -> Text + return "{0}: {{{1}}}".format(self.section, ", ".join([r.name for r in self.requirements])) + + def add(self, req): + # type: (BaseRequirement) -> None + if req not in self.requirements: + return attr.evolve(self, requirements=frozenset(set(self.requirements).add(req))) + return self + + def as_dict(self): + # type: () -> Dict[Text, Tuple[RequirementType, ...]] + return {self.name: tuple([r.requirement for r in self.requirements])} + + +@attr.s(slots=True, cmp=True, hash=True) class SetupInfo(object): - name = attr.ib(type=str, default=None) - base_dir = attr.ib(type=Path, default=None) - version = attr.ib(type=packaging.version.Version, default=None) - requires = attr.ib(type=dict, default=attr.Factory(dict)) - build_requires = attr.ib(type=list, default=attr.Factory(list)) - build_backend = attr.ib(type=list, default=attr.Factory(list)) - setup_requires = attr.ib(type=dict, default=attr.Factory(list)) - python_requires = attr.ib(type=packaging.specifiers.SpecifierSet, default=None) - extras = attr.ib(type=dict, default=attr.Factory(dict)) - setup_cfg = attr.ib(type=Path, default=None) - setup_py = attr.ib(type=Path, default=None) - pyproject = attr.ib(type=Path, default=None) - ireq = attr.ib(default=None) - extra_kwargs = attr.ib(default=attr.Factory(dict), type=dict) + name = attr.ib(default=None, cmp=True) # type: Text + base_dir = attr.ib(default=None, cmp=True, hash=False) # type: Text + version = attr.ib(default=None, cmp=True) # type: Text + _requirements = attr.ib(type=frozenset, factory=frozenset, cmp=True, hash=True) + build_requires = attr.ib(type=tuple, default=attr.Factory(tuple), cmp=True) + build_backend = attr.ib(cmp=True) # type: Text + setup_requires = attr.ib(type=tuple, default=attr.Factory(tuple), 
cmp=True) + python_requires = attr.ib(type=packaging.specifiers.SpecifierSet, default=None, cmp=True) + _extras_requirements = attr.ib(type=tuple, default=attr.Factory(tuple), cmp=True) + setup_cfg = attr.ib(type=Path, default=None, cmp=True, hash=False) + setup_py = attr.ib(type=Path, default=None, cmp=True, hash=False) + pyproject = attr.ib(type=Path, default=None, cmp=True, hash=False) + ireq = attr.ib(default=None, cmp=True, hash=False) # type: Optional[InstallRequirement] + extra_kwargs = attr.ib(default=attr.Factory(dict), type=dict, cmp=False, hash=False) + metadata = attr.ib(default=None) # type: Optional[Tuple[Text]] + + @build_backend.default + def get_build_backend(self): + return get_default_pyproject_backend() + + @property + def requires(self): + # type: () -> Dict[Text, RequirementType] + return {req.name: req.requirement for req in self._requirements} + + @property + def extras(self): + # type: () -> Dict[Text, Optional[Any]] + extras_dict = {} + extras = set(self._extras_requirements) + for section, deps in extras: + if isinstance(deps, BaseRequirement): + extras_dict[section] = deps.requirement + elif isinstance(deps, (list, tuple)): + extras_dict[section] = [d.requirement for d in deps] + return extras_dict - def parse_setup_cfg(self): - if self.setup_cfg is not None and self.setup_cfg.exists(): + @classmethod + def get_setup_cfg(cls, setup_cfg_path): + # type: (Text) -> Dict[Text, Union[Text, None, Set[BaseRequirement], List[Text], Tuple[Text, Tuple[BaseRequirement]]]] + if os.path.exists(setup_cfg_path): default_opts = { "metadata": {"name": "", "version": ""}, "options": { @@ -212,56 +479,96 @@ def parse_setup_cfg(self): }, } parser = configparser.ConfigParser(default_opts) - parser.read(self.setup_cfg.as_posix()) + parser.read(setup_cfg_path) + results = {} if parser.has_option("metadata", "name"): - name = parser.get("metadata", "name") - if not self.name and name is not None: - self.name = name + results["name"] = parser.get("metadata", 
"name") if parser.has_option("metadata", "version"): - version = parser.get("metadata", "version") - if not self.version and version is not None: - self.version = version + results["version"] = parser.get("metadata", "version") + install_requires = set() # type: Set[BaseRequirement] if parser.has_option("options", "install_requires"): - self.requires.update( - { - dep.strip(): init_requirement(dep.strip()) - for dep in parser.get("options", "install_requires").split("\n") - if dep - } - ) + install_requires = set([ + BaseRequirement.from_string(dep) + for dep in parser.get("options", "install_requires").split("\n") + if dep + ]) + results["install_requires"] = install_requires if parser.has_option("options", "python_requires"): - python_requires = parser.get("options", "python_requires") - if python_requires and not self.python_requires: - self.python_requires = python_requires + results["python_requires"] = parser.get("options", "python_requires") + if parser.has_option("options", "build_requires"): + results["build_requires"] = parser.get("options", "build_requires") + extras_require = () if "options.extras_require" in parser.sections(): - self.extras.update( - { - section: [ - init_requirement(dep.strip()) - for dep in parser.get( - "options.extras_require", section - ).split("\n") - if dep - ] - for section in parser.options("options.extras_require") - if section not in ["options", "metadata"] - } - ) - if self.ireq.extras: - self.requires.update({ - extra: self.extras[extra] - for extra in self.ireq.extras if extra in self.extras - }) + extras_require = tuple([ + (section, tuple([ + BaseRequirement.from_string(dep) + for dep in parser.get( + "options.extras_require", section + ).split("\n") + if dep + ])) + for section in parser.options("options.extras_require") + if section not in ["options", "metadata"] + ]) + results["extras_require"] = extras_require + return results + + @property + def egg_base(self): + base = None # type: Optional[Text] + if 
self.setup_py.exists(): + base = self.setup_py.parent + elif self.pyproject.exists(): + base = self.pyproject.parent + elif self.setup_cfg.exists(): + base = self.setup_cfg.parent + if base is None: + base = Path(self.base_dir) + if base is None: + base = Path(self.extra_kwargs["src_dir"]) + egg_base = base.joinpath("reqlib-metadata") + if not egg_base.exists(): + atexit.register(rmtree, egg_base.as_posix()) + egg_base.mkdir(parents=True, exist_ok=True) + return egg_base.as_posix() + + def parse_setup_cfg(self): + # type: () -> None + if self.setup_cfg is not None and self.setup_cfg.exists(): + parsed = self.get_setup_cfg(self.setup_cfg.as_posix()) + if self.name is None: + self.name = parsed.get("name") + if self.version is None: + self.version = parsed.get("version") + build_requires = parsed.get("build_requires", []) + if self.build_requires: + self.build_requires = tuple(set(self.build_requires) | set(build_requires)) + self._requirements = frozenset( + set(self._requirements) | set(parsed["install_requires"]) + ) + if self.python_requires is None: + self.python_requires = parsed.get("python_requires") + if not self._extras_requirements: + self._extras_requirements = (parsed["extras_require"]) + else: + self._extras_requirements = self._extras_requirements + parsed["extras_require"] + if self.ireq is not None and self.ireq.extras: + for extra in self.ireq.extras: + if extra in self.extras: + extras_tuple = tuple([BaseRequirement.from_req(req) for req in self.extras[extra]]) + self._extras_requirements += ((extra, extras_tuple),) def run_setup(self): + # type: () -> None if self.setup_py is not None and self.setup_py.exists(): target_cwd = self.setup_py.parent.as_posix() - with cd(target_cwd), _suppress_distutils_logs(): + with temp_path(), cd(target_cwd), _suppress_distutils_logs(): # This is for you, Hynek # see https://github.com/hynek/environ_config/blob/69b1c8a/setup.py script_name = self.setup_py.as_posix() - args = ["egg_info"] + args = ["egg_info", 
"--egg-base", self.egg_base] g = {"__file__": script_name, "__name__": "__main__"} + sys.path.insert(0, os.path.dirname(os.path.abspath(script_name))) local_dict = {} if sys.version_info < (3, 5): save_argv = sys.argv @@ -297,67 +604,203 @@ def run_setup(self): self.python_requires = packaging.specifiers.SpecifierSet( dist.python_requires ) + if not self._extras_requirements: + self._extras_requirements = () if dist.extras_require and not self.extras: - self.extras = dist.extras_require + for extra, extra_requires in dist.extras_require: + extras_tuple = tuple( + BaseRequirement.from_req(req) for req in extra_requires + ) + self._extras_requirements += ((extra, extras_tuple),) install_requires = dist.get_requires() if not install_requires: install_requires = dist.install_requires if install_requires and not self.requires: - requirements = [init_requirement(req) for req in install_requires] - self.requires.update({req.key: req for req in requirements}) + requirements = set([ + BaseRequirement.from_req(req) for req in install_requires + ]) + if getattr(self.ireq, "extras", None): + for extra in self.ireq.extras: + requirements |= set(list(self.extras.get(extra, []))) + self._requirements = frozenset( + set(self._requirements) | requirements + ) if dist.setup_requires and not self.setup_requires: - self.setup_requires = dist.setup_requires + self.setup_requires = tuple(dist.setup_requires) if not self.version: self.version = dist.get_version() - def get_egg_metadata(self): - if self.setup_py is not None and self.setup_py.exists(): - metadata = get_metadata(self.setup_py.parent.as_posix(), pkg_name=self.name) - if metadata: - if not self.name: - self.name = metadata.get("name", self.name) - if not self.version: - self.version = metadata.get("version", self.version) - self.requires.update( - {req.key: req for req in metadata.get("requires", {})} - ) - if getattr(self.ireq, "extras", None): - for extra in self.ireq.extras: - extras = metadata.get("extras", {}).get(extra, 
[]) - if extras: - extras = ensure_reqs(extras) - self.extras[extra] = set(extras) - self.requires.update( - {req.key: req for req in extras if req is not None} - ) + @property + @lru_cache() + def pep517_config(self): + config = {} + config.setdefault("--global-option", []) + return config + + def build_wheel(self): + # type: () -> Text + if not self.pyproject.exists(): + build_requires = ", ".join(['"{0}"'.format(r) for r in self.build_requires]) + self.pyproject.write_text(u""" +[build-system] +requires = [{0}] +build-backend = "{1}" + """.format(build_requires, self.build_backend).strip()) + return build_pep517( + self.base_dir, self.extra_kwargs["build_dir"], + config_settings=self.pep517_config, + dist_type="wheel" + ) + + def build_sdist(self): + # type: () -> Text + if not self.pyproject.exists(): + build_requires = ", ".join(['"{0}"'.format(r) for r in self.build_requires]) + self.pyproject.write_text(u""" +[build-system] +requires = [{0}] +build-backend = "{1}" + """.format(build_requires, self.build_backend).strip()) + return build_pep517( + self.base_dir, self.extra_kwargs["build_dir"], + config_settings=self.pep517_config, + dist_type="sdist" + ) + + def build(self): + # type: () -> Optional[Text] + dist_path = None + try: + dist_path = self.build_wheel() + except Exception: + try: + dist_path = self.build_sdist() + self.get_egg_metadata(metadata_type="egg") + except Exception: + pass + else: + self.get_metadata_from_wheel( + os.path.join(self.extra_kwargs["build_dir"], dist_path) + ) + if not self.metadata or not self.name: + self.get_egg_metadata() + if not self.metadata or not self.name: + self.run_setup() + + def reload(self): + # type: () -> Dict[Text, Any] + """ + Wipe existing distribution info metadata for rebuilding. 
+ """ + for metadata_dir in os.listdir(self.egg_base): + shutil.rmtree(metadata_dir, ignore_errors=True) + self.metadata = None + self._requirements = frozenset() + self._extras_requirements = () + self.get_info() + + def get_metadata_from_wheel(self, wheel_path): + # type: (Text) -> Dict[Any, Any] + metadata_dict = get_metadata_from_wheel(wheel_path) + if metadata_dict: + self.populate_metadata(metadata_dict) + + def get_egg_metadata(self, metadata_dir=None, metadata_type=None): + # type: (Optional[Text], Optional[Text]) -> None + package_indicators = [self.pyproject, self.setup_py, self.setup_cfg] + # if self.setup_py is not None and self.setup_py.exists(): + metadata_dirs = [] + if any([fn is not None and fn.exists() for fn in package_indicators]): + metadata_dirs = [self.extra_kwargs["build_dir"], self.egg_base, self.extra_kwargs["src_dir"]] + if metadata_dir is not None: + metadata_dirs = [metadata_dir] + metadata_dirs + metadata = [ + get_metadata(d, pkg_name=self.name, metadata_type=metadata_type) + for d in metadata_dirs if os.path.exists(d) + ] + metadata = next(iter(d for d in metadata if d is not None), None) + if metadata is not None: + self.populate_metadata(metadata) + + def populate_metadata(self, metadata): + # type: (Dict[Any, Any]) -> None + _metadata = () + for k, v in metadata.items(): + if k == "extras" and isinstance(v, dict): + extras = () + for extra, reqs in v.items(): + extras += ((extra, tuple(reqs)),) + _metadata += extras + elif isinstance(v, (list, tuple)): + _metadata += (k, tuple(v)) + else: + _metadata += (k, v) + self.metadata = _metadata + if self.name is None: + self.name = metadata.get("name", self.name) + if not self.version: + self.version = metadata.get("version", self.version) + self._requirements = frozenset( + set(self._requirements) | set([ + BaseRequirement.from_req(req) + for req in metadata.get("requires", []) + ]) + ) + if getattr(self.ireq, "extras", None): + for extra in self.ireq.extras: + extras = 
metadata.get("extras", {}).get(extra, []) + if extras: + extras_tuple = tuple([ + BaseRequirement.from_req(req) + for req in ensure_reqs(tuple(extras)) + if req is not None + ]) + self._extras_requirements += ((extra, extras_tuple),) + self._requirements = frozenset( + set(self._requirements) | set(extras_tuple) + ) def run_pyproject(self): + # type: () -> None if self.pyproject and self.pyproject.exists(): result = get_pyproject(self.pyproject.parent) if result is not None: requires, backend = result if backend: self.build_backend = backend - if requires and not self.build_requires: - self.build_requires = requires + else: + self.build_backend = get_default_pyproject_backend() + if requires: + self.build_requires = tuple(set(requires) | set(self.build_requires)) + else: + self.build_requires = ("setuptools", "wheel") def get_info(self): + # type: () -> Dict[Text, Any] if self.setup_cfg and self.setup_cfg.exists(): - self.parse_setup_cfg() - if self.setup_py and self.setup_py.exists(): + with cd(self.base_dir): + self.parse_setup_cfg() + + with cd(self.base_dir): + self.run_pyproject() + self.build() + + if self.setup_py and self.setup_py.exists() and self.metadata is None: if not self.requires or not self.name: try: - self.run_setup() + with cd(self.base_dir): + self.run_setup() except Exception: - self.get_egg_metadata() - if not self.requires or not self.name: - self.get_egg_metadata() + with cd(self.base_dir): + self.get_egg_metadata() + if self.metadata is None or not self.name: + with cd(self.base_dir): + self.get_egg_metadata() - if self.pyproject and self.pyproject.exists(): - self.run_pyproject() return self.as_dict() def as_dict(self): + # type: () -> Dict[Text, Any] prop_dict = { "name": self.name, "version": self.version, @@ -378,23 +821,39 @@ def as_dict(self): @classmethod def from_requirement(cls, requirement, finder=None): + # type: (TRequirement, Optional[PackageFinder]) -> Optional[SetupInfo] ireq = requirement.as_ireq() subdir = 
getattr(requirement.req, "subdirectory", None) return cls.from_ireq(ireq, subdir=subdir, finder=finder) @classmethod + @lru_cache() def from_ireq(cls, ireq, subdir=None, finder=None): + # type: (InstallRequirement, Optional[Text], Optional[PackageFinder]) -> Optional[SetupInfo] import pip_shims.shims - + if not ireq.link: + return if ireq.link.is_wheel: return if not finder: from .dependencies import get_finder finder = get_finder() + vcs_method, uri = split_vcs_method_from_uri(unquote(ireq.link.url_without_fragment)) + parsed = urlparse(uri) + if "file" in parsed.scheme: + url_path = parsed.path + if "@" in url_path: + url_path, _, _ = url_path.rpartition("@") + parsed = parsed._replace(path=url_path) + uri = urlunparse(parsed) + path = None + if ireq.link.scheme == "file" or uri.startswith("file://"): + if "file:/" in uri and "file:///" not in uri: + uri = uri.replace("file:/", "file:///") + path = pip_shims.shims.url_to_path(uri) kwargs = _prepare_wheel_building_kwargs(ireq) - ireq.populate_link(finder, False, False) - ireq.ensure_has_source_dir(kwargs["build_dir"]) + ireq.source_dir = kwargs["src_dir"] if not ( ireq.editable and pip_shims.shims.is_file_url(ireq.link) @@ -406,36 +865,33 @@ def from_ireq(cls, ireq, subdir=None, finder=None): else: only_download = False download_dir = kwargs["download_dir"] - ireq_src_dir = None - if ireq.link.scheme == "file": - path = pip_shims.shims.url_to_path(unquote(ireq.link.url_without_fragment)) - if pip_shims.shims.is_installable_dir(path): - ireq_src_dir = path - if not ireq.editable or not (pip_shims.is_file_url(ireq.link) and ireq_src_dir): - pip_shims.shims.unpack_url( - ireq.link, - ireq.source_dir, - download_dir, - only_download=only_download, - session=finder.session, - hashes=ireq.hashes(False), - progress_bar="off", - ) - if ireq.editable: - created = cls.create( - ireq.source_dir, subdirectory=subdir, ireq=ireq, kwargs=kwargs + elif path is not None and os.path.isdir(path): + raise RequirementError( + "The 
file URL points to a directory not installable: {}" + .format(ireq.link) ) - else: - build_dir = ireq.build_location(kwargs["build_dir"]) - ireq._temp_build_dir.path = kwargs["build_dir"] - created = cls.create( - build_dir, subdirectory=subdir, ireq=ireq, kwargs=kwargs - ) - created.get_info() + build_dir = ireq.build_location(kwargs["build_dir"]) + src_dir = ireq.ensure_has_source_dir(kwargs["src_dir"]) + ireq._temp_build_dir.path = kwargs["build_dir"] + + ireq.populate_link(finder, False, False) + pip_shims.shims.unpack_url( + ireq.link, + src_dir, + download_dir, + only_download=only_download, + session=finder.session, + hashes=ireq.hashes(False), + progress_bar="off", + ) + created = cls.create( + src_dir, subdirectory=subdir, ireq=ireq, kwargs=kwargs + ) return created @classmethod def create(cls, base_dir, subdirectory=None, ireq=None, kwargs=None): + # type: (Text, Optional[Text], Optional[InstallRequirement], Optional[Dict[Text, Text]]) -> Optional[SetupInfo] if not base_dir or base_dir is None: return @@ -454,4 +910,6 @@ def create(cls, base_dir, subdirectory=None, ireq=None, kwargs=None): creation_kwargs["setup_cfg"] = setup_cfg if ireq: creation_kwargs["ireq"] = ireq - return cls(**creation_kwargs) + created = cls(**creation_kwargs) + created.get_info() + return created diff --git a/pipenv/vendor/requirementslib/models/utils.py b/pipenv/vendor/requirementslib/models/utils.py index 0fac2aa3f8..c9852a7b55 100644 --- a/pipenv/vendor/requirementslib/models/utils.py +++ b/pipenv/vendor/requirementslib/models/utils.py @@ -1,8 +1,10 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import +from __future__ import absolute_import, print_function import io import os +import re +import string import sys from collections import defaultdict @@ -16,31 +18,96 @@ from first import first from packaging.markers import InvalidMarker, Marker, Op, Value, Variable from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet +from packaging.version 
import parse as parse_version +from six.moves.urllib import parse as urllib_parse +from urllib3 import util as urllib3_util +from vistir.compat import lru_cache from vistir.misc import dedup +from vistir.path import is_valid_url from ..utils import SCHEME_LIST, VCS_LIST, is_star, add_ssh_scheme_to_git_uri +from ..environment import MYPY_RUNNING + +if MYPY_RUNNING: + from typing import Union, Optional, List, Set, Any, TypeVar, Tuple, Sequence, Dict, Text + from attr import _ValidatorType + from packaging.requirements import Requirement as PackagingRequirement + from pkg_resources import Requirement as PkgResourcesRequirement + from pkg_resources.extern.packaging.markers import ( + Op as PkgResourcesOp, Variable as PkgResourcesVariable, + Value as PkgResourcesValue, Marker as PkgResourcesMarker + ) + from pip_shims.shims import Link + from vistir.compat import Path + _T = TypeVar("_T") + TMarker = Union[Marker, PkgResourcesMarker] + TVariable = TypeVar("TVariable", PkgResourcesVariable, Variable) + TValue = TypeVar("TValue", PkgResourcesValue, Value) + TOp = TypeVar("TOp", PkgResourcesOp, Op) + MarkerTuple = Tuple[TVariable, TOp, TValue] + TRequirement = Union[PackagingRequirement, PkgResourcesRequirement] + HASH_STRING = " --hash={0}" +ALPHA_NUMERIC = r"[{0}{1}]".format(string.ascii_letters, string.digits) +PUNCTUATION = r"[\-_\.]" +ALPHANUM_PUNCTUATION = r"[{0}{1}\-_\.]".format(string.ascii_letters, string.digits) +NAME = r"{0}+{1}*{2}".format(ALPHANUM_PUNCTUATION, PUNCTUATION, ALPHA_NUMERIC) +REF = r"[{0}{1}\-\_\./]".format(string.ascii_letters, string.digits) +EXTRAS = r"(?P\[{0}(?:,{0})*\])".format(NAME) +NAME_WITH_EXTRAS = r"(?P{0}){1}?".format(NAME, EXTRAS) +NAME_RE = re.compile(NAME_WITH_EXTRAS) +SUBDIR_RE = r"(?:[&#]subdirectory=(?P.*))" +URL_NAME = r"(?:#egg={0})".format(NAME_WITH_EXTRAS) +REF_RE = r"(?:@(?P{0}+)?)".format(REF) +URL = r"(?P[^ ]+://)(?:(?P[^ ]+?\.?{0}+(?P:\d+)?))?(?P[:/])(?P[^ @]+){1}?".format(ALPHA_NUMERIC, REF_RE) +URL_RE = 
re.compile(r"{0}(?:{1}?{2}?)?".format(URL, URL_NAME, SUBDIR_RE)) +DIRECT_URL_RE = re.compile(r"{0}\s?@\s?{1}".format(NAME_WITH_EXTRAS, URL)) + def filter_none(k, v): + # type: (Text, Any) -> bool if v: return True return False def optional_instance_of(cls): + # type: (Any) -> _ValidatorType[Optional[_T]] return validators.optional(validators.instance_of(cls)) def create_link(link): - from pip_shims import Link + # type: (Text) -> Link + + if not isinstance(link, six.string_types): + raise TypeError("must provide a string to instantiate a new link") + from pip_shims.shims import Link return Link(link) +def get_url_name(url): + # type: (Text) -> Text + """ + Given a url, derive an appropriate name to use in a pipfile. + + :param str url: A url to derive a string from + :returns: The name of the corresponding pipfile entry + :rtype: Text + """ + if not isinstance(url, six.string_types): + raise TypeError("Expected a string, got {0!r}".format(url)) + return urllib3_util.parse_url(url).host + + def init_requirement(name): + # type: (Text) -> TRequirement + + if not isinstance(name, six.string_types): + raise TypeError("must supply a name to generate a requirement") from pkg_resources import Requirement req = Requirement.parse(name) req.vcs = None @@ -51,25 +118,35 @@ def init_requirement(name): def extras_to_string(extras): + # type: (Sequence) -> Text """Turn a list of extras into a string""" if isinstance(extras, six.string_types): if extras.startswith("["): return extras - else: extras = [extras] - return "[{0}]".format(",".join(sorted(extras))) + if not extras: + return "" + return "[{0}]".format(",".join(sorted(set(extras)))) def parse_extras(extras_str): - """Turn a string of extras into a parsed extras list""" + # type: (Text) -> List + """ + Turn a string of extras into a parsed extras list + """ + from pkg_resources import Requirement extras = Requirement.parse("fakepkg{0}".format(extras_to_string(extras_str))).extras return sorted(dedup([extra.lower() for 
extra in extras])) def specs_to_string(specs): - """Turn a list of specifier tuples into a string""" + # type: (List[Union[Text, Specifier]]) -> Text + """ + Turn a list of specifier tuples into a string + """ + if specs: if isinstance(specs, six.string_types): return specs @@ -81,40 +158,202 @@ def specs_to_string(specs): return "" -def build_vcs_link(vcs, uri, name=None, ref=None, subdirectory=None, extras=None): +def build_vcs_uri( + vcs, # type: Optional[Text] + uri, # type: Text + name=None, # type: Optional[Text] + ref=None, # type: Optional[Text] + subdirectory=None, # type: Optional[Text] + extras=None # type: Optional[List[Text]] +): + # type: (...) -> Text if extras is None: extras = [] - vcs_start = "{0}+".format(vcs) - if not uri.startswith(vcs_start): - uri = "{0}{1}".format(vcs_start, uri) - uri = add_ssh_scheme_to_git_uri(uri) + vcs_start = "" + if vcs is not None: + vcs_start = "{0}+".format(vcs) + if not uri.startswith(vcs_start): + uri = "{0}{1}".format(vcs_start, uri) if ref: uri = "{0}@{1}".format(uri, ref) if name: uri = "{0}#egg={1}".format(uri, name) if extras: - extras = extras_to_string(extras) - uri = "{0}{1}".format(uri, extras) + extras_string = extras_to_string(extras) + uri = "{0}{1}".format(uri, extras_string) if subdirectory: uri = "{0}&subdirectory={1}".format(uri, subdirectory) - return create_link(uri) + return uri + + +def convert_direct_url_to_url(direct_url): + # type: (Text) -> Text + """ + Given a direct url as defined by *PEP 508*, convert to a :class:`~pip_shims.shims.Link` + compatible URL by moving the name and extras into an **egg_fragment**. + + :param str direct_url: A pep-508 compliant direct url. + :return: A reformatted URL for use with Link objects and :class:`~pip_shims.shims.InstallRequirement` objects. 
+ :rtype: Text + """ + direct_match = DIRECT_URL_RE.match(direct_url) + if direct_match is None: + url_match = URL_RE.match(direct_url) + if url_match or is_valid_url(direct_url): + return direct_url + match_dict = direct_match.groupdict() + if not match_dict: + raise ValueError("Failed converting value to normal URL, is it a direct URL? {0!r}".format(direct_url)) + url_segments = [match_dict.get(s) for s in ("scheme", "host", "path", "pathsep")] + url = "".join([s for s in url_segments if s is not None]) + new_url = build_vcs_uri( + None, + url, + ref=match_dict.get("ref"), + name=match_dict.get("name"), + extras=match_dict.get("extras"), + subdirectory=match_dict.get("subdirectory") + ) + return new_url + + +def convert_url_to_direct_url(url, name=None): + # type: (Text, Optional[Text]) -> Text + """ + Given a :class:`~pip_shims.shims.Link` compatible URL, convert to a direct url as + defined by *PEP 508* by extracting the name and extras from the **egg_fragment**. + + :param Text url: A :class:`~pip_shims.shims.InstallRequirement` compliant URL. + :param Optiona[Text] name: A name to use in case the supplied URL doesn't provide one. + :return: A pep-508 compliant direct url. + :rtype: Text + + :raises ValueError: Raised when the URL can't be parsed or a name can't be found. + :raises TypeError: When a non-string input is provided. 
+ """ + if not isinstance(url, six.string_types): + raise TypeError( + "Expected a string to convert to a direct url, got {0!r}".format(url) + ) + direct_match = DIRECT_URL_RE.match(url) + if direct_match: + return url + url_match = URL_RE.match(url) + if url_match is None or not url_match.groupdict(): + raise ValueError("Failed parse a valid URL from {0!r}".format(url)) + match_dict = url_match.groupdict() + url_segments = [match_dict.get(s) for s in ("scheme", "host", "path", "pathsep")] + name = match_dict.get("name", name) + extras = match_dict.get("extras") + new_url = "" + if extras and not name: + url_segments.append(extras) + elif extras and name: + new_url = "{0}{1}@ ".format(name, extras) + else: + if name is not None: + new_url = "{0}@ ".format(name) + else: + raise ValueError( + "Failed to construct direct url: " + "No name could be parsed from {0!r}".format(url) + ) + if match_dict.get("ref"): + url_segments.append("@{0}".format(match_dict.get("ref"))) + url = "".join([s for s in url if s is not None]) + url = "{0}{1}".format(new_url, url) + return url def get_version(pipfile_entry): + # type: (Union[Text, Dict[Text, bool, List[Text]]]) -> Text if str(pipfile_entry) == "{}" or is_star(pipfile_entry): return "" elif hasattr(pipfile_entry, "keys") and "version" in pipfile_entry: if is_star(pipfile_entry.get("version")): return "" - return pipfile_entry.get("version", "") + return pipfile_entry.get("version", "").strip().lstrip("(").rstrip(")") if isinstance(pipfile_entry, six.string_types): - return pipfile_entry + return pipfile_entry.strip().lstrip("(").rstrip(")") return "" +def strip_extras_markers_from_requirement(req): + # type: (TRequirement) -> TRequirement + """ + Given a :class:`~packaging.requirements.Requirement` instance with markers defining + *extra == 'name'*, strip out the extras from the markers and return the cleaned + requirement + + :param PackagingRequirement req: A pacakaging requirement to clean + :return: A cleaned requirement + 
:rtype: PackagingRequirement + """ + if req is None: + raise TypeError("Must pass in a valid requirement, received {0!r}".format(req)) + if getattr(req, "marker", None) is not None: + marker = req.marker # type: TMarker + req.marker._markers = _strip_extras_markers(req.marker._markers) + if not req.marker._markers: + req.marker = None + return req + + +def _strip_extras_markers(marker): + # type: (Union[MarkerTuple, List[Union[MarkerTuple, str]]]) -> List[Union[MarkerTuple, str]] + if marker is None or not isinstance(marker, (list, tuple)): + raise TypeError("Expecting a marker type, received {0!r}".format(marker)) + markers_to_remove = [] + # iterate forwards and generate a list of indexes to remove first, then reverse the + # list so we can remove the text that normally occurs after (but we will already + # be past it in the loop) + for i, marker_list in enumerate(marker): + if isinstance(marker_list, list): + cleaned = _strip_extras_markers(marker_list) + if not cleaned: + markers_to_remove.append(i) + elif isinstance(marker_list, tuple) and marker_list[0].value == "extra": + markers_to_remove.append(i) + for i in reversed(markers_to_remove): + del marker[i] + if i > 0 and marker[i - 1] == "and": + del marker[i - 1] + return marker + + +@lru_cache() +def get_setuptools_version(): + # type: () -> Optional[Text] + import pkg_resources + setuptools_dist = pkg_resources.get_distribution( + pkg_resources.Requirement("setuptools") + ) + return getattr(setuptools_dist, "version", None) + + +def get_default_pyproject_backend(): + # type: () -> Text + st_version = get_setuptools_version() + if st_version is not None: + parsed_st_version = parse_version(st_version) + if parsed_st_version >= parse_version("40.6.0"): + return "setuptools.build_meta:__legacy__" + return "setuptools.build_meta" + + def get_pyproject(path): + # type: (Union[Text, Path]) -> Tuple[List[Text], Text] + """ + Given a base path, look for the corresponding ``pyproject.toml`` file and return its + 
build_requires and build_backend. + + :param Text path: The root path of the project, should be a directory (will be truncated) + :return: A 2 tuple of build requirements and the build backend + :rtype: Tuple[List[Text], Text] + """ + from vistir.compat import Path if not path: return @@ -125,8 +364,10 @@ def get_pyproject(path): pp_toml = path.joinpath("pyproject.toml") setup_py = path.joinpath("setup.py") if not pp_toml.exists(): - if setup_py.exists(): + if not setup_py.exists(): return None + requires = ["setuptools>=40.6", "wheel"] + backend = get_default_pyproject_backend() else: pyproject_data = {} with io.open(pp_toml.as_posix(), encoding="utf-8") as fh: @@ -134,23 +375,24 @@ def get_pyproject(path): build_system = pyproject_data.get("build-system", None) if build_system is None: if setup_py.exists(): - requires = ["setuptools", "wheel"] - backend = "setuptools.build_meta" + requires = ["setuptools>=40.6", "wheel"] + backend = get_default_pyproject_backend() else: - requires = ["setuptools>=38.2.5", "wheel"] - backend = "setuptools.build_meta" + requires = ["setuptools>=40.6", "wheel"] + backend = get_default_pyproject_backend() build_system = { "requires": requires, "build-backend": backend } pyproject_data["build_system"] = build_system else: - requires = build_system.get("requires") - backend = build_system.get("build-backend") - return (requires, backend) + requires = build_system.get("requires", ["setuptools>=40.6", "wheel"]) + backend = build_system.get("build-backend", get_default_pyproject_backend()) + return (requires, backend) def split_markers_from_line(line): + # type: (Text) -> Tuple[Text, Optional[Text]] """Split markers from a dependency""" if not any(line.startswith(uri_prefix) for uri_prefix in SCHEME_LIST): marker_sep = ";" @@ -164,6 +406,7 @@ def split_markers_from_line(line): def split_vcs_method_from_uri(uri): + # type: (Text) -> Tuple[Optional[Text], Text] """Split a vcs+uri formatted uri into (vcs, uri)""" vcs_start = "{0}+" vcs = 
first([vcs for vcs in VCS_LIST if uri.startswith(vcs_start.format(vcs))]) @@ -172,6 +415,27 @@ def split_vcs_method_from_uri(uri): return vcs, uri +def split_ref_from_uri(uri): + # type: (Text) -> Tuple[Text, Optional[Text]] + """ + Given a path or URI, check for a ref and split it from the path if it is present, + returning a tuple of the original input and the ref or None. + + :param Text uri: The path or URI to split + :returns: A 2-tuple of the path or URI and the ref + :rtype: Tuple[Text, Optional[Text]] + """ + if not isinstance(uri, six.string_types): + raise TypeError("Expected a string, received {0!r}".format(uri)) + parsed = urllib_parse.urlparse(uri) + path = parsed.path + ref = None + if "@" in path: + path, _, ref = path.rpartition("@") + parsed = parsed._replace(path=path) + return (urllib_parse.urlunparse(parsed), ref) + + def validate_vcs(instance, attr_, value): if value not in VCS_LIST: raise ValueError("Invalid vcs {0!r}".format(value)) @@ -232,6 +496,7 @@ def _requirement_to_str_lowercase_name(requirement): important stuff that should not be lowercased (such as the marker). See this issue for more information: https://github.com/pypa/pipenv/issues/2113. """ + parts = [requirement.name.lower()] if requirement.extras: @@ -254,6 +519,7 @@ def format_requirement(ireq): Generic formatter for pretty printing InstallRequirements to the terminal in a less verbose way than using its `__str__` method. """ + if ireq.editable: line = '-e {}'.format(ireq.link) else: @@ -282,7 +548,8 @@ def format_specifier(ireq): def get_pinned_version(ireq): - """Get the pinned version of an InstallRequirement. + """ + Get the pinned version of an InstallRequirement. An InstallRequirement is considered pinned if: @@ -300,6 +567,7 @@ def get_pinned_version(ireq): Raises `TypeError` if the input is not a valid InstallRequirement, or `ValueError` if the InstallRequirement is not pinned. 
""" + try: specifier = ireq.specifier except AttributeError: @@ -324,7 +592,8 @@ def get_pinned_version(ireq): def is_pinned_requirement(ireq): - """Returns whether an InstallRequirement is a "pinned" requirement. + """ + Returns whether an InstallRequirement is a "pinned" requirement. An InstallRequirement is considered pinned if: @@ -339,6 +608,7 @@ def is_pinned_requirement(ireq): django~=1.8 # NOT pinned django==1.* # NOT pinned """ + try: get_pinned_version(ireq) except (TypeError, ValueError): @@ -350,6 +620,7 @@ def as_tuple(ireq): """ Pulls out the (name: str, version:str, extras:(str)) tuple from the pinned InstallRequirement. """ + if not is_pinned_requirement(ireq): raise TypeError('Expected a pinned InstallRequirement, got {}'.format(ireq)) @@ -360,12 +631,18 @@ def as_tuple(ireq): def full_groupby(iterable, key=None): - """Like groupby(), but sorts the input on the group key first.""" + """ + Like groupby(), but sorts the input on the group key first. + """ + return groupby(sorted(iterable, key=key), key=key) def flat_map(fn, collection): - """Map a function over a collection and flatten the result by one-level""" + """ + Map a function over a collection and flatten the result by one-level + """ + return chain.from_iterable(map(fn, collection)) @@ -385,8 +662,7 @@ def lookup_table(values, key=None, keyval=None, unique=False, use_lists=False): For key functions that uniquely identify values, set unique=True: >>> assert lookup_table( - ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0], - ... unique=True) == { + ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0], unique=True) == { ... 'b': 'baz', ... 'f': 'foo', ... 'q': 'quux' @@ -404,8 +680,8 @@ def lookup_table(values, key=None, keyval=None, unique=False, use_lists=False): ... 'f': {'oo'}, ... 'q': {'uux', 'ux'} ... 
} - """ + if keyval is None: if key is None: keyval = (lambda v: v) @@ -443,7 +719,8 @@ def name_from_req(req): def make_install_requirement(name, version, extras, markers, constraint=False): - """make_install_requirement Generates an :class:`~pip._internal.req.req_install.InstallRequirement`. + """ + Generates an :class:`~pip._internal.req.req_install.InstallRequirement`. Create an InstallRequirement from the supplied metadata. @@ -539,12 +816,42 @@ def fix_requires_python_marker(requires_python): def normalize_name(pkg): + # type: (Text) -> Text """Given a package name, return its normalized, non-canonicalized form. - :param str pkg: The name of a package + :param Text pkg: The name of a package :return: A normalized package name - :rtype: str + :rtype: Text """ assert isinstance(pkg, six.string_types) return pkg.replace("_", "-").lower() + + +def get_name_variants(pkg): + # type: (Text) -> Set[Text] + """ + Given a packager name, get the variants of its name for both the canonicalized + and "safe" forms. + + :param Text pkg: The package to lookup + :returns: A list of names. 
+ :rtype: Set + """ + + if not isinstance(pkg, six.string_types): + raise TypeError("must provide a string to derive package names") + from pkg_resources import safe_name + from packaging.utils import canonicalize_name + pkg = pkg.lower() + names = {safe_name(pkg), canonicalize_name(pkg), pkg.replace("-", "_")} + return names + + +SETUPTOOLS_SHIM = ( + "import setuptools, tokenize;__file__=%r;" + "f=getattr(tokenize, 'open', open)(__file__);" + "code=f.read().replace('\\r\\n', '\\n');" + "f.close();" + "exec(compile(code, __file__, 'exec'))" +) diff --git a/pipenv/vendor/requirementslib/models/vcs.py b/pipenv/vendor/requirementslib/models/vcs.py index 6a15db3f85..9296f605b2 100644 --- a/pipenv/vendor/requirementslib/models/vcs.py +++ b/pipenv/vendor/requirementslib/models/vcs.py @@ -1,11 +1,18 @@ # -*- coding=utf-8 -*- +from __future__ import absolute_import, print_function + import attr +import importlib import os import pip_shims +import six +import sys -@attr.s +@attr.s(hash=True) class VCSRepository(object): + DEFAULT_RUN_ARGS = None + url = attr.ib() name = attr.ib() checkout_directory = attr.ib() @@ -14,13 +21,21 @@ class VCSRepository(object): commit_sha = attr.ib(default=None) ref = attr.ib(default=None) repo_instance = attr.ib() + clone_log = attr.ib(default=None) @repo_instance.default def get_repo_instance(self): - from pip_shims import VcsSupport + if self.DEFAULT_RUN_ARGS is None: + default_run_args = self.monkeypatch_pip() + else: + default_run_args = self.DEFAULT_RUN_ARGS + from pip_shims.shims import VcsSupport VCS_SUPPORT = VcsSupport() backend = VCS_SUPPORT._registry.get(self.vcs_type) - return backend(url=self.url) + repo = backend(url=self.url) + if repo.run_command.__func__.__defaults__ != default_run_args: + repo.run_command.__func__.__defaults__ = default_run_args + return repo @property def is_local(self): @@ -58,3 +73,22 @@ def update(self, ref): def get_commit_hash(self, ref=None): return 
self.repo_instance.get_revision(self.checkout_directory) + + @classmethod + def monkeypatch_pip(cls): + target_module = pip_shims.shims.VcsSupport.__module__ + pip_vcs = importlib.import_module(target_module) + run_command_defaults = pip_vcs.VersionControl.run_command.__defaults__ + # set the default to not write stdout, the first option sets this value + new_defaults = [False,] + list(run_command_defaults)[1:] + new_defaults = tuple(new_defaults) + if six.PY3: + try: + pip_vcs.VersionControl.run_command.__defaults__ = new_defaults + except AttributeError: + pip_vcs.VersionControl.run_command.__func__.__defaults__ = new_defaults + else: + pip_vcs.VersionControl.run_command.__func__.__defaults__ = new_defaults + sys.modules[target_module] = pip_vcs + cls.DEFAULT_RUN_ARGS = new_defaults + return new_defaults diff --git a/pipenv/vendor/requirementslib/utils.py b/pipenv/vendor/requirementslib/utils.py index f3653e32c1..a3ddfddea5 100644 --- a/pipenv/vendor/requirementslib/utils.py +++ b/pipenv/vendor/requirementslib/utils.py @@ -1,5 +1,5 @@ # -*- coding=utf-8 -*- -from __future__ import absolute_import +from __future__ import absolute_import, print_function import contextlib import logging @@ -8,18 +8,24 @@ import six import sys import tomlkit +import vistir -six.add_move(six.MovedAttribute("Mapping", "collections", "collections.abc")) -six.add_move(six.MovedAttribute("Sequence", "collections", "collections.abc")) -six.add_move(six.MovedAttribute("Set", "collections", "collections.abc")) -six.add_move(six.MovedAttribute("ItemsView", "collections", "collections.abc")) -from six.moves import Mapping, Sequence, Set, ItemsView -from six.moves.urllib.parse import urlparse, urlsplit +six.add_move(six.MovedAttribute("Mapping", "collections", "collections.abc")) # type: ignore # noqa +six.add_move(six.MovedAttribute("Sequence", "collections", "collections.abc")) # type: ignore # noqa +six.add_move(six.MovedAttribute("Set", "collections", "collections.abc")) # type: ignore # 
noqa +six.add_move(six.MovedAttribute("ItemsView", "collections", "collections.abc")) # type: ignore # noqa +from six.moves import Mapping, Sequence, Set, ItemsView # type: ignore # noqa +from six.moves.urllib.parse import urlparse, urlsplit, urlunparse import pip_shims.shims from vistir.compat import Path from vistir.path import is_valid_url, ensure_mkdir_p, create_tracked_tempdir +from .environment import MYPY_RUNNING + +if MYPY_RUNNING: + from typing import Dict, Any, Optional, Union, Tuple, List, Iterable, Generator, Text + VCS_LIST = ("git", "svn", "hg", "bzr") @@ -68,11 +74,12 @@ def setup_logger(): def is_installable_dir(path): + # type: (Text) -> bool if pip_shims.shims.is_installable_dir(path): return True - path = Path(path) - pyproject = path.joinpath("pyproject.toml") - if pyproject.exists(): + pyproject_path = os.path.join(path, "pyproject.toml") + if os.path.exists(pyproject_path): + pyproject = Path(pyproject_path) pyproject_toml = tomlkit.loads(pyproject.read_text()) build_system = pyproject_toml.get("build-system", {}).get("build-backend", "") if build_system: @@ -81,22 +88,39 @@ def is_installable_dir(path): def strip_ssh_from_git_uri(uri): + # type: (Text) -> Text """Return git+ssh:// formatted URI to git+git@ format""" if isinstance(uri, six.string_types): - uri = uri.replace("git+ssh://", "git+", 1) + if "git+ssh://" in uri: + parsed = urlparse(uri) + # split the path on the first separating / so we can put the first segment + # into the 'netloc' section with a : separator + path_part, _, path = parsed.path.lstrip("/").partition("/") + path = "/{0}".format(path) + parsed = parsed._replace( + netloc="{0}:{1}".format(parsed.netloc, path_part), path=path + ) + uri = urlunparse(parsed).replace("git+ssh://", "git+", 1) return uri def add_ssh_scheme_to_git_uri(uri): - """Cleans VCS uris from pipenv.patched.notpip format""" + # type: (Text) -> Text + """Cleans VCS uris from pip format""" if isinstance(uri, six.string_types): # Add scheme for parsing 
purposes, this is also what pip does if uri.startswith("git+") and "://" not in uri: uri = uri.replace("git+", "git+ssh://", 1) + parsed = urlparse(uri) + if ":" in parsed.netloc: + netloc, _, path_start = parsed.netloc.rpartition(":") + path = "/{0}{1}".format(path_start, parsed.path) + uri = urlunparse(parsed._replace(netloc=netloc, path=path)) return uri def is_vcs(pipfile_entry): + # type: (Union[Text, Dict[Text, Union[Text, bool, Tuple[Text], List[Text]]]]) -> bool """Determine if dictionary entry from Pipfile is for a vcs dependency.""" if isinstance(pipfile_entry, Mapping): return any(key for key in pipfile_entry.keys() if key in VCS_LIST) @@ -111,12 +135,16 @@ def is_vcs(pipfile_entry): def is_editable(pipfile_entry): + # type: (Union[Text, Dict[Text, Union[Text, bool, Tuple[Text], List[Text]]]]) -> bool if isinstance(pipfile_entry, Mapping): return pipfile_entry.get("editable", False) is True + if isinstance(pipfile_entry, six.string_types): + return pipfile_entry.startswith("-e ") return False def multi_split(s, split): + # type: (Text, Iterable[Text]) -> List[Text] """Splits on multiple given separators.""" for r in split: s = s.replace(r, "|") @@ -124,21 +152,37 @@ def multi_split(s, split): def is_star(val): + # type: (Union[Text, Dict[Text, Union[Text, bool, Tuple[Text], List[Text]]]]) -> bool return (isinstance(val, six.string_types) and val == "*") or ( isinstance(val, Mapping) and val.get("version", "") == "*" ) +def convert_entry_to_path(path): + # type: (Dict[Text, Union[Text, bool, Tuple[Text], List[Text]]]) -> Text + """Convert a pipfile entry to a string""" + + if not isinstance(path, Mapping): + raise TypeError("expecting a mapping, received {0!r}".format(path)) + + if not any(key in path for key in ["file", "path"]): + raise ValueError("missing path-like entry in supplied mapping {0!r}".format(path)) + + if "file" in path: + path = vistir.path.url_to_path(path["file"]) + + elif "path" in path: + path = path["path"] + return path + + def 
is_installable_file(path): + # type: (Union[Text, Dict[Text, Union[Text, bool, Tuple[Text], List[Text]]]]) -> bool """Determine if a path can potentially be installed""" from packaging import specifiers - if hasattr(path, "keys") and any( - key for key in path.keys() if key in ["file", "path"] - ): - path = urlparse(path["file"]).path if "file" in path else path["path"] - if not isinstance(path, six.string_types) or path == "*": - return False + if isinstance(path, Mapping): + path = convert_entry_to_path(path) # If the string starts with a valid specifier operator, test if it is a valid # specifier set before making a path object (to avoid breaking windows) @@ -152,41 +196,84 @@ def is_installable_file(path): return False parsed = urlparse(path) - if parsed.scheme == "file": - path = parsed.path - - if not os.path.exists(os.path.abspath(path)): + is_local = (not parsed.scheme or parsed.scheme == "file" or (len(parsed.scheme) == 1 and os.name == "nt")) + if parsed.scheme and parsed.scheme == "file": + path = vistir.compat.fs_decode(vistir.path.url_to_path(path)) + normalized_path = vistir.path.normalize_path(path) + if is_local and not os.path.exists(normalized_path): return False - lookup_path = Path(path) - absolute_path = "{0}".format(lookup_path.absolute()) - if lookup_path.is_dir() and is_installable_dir(absolute_path): + is_archive = pip_shims.shims.is_archive_file(normalized_path) + is_local_project = os.path.isdir(normalized_path) and is_installable_dir(normalized_path) + if is_local and is_local_project or is_archive: return True - elif lookup_path.is_file() and pip_shims.shims.is_archive_file(absolute_path): + if not is_local and pip_shims.shims.is_archive_file(parsed.path): return True return False +def get_dist_metadata(dist): + import pkg_resources + from email.parser import FeedParser + if (isinstance(dist, pkg_resources.DistInfoDistribution) and + dist.has_metadata('METADATA')): + metadata = dist.get_metadata('METADATA') + elif 
dist.has_metadata('PKG-INFO'): + metadata = dist.get_metadata('PKG-INFO') + else: + metadata = "" + + feed_parser = FeedParser() + feed_parser.feed(metadata) + return feed_parser.close() + + +def get_setup_paths(base_path, subdirectory=None): + # type: (Text, Optional[Text]) -> Dict[Text, Optional[Text]] + if base_path is None: + raise TypeError("must provide a path to derive setup paths from") + setup_py = os.path.join(base_path, "setup.py") + setup_cfg = os.path.join(base_path, "setup.cfg") + pyproject_toml = os.path.join(base_path, "pyproject.toml") + if subdirectory is not None: + base_path = os.path.join(base_path, subdirectory) + subdir_setup_py = os.path.join(subdirectory, "setup.py") + subdir_setup_cfg = os.path.join(subdirectory, "setup.cfg") + subdir_pyproject_toml = os.path.join(subdirectory, "pyproject.toml") + if subdirectory and os.path.exists(subdir_setup_py): + setup_py = subdir_setup_py + if subdirectory and os.path.exists(subdir_setup_cfg): + setup_cfg = subdir_setup_cfg + if subdirectory and os.path.exists(subdir_pyproject_toml): + pyproject_toml = subdir_pyproject_toml + return { + "setup_py": setup_py if os.path.exists(setup_py) else None, + "setup_cfg": setup_cfg if os.path.exists(setup_cfg) else None, + "pyproject_toml": pyproject_toml if os.path.exists(pyproject_toml) else None + } + + def prepare_pip_source_args(sources, pip_args=None): + # type: (List[Dict[Text, Union[Text, bool]]], Optional[List[Text]]) -> List[Text] if pip_args is None: pip_args = [] if sources: # Add the source to pip9. - pip_args.extend(["-i", sources[0]["url"]]) + pip_args.extend(["-i", sources[0]["url"]]) # type: ignore # Trust the host if it's not verified. if not sources[0].get("verify_ssl", True): - pip_args.extend(["--trusted-host", urlparse(sources[0]["url"]).hostname]) + pip_args.extend(["--trusted-host", urlparse(sources[0]["url"]).hostname]) # type: ignore # Add additional sources as extra indexes. 
if len(sources) > 1: for source in sources[1:]: - pip_args.extend(["--extra-index-url", source["url"]]) + pip_args.extend(["--extra-index-url", source["url"]]) # type: ignore # Trust the host if it's not verified. if not source.get("verify_ssl", True): pip_args.extend( ["--trusted-host", urlparse(source["url"]).hostname] - ) + ) # type: ignore return pip_args @@ -196,10 +283,11 @@ def _ensure_dir(path): @contextlib.contextmanager -def ensure_setup_py(base_dir): - if not base_dir: - base_dir = create_tracked_tempdir(prefix="requirementslib-setup") - base_dir = Path(base_dir) +def ensure_setup_py(base): + # type: (Text) -> Generator[None, None, None] + if not base: + base = create_tracked_tempdir(prefix="requirementslib-setup") + base_dir = Path(base) if base_dir.exists() and base_dir.name == "setup.py": base_dir = base_dir.parent elif not (base_dir.exists() and base_dir.is_dir()): diff --git a/pipenv/vendor/shellingham/__init__.py b/pipenv/vendor/shellingham/__init__.py index 576c422496..f879cf9d32 100644 --- a/pipenv/vendor/shellingham/__init__.py +++ b/pipenv/vendor/shellingham/__init__.py @@ -4,7 +4,7 @@ from ._core import ShellDetectionFailure -__version__ = '1.2.7' +__version__ = '1.2.8' def detect_shell(pid=None, max_depth=6): diff --git a/pipenv/vendor/shellingham/nt.py b/pipenv/vendor/shellingham/nt.py index 7b3cc6b4dc..d66bc33f8c 100644 --- a/pipenv/vendor/shellingham/nt.py +++ b/pipenv/vendor/shellingham/nt.py @@ -75,7 +75,18 @@ def _iter_process(): # looking for. We can fix this when it actually matters. (#8) continue raise WinError() - info = {'executable': str(pe.szExeFile.decode('utf-8'))} + + # The executable name would be encoded with the current code page if + # we're in ANSI mode (usually). Try to decode it into str/unicode, + # replacing invalid characters to be safe (not thoeratically necessary, + # I think). 
Note that we need to use 'mbcs' instead of encoding + # settings from sys because this is from the Windows API, not Python + # internals (which those settings reflect). (pypa/pipenv#3382) + executable = pe.szExeFile + if isinstance(executable, bytes): + executable = executable.decode('mbcs', 'replace') + + info = {'executable': executable} if pe.th32ParentProcessID: info['parent_pid'] = pe.th32ParentProcessID yield pe.th32ProcessID, info diff --git a/pipenv/vendor/shellingham/posix.py b/pipenv/vendor/shellingham/posix.py index 0bbf988b90..b25dd87456 100644 --- a/pipenv/vendor/shellingham/posix.py +++ b/pipenv/vendor/shellingham/posix.py @@ -21,7 +21,7 @@ def _get_process_mapping(): processes = {} for line in output.split('\n'): try: - pid, ppid, args = line.strip().split(None, 2) + pid, ppid, args = line.strip().split(maxsplit=2) except ValueError: continue processes[pid] = Process( diff --git a/pipenv/vendor/shellingham/posix/_default.py b/pipenv/vendor/shellingham/posix/_default.py new file mode 100644 index 0000000000..8694427611 --- /dev/null +++ b/pipenv/vendor/shellingham/posix/_default.py @@ -0,0 +1,27 @@ +import collections +import shlex +import subprocess +import sys + + +Process = collections.namedtuple('Process', 'args pid ppid') + + +def get_process_mapping(): + """Try to look up the process tree via the output of `ps`. 
+ """ + output = subprocess.check_output([ + 'ps', '-ww', '-o', 'pid=', '-o', 'ppid=', '-o', 'args=', + ]) + if not isinstance(output, str): + output = output.decode(sys.stdout.encoding) + processes = {} + for line in output.split('\n'): + try: + pid, ppid, args = line.strip().split(None, 2) + except ValueError: + continue + processes[pid] = Process( + args=tuple(shlex.split(args)), pid=pid, ppid=ppid, + ) + return processes diff --git a/pipenv/vendor/shellingham/posix/_proc.py b/pipenv/vendor/shellingham/posix/_proc.py index e3a6e46db0..921f250819 100644 --- a/pipenv/vendor/shellingham/posix/_proc.py +++ b/pipenv/vendor/shellingham/posix/_proc.py @@ -1,34 +1,40 @@ import os import re -from ._default import Process +from ._core import Process STAT_PPID = 3 STAT_TTY = 6 +STAT_PATTERN = re.compile(r'\(.+\)|\S+') + + +def _get_stat(pid): + with open(os.path.join('/proc', str(pid), 'stat')) as f: + parts = STAT_PATTERN.findall(f.read()) + return parts[STAT_TTY], parts[STAT_PPID] + + +def _get_cmdline(pid): + with open(os.path.join('/proc', str(pid), 'cmdline')) as f: + return tuple(f.read().split('\0')[:-1]) + def get_process_mapping(): """Try to look up the process tree via the /proc interface. 
""" - with open('/proc/{0}/stat'.format(os.getpid())) as f: - self_tty = f.read().split()[STAT_TTY] + self_tty = _get_stat(os.getpid())[0] processes = {} for pid in os.listdir('/proc'): if not pid.isdigit(): continue try: - stat = '/proc/{0}/stat'.format(pid) - cmdline = '/proc/{0}/cmdline'.format(pid) - with open(stat) as fstat, open(cmdline) as fcmdline: - stat = re.findall(r'\(.+\)|\S+', fstat.read()) - cmd = fcmdline.read().split('\x00')[:-1] - ppid = stat[STAT_PPID] - tty = stat[STAT_TTY] - if tty == self_tty: - processes[pid] = Process( - args=tuple(cmd), pid=pid, ppid=ppid, - ) + tty, ppid = _get_stat(pid) + if tty != self_tty: + continue + args = _get_cmdline(pid) + processes[pid] = Process(args=args, pid=pid, ppid=ppid) except IOError: # Process has disappeared - just ignore it. continue diff --git a/pipenv/vendor/shellingham/posix/_ps.py b/pipenv/vendor/shellingham/posix/_ps.py index 8694427611..e96278cf5f 100644 --- a/pipenv/vendor/shellingham/posix/_ps.py +++ b/pipenv/vendor/shellingham/posix/_ps.py @@ -1,10 +1,8 @@ -import collections import shlex import subprocess import sys - -Process = collections.namedtuple('Process', 'args pid ppid') +from ._core import Process def get_process_mapping(): diff --git a/pipenv/vendor/shellingham/posix/linux.py b/pipenv/vendor/shellingham/posix/linux.py new file mode 100644 index 0000000000..6db9783481 --- /dev/null +++ b/pipenv/vendor/shellingham/posix/linux.py @@ -0,0 +1,35 @@ +import os +import re + +from ._default import Process + + +STAT_PPID = 3 +STAT_TTY = 6 + + +def get_process_mapping(): + """Try to look up the process tree via Linux's /proc + """ + with open('/proc/{0}/stat'.format(os.getpid())) as f: + self_tty = f.read().split()[STAT_TTY] + processes = {} + for pid in os.listdir('/proc'): + if not pid.isdigit(): + continue + try: + stat = '/proc/{0}/stat'.format(pid) + cmdline = '/proc/{0}/cmdline'.format(pid) + with open(stat) as fstat, open(cmdline) as fcmdline: + stat = re.findall(r'\(.+\)|\S+', 
fstat.read()) + cmd = fcmdline.read().split('\x00')[:-1] + ppid = stat[STAT_PPID] + tty = stat[STAT_TTY] + if tty == self_tty: + processes[pid] = Process( + args=tuple(cmd), pid=pid, ppid=ppid, + ) + except IOError: + # Process has disappeared - just ignore it. + continue + return processes diff --git a/pipenv/vendor/shellingham/posix/ps.py b/pipenv/vendor/shellingham/posix/ps.py index 4a155ed5cb..cf7e56f29d 100644 --- a/pipenv/vendor/shellingham/posix/ps.py +++ b/pipenv/vendor/shellingham/posix/ps.py @@ -1,5 +1,4 @@ import errno -import shlex import subprocess import sys @@ -34,9 +33,13 @@ def get_process_mapping(): for line in output.split('\n'): try: pid, ppid, args = line.strip().split(None, 2) - processes[pid] = Process( - args=tuple(shlex.split(args)), pid=pid, ppid=ppid, - ) + # XXX: This is not right, but we are really out of options. + # ps does not offer a sane way to decode the argument display, + # and this is "Good Enough" for obtaining shell names. Hopefully + # people don't name their shell with a space, or have something + # like "/usr/bin/xonsh is uber". 
(sarugaku/shellingham#14) + args = tuple(a.strip() for a in args.split(' ')) except ValueError: continue + processes[pid] = Process(args=args, pid=pid, ppid=ppid) return processes diff --git a/pipenv/vendor/six.LICENSE b/pipenv/vendor/six.LICENSE index f3068bfd9e..365d10741b 100644 --- a/pipenv/vendor/six.LICENSE +++ b/pipenv/vendor/six.LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2010-2017 Benjamin Peterson +Copyright (c) 2010-2018 Benjamin Peterson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/pipenv/vendor/six.py b/pipenv/vendor/six.py index 6bf4fd3810..89b2188fd6 100644 --- a/pipenv/vendor/six.py +++ b/pipenv/vendor/six.py @@ -1,4 +1,4 @@ -# Copyright (c) 2010-2017 Benjamin Peterson +# Copyright (c) 2010-2018 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -29,7 +29,7 @@ import types __author__ = "Benjamin Peterson " -__version__ = "1.11.0" +__version__ = "1.12.0" # Useful for very coarse version differentiation. @@ -844,10 +844,71 @@ def wrapper(cls): orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) + if hasattr(cls, '__qualname__'): + orig_vars['__qualname__'] = cls.__qualname__ return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper +def ensure_binary(s, encoding='utf-8', errors='strict'): + """Coerce **s** to six.binary_type. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> encoded to `bytes` + - `bytes` -> `bytes` + """ + if isinstance(s, text_type): + return s.encode(encoding, errors) + elif isinstance(s, binary_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + +def ensure_str(s, encoding='utf-8', errors='strict'): + """Coerce *s* to `str`. 
+ + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if not isinstance(s, (text_type, binary_type)): + raise TypeError("not expecting type '%s'" % type(s)) + if PY2 and isinstance(s, text_type): + s = s.encode(encoding, errors) + elif PY3 and isinstance(s, binary_type): + s = s.decode(encoding, errors) + return s + + +def ensure_text(s, encoding='utf-8', errors='strict'): + """Coerce *s* to six.text_type. + + For Python 2: + - `unicode` -> `unicode` + - `str` -> `unicode` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if isinstance(s, binary_type): + return s.decode(encoding, errors) + elif isinstance(s, text_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + + def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. diff --git a/pipenv/vendor/tomlkit/__init__.py b/pipenv/vendor/tomlkit/__init__.py index 92bfa27cbc..9ab90e0a70 100644 --- a/pipenv/vendor/tomlkit/__init__.py +++ b/pipenv/vendor/tomlkit/__init__.py @@ -22,4 +22,4 @@ from .api import ws -__version__ = "0.5.2" +__version__ = "0.5.3" diff --git a/pipenv/vendor/tomlkit/container.py b/pipenv/vendor/tomlkit/container.py index 9b5db5cb66..340491c1d2 100644 --- a/pipenv/vendor/tomlkit/container.py +++ b/pipenv/vendor/tomlkit/container.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals +import copy + from ._compat import decode from .exceptions import KeyAlreadyPresent from .exceptions import NonExistentKey @@ -600,3 +602,16 @@ def __setstate__(self, state): self._map = state[0] self._body = state[1] self._parsed = state[2] + + def copy(self): # type: () -> Container + return copy.copy(self) + + def __copy__(self): # type: () -> Container + c = self.__class__(self._parsed) + for k, v in super(Container, self).copy().items(): + super(Container, c).__setitem__(k, v) + + c._body += 
self.body + c._map.update(self._map) + + return c diff --git a/pipenv/vendor/tomlkit/items.py b/pipenv/vendor/tomlkit/items.py index cccfd4a18e..f199e8dfbd 100644 --- a/pipenv/vendor/tomlkit/items.py +++ b/pipenv/vendor/tomlkit/items.py @@ -527,7 +527,10 @@ def __add__(self, other): def __sub__(self, other): result = super(DateTime, self).__sub__(other) - return self._new(result) + if isinstance(result, datetime): + result = self._new(result) + + return result def _new(self, result): raw = result.isoformat() diff --git a/pipenv/vendor/tomlkit/source.py b/pipenv/vendor/tomlkit/source.py index dcfdafd0a5..ddb580e466 100644 --- a/pipenv/vendor/tomlkit/source.py +++ b/pipenv/vendor/tomlkit/source.py @@ -45,10 +45,6 @@ def __exit__(self, exception_type, exception_val, trace): if self._save_marker: self._source._marker = self._marker - # Restore exceptions are silently consumed, other exceptions need to - # propagate - return exception_type is None - class _StateHandler: """ diff --git a/pipenv/vendor/urllib3/__init__.py b/pipenv/vendor/urllib3/__init__.py index 75725167e0..148a9c31a7 100644 --- a/pipenv/vendor/urllib3/__init__.py +++ b/pipenv/vendor/urllib3/__init__.py @@ -27,7 +27,7 @@ __author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' __license__ = 'MIT' -__version__ = '1.24' +__version__ = '1.24.1' __all__ = ( 'HTTPConnectionPool', diff --git a/pipenv/vendor/urllib3/response.py b/pipenv/vendor/urllib3/response.py index f0cfbb5499..c112690b0a 100644 --- a/pipenv/vendor/urllib3/response.py +++ b/pipenv/vendor/urllib3/response.py @@ -69,9 +69,9 @@ def __getattr__(self, name): return getattr(self._obj, name) def decompress(self, data): - ret = b'' + ret = bytearray() if self._state == GzipDecoderState.SWALLOW_DATA or not data: - return ret + return bytes(ret) while True: try: ret += self._obj.decompress(data) @@ -81,11 +81,11 @@ def decompress(self, data): self._state = GzipDecoderState.SWALLOW_DATA if previous_state == GzipDecoderState.OTHER_MEMBERS: # Allow 
trailing garbage acceptable in other gzip clients - return ret + return bytes(ret) raise data = self._obj.unused_data if not data: - return ret + return bytes(ret) self._state = GzipDecoderState.OTHER_MEMBERS self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) diff --git a/pipenv/vendor/urllib3/util/ssl_.py b/pipenv/vendor/urllib3/util/ssl_.py index 24ee26d632..64ea192a85 100644 --- a/pipenv/vendor/urllib3/util/ssl_.py +++ b/pipenv/vendor/urllib3/util/ssl_.py @@ -263,6 +263,8 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None, """ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) + context.set_ciphers(ciphers or DEFAULT_CIPHERS) + # Setting the default here, as we may have no ssl module on import cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index 1a419eef37..f4232b6351 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -5,44 +5,43 @@ blindspin==2.0.1 click==7.0 click-completion==0.5.0 click-didyoumean==0.0.3 -colorama==0.3.9 +colorama==0.4.1 delegator.py==0.1.1 pexpect==4.6.0 ptyprocess==0.6.0 -python-dotenv==0.9.1 +python-dotenv==0.10.1 first==2.0.1 iso8601==0.1.12 jinja2==2.10 markupsafe==1.0 parse==1.9.0 -pathlib2==2.3.2 +pathlib2==2.3.3 scandir==1.9 -pipdeptree==0.13.0 +pipdeptree==0.13.1 pipreqs==0.4.9 docopt==0.6.2 yarg==0.1.9 pythonfinder==1.1.10 -requests==2.20.1 +requests==2.21.0 chardet==3.0.4 - idna==2.7 - urllib3==1.24 - certifi==2018.10.15 -requirementslib==1.3.3 + idna==2.8 + urllib3==1.24.1 + certifi==2018.11.29 +requirementslib==1.4.0 attrs==18.2.0 distlib==0.2.8 - packaging==18.0 - pyparsing==2.2.2 + packaging==19.0 + pyparsing==2.3.1 git+https://github.com/sarugaku/plette.git@master#egg=plette - tomlkit==0.5.2 -shellingham==1.2.7 -six==1.11.0 + tomlkit==0.5.3 +shellingham==1.2.8 +six==1.12.0 semver==2.8.1 shutilwhich==1.1.0 toml==0.10.0 -cached-property==1.4.3 -vistir==0.2.5 +cached-property==1.5.1 +vistir==0.3.0 
pip-shims==0.3.2 -ptyprocess==0.6.0 enum34==1.1.6 yaspin==0.14.0 cerberus==1.2 @@ -50,3 +49,5 @@ git+https://github.com/sarugaku/passa.git@master#egg=passa cursor==1.2.0 resolvelib==0.2.2 backports.functools_lru_cache==1.5 +pep517==0.5.0 + pytoml==0.1.20 diff --git a/pipenv/vendor/vistir/__init__.py b/pipenv/vendor/vistir/__init__.py index 4c5c906142..9bb369bb74 100644 --- a/pipenv/vendor/vistir/__init__.py +++ b/pipenv/vendor/vistir/__init__.py @@ -6,6 +6,7 @@ TemporaryDirectory, partialmethod, to_native_string, + StringIO, ) from .contextmanagers import ( atomic_open_for_write, @@ -14,6 +15,7 @@ temp_environ, temp_path, spinner, + replaced_stream ) from .misc import ( load_path, @@ -26,12 +28,14 @@ take, chunked, divide, + get_wrapped_stream, + StreamWrapper ) from .path import mkdir_p, rmtree, create_tracked_tempdir, create_tracked_tempfile -from .spin import VistirSpinner, create_spinner +from .spin import create_spinner -__version__ = '0.2.5' +__version__ = '0.3.0' __all__ = [ @@ -50,7 +54,6 @@ "NamedTemporaryFile", "partialmethod", "spinner", - "VistirSpinner", "create_spinner", "create_tracked_tempdir", "create_tracked_tempfile", @@ -61,4 +64,8 @@ "take", "chunked", "divide", + "StringIO", + "get_wrapped_stream", + "StreamWrapper", + "replaced_stream" ] diff --git a/pipenv/vendor/vistir/compat.py b/pipenv/vendor/vistir/compat.py index 27d1e75cee..260959e136 100644 --- a/pipenv/vendor/vistir/compat.py +++ b/pipenv/vendor/vistir/compat.py @@ -1,6 +1,7 @@ # -*- coding=utf-8 -*- -from __future__ import absolute_import, unicode_literals +from __future__ import absolute_import, print_function, unicode_literals +import codecs import errno import os import sys @@ -19,7 +20,6 @@ "JSONDecodeError", "FileNotFoundError", "ResourceWarning", - "FileNotFoundError", "PermissionError", "IsADirectoryError", "fs_str", @@ -27,6 +27,15 @@ "TemporaryDirectory", "NamedTemporaryFile", "to_native_string", + "Iterable", + "Mapping", + "Sequence", + "Set", + "ItemsView", + 
"fs_encode", + "fs_decode", + "_fs_encode_errors", + "_fs_decode_errors" ] if sys.version_info >= (3, 5): @@ -47,20 +56,22 @@ try: from weakref import finalize except ImportError: - from pipenv.vendor.backports.weakref import finalize + from pipenv.vendor.backports.weakref import finalize # type: ignore try: from functools import partialmethod except Exception: - from .backports.functools import partialmethod + from .backports.functools import partialmethod # type: ignore try: from json import JSONDecodeError except ImportError: # Old Pythons. - JSONDecodeError = ValueError + JSONDecodeError = ValueError # type: ignore if six.PY2: + from io import BytesIO as StringIO + class ResourceWarning(Warning): pass @@ -80,12 +91,24 @@ class IsADirectoryError(OSError): """The command does not work on directories""" pass -else: - from builtins import ResourceWarning, FileNotFoundError, PermissionError, IsADirectoryError - -six.add_move(six.MovedAttribute("Iterable", "collections", "collections.abc")) -from six.moves import Iterable + class FileExistsError(OSError): + def __init__(self, *args, **kwargs): + self.errno = errno.EEXIST + super(FileExistsError, self).__init__(*args, **kwargs) +else: + from builtins import ( + ResourceWarning, FileNotFoundError, PermissionError, IsADirectoryError, + FileExistsError + ) + from io import StringIO + +six.add_move(six.MovedAttribute("Iterable", "collections", "collections.abc")) # type: ignore +six.add_move(six.MovedAttribute("Mapping", "collections", "collections.abc")) # type: ignore +six.add_move(six.MovedAttribute("Sequence", "collections", "collections.abc")) # type: ignore +six.add_move(six.MovedAttribute("Set", "collections", "collections.abc")) # type: ignore +six.add_move(six.MovedAttribute("ItemsView", "collections", "collections.abc")) # type: ignore +from six.moves import Iterable, Mapping, Sequence, Set, ItemsView # type: ignore # noqa if not sys.warnoptions: warnings.simplefilter("default", ResourceWarning) @@ -179,13 
+202,82 @@ def fs_str(string): Borrowed from pip-tools """ + if isinstance(string, str): return string assert not isinstance(string, bytes) return string.encode(_fs_encoding) -_fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() +def _get_path(path): + """ + Fetch the string value from a path-like object + + Returns **None** if there is no string value. + """ + + if isinstance(path, (six.string_types, bytes)): + return path + path_type = type(path) + try: + path_repr = path_type.__fspath__(path) + except AttributeError: + return + if isinstance(path_repr, (six.string_types, bytes)): + return path_repr + return + + +def fs_encode(path): + """ + Encode a filesystem path to the proper filesystem encoding + + :param Union[str, bytes] path: A string-like path + :returns: A bytes-encoded filesystem path representation + """ + + path = _get_path(path) + if path is None: + raise TypeError("expected a valid path to encode") + if isinstance(path, six.text_type): + path = path.encode(_fs_encoding, _fs_encode_errors) + return path + + +def fs_decode(path): + """ + Decode a filesystem path using the proper filesystem encoding + + :param path: The filesystem path to decode from bytes or string + :return: [description] + :rtype: [type] + """ + + path = _get_path(path) + if path is None: + raise TypeError("expected a valid path to decode") + if isinstance(path, six.binary_type): + path = path.decode(_fs_encoding, _fs_decode_errors) + return path + + +if sys.version_info >= (3, 3) and os.name != "nt": + _fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() +else: + _fs_encoding = "utf-8" + +if six.PY3: + if os.name == "nt": + _fs_error_fn = None + alt_strategy = "surrogatepass" + else: + alt_strategy = "surrogateescape" + _fs_error_fn = getattr(sys, "getfilesystemencodeerrors", None) + _fs_encode_errors = _fs_error_fn() if _fs_error_fn is not None else alt_strategy + _fs_decode_errors = _fs_error_fn() if _fs_error_fn is not None else alt_strategy 
+else: + _fs_encode_errors = "backslashreplace" + _fs_decode_errors = "replace" def to_native_string(string): diff --git a/pipenv/vendor/vistir/contextmanagers.py b/pipenv/vendor/vistir/contextmanagers.py index 77fbb9df38..b2627d2d11 100644 --- a/pipenv/vendor/vistir/contextmanagers.py +++ b/pipenv/vendor/vistir/contextmanagers.py @@ -1,5 +1,5 @@ # -*- coding=utf-8 -*- -from __future__ import absolute_import, unicode_literals +from __future__ import absolute_import, unicode_literals, print_function import io import os @@ -15,7 +15,8 @@ __all__ = [ - "temp_environ", "temp_path", "cd", "atomic_open_for_write", "open_file", "spinner" + "temp_environ", "temp_path", "cd", "atomic_open_for_write", "open_file", "spinner", + "dummy_spinner", "replaced_stream" ] @@ -286,3 +287,56 @@ def open_file(link, session=None, stream=True): if conn is not None: conn.close() result.close() + + +@contextmanager +def replaced_stream(stream_name): + """ + Context manager to temporarily swap out *stream_name* with a stream wrapper. + + :param str stream_name: The name of a sys stream to wrap + :returns: A ``StreamWrapper`` replacement, temporarily + + >>> orig_stdout = sys.stdout + >>> with replaced_stream("stdout") as stdout: + ... sys.stdout.write("hello") + ... 
assert stdout.getvalue() == "hello" + + >>> sys.stdout.write("hello") + 'hello' + """ + orig_stream = getattr(sys, stream_name) + new_stream = six.StringIO() + try: + setattr(sys, stream_name, new_stream) + yield getattr(sys, stream_name) + finally: + setattr(sys, stream_name, orig_stream) + + +@contextmanager +def replaced_streams(): + """ + Context manager to replace both ``sys.stdout`` and ``sys.stderr`` using + ``replaced_stream`` + + returns: *(stdout, stderr)* + + >>> import sys + >>> with vistir.contextmanagers.replaced_streams() as streams: + >>> stdout, stderr = streams + >>> sys.stderr.write("test") + >>> sys.stdout.write("hello") + >>> assert stdout.getvalue() == "hello" + >>> assert stderr.getvalue() == "test" + + >>> stdout.getvalue() + 'hello' + + >>> stderr.getvalue() + 'test' + """ + + with replaced_stream("stdout") as stdout: + with replaced_stream("stderr") as stderr: + yield (stdout, stderr) diff --git a/pipenv/vendor/vistir/misc.py b/pipenv/vendor/vistir/misc.py index 11480e2fb9..cf30123363 100644 --- a/pipenv/vendor/vistir/misc.py +++ b/pipenv/vendor/vistir/misc.py @@ -1,6 +1,7 @@ # -*- coding=utf-8 -*- -from __future__ import absolute_import, unicode_literals +from __future__ import absolute_import, unicode_literals, print_function +import io import json import logging import locale @@ -15,7 +16,7 @@ import six from .cmdparse import Script -from .compat import Path, fs_str, partialmethod, to_native_string, Iterable +from .compat import Path, fs_str, partialmethod, to_native_string, Iterable, StringIO from .contextmanagers import spinner as spinner if os.name != "nt": @@ -38,6 +39,9 @@ class WindowsError(OSError): "divide", "getpreferredencoding", "decode_for_output", + "get_canonical_encoding_name", + "get_wrapped_stream", + "StreamWrapper", ] @@ -159,7 +163,10 @@ def _create_subprocess( c = _spawn_subprocess(cmd, env=env, block=block, cwd=cwd, combine_stderr=combine_stderr) except Exception as exc: - sys.stderr.write("Error %s while executing 
command %s", exc, " ".join(cmd._parts)) + import traceback + formatted_tb = "".join(traceback.format_exception(*sys.exc_info())) + sys.stderr.write("Error while executing command %s:" % " ".join(cmd._parts)) + sys.stderr.write(formatted_tb) raise if not block: c.stdin.close() @@ -279,14 +286,11 @@ def run( _env = os.environ.copy() if env: _env.update(env) - env = _env if six.PY2: fs_encode = partial(to_bytes, encoding=locale_encoding) - _env = {fs_encode(k): fs_encode(v) for k, v in os.environ.items()} - for key, val in env.items(): - _env[fs_encode(key)] = fs_encode(val) + _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()} else: - _env = {k: fs_str(v) for k, v in os.environ.items()} + _env = {k: fs_str(v) for k, v in _env.items()} if not spinner_name: spinner_name = "bouncingBar" if six.PY2: @@ -315,7 +319,6 @@ def run( ) - def load_path(python): """Load the :mod:`sys.path` from the given python executable's environment as json @@ -329,7 +332,7 @@ def load_path(python): python = Path(python).as_posix() out, err = run([python, "-c", "import json, sys; print(json.dumps(sys.path))"], - nospin=True) + nospin=True) if out: return json.loads(out) else: @@ -515,19 +518,184 @@ def getpreferredencoding(): PREFERRED_ENCODING = getpreferredencoding() -def decode_for_output(output): +def get_output_encoding(source_encoding): + """ + Given a source encoding, determine the preferred output encoding. + + :param str source_encoding: The encoding of the source material. + :returns: The output encoding to decode to. 
+ :rtype: str + """ + + if source_encoding is not None: + if get_canonical_encoding_name(source_encoding) == 'ascii': + return 'utf-8' + return get_canonical_encoding_name(source_encoding) + return get_canonical_encoding_name(PREFERRED_ENCODING) + + +def _encode(output, encoding=None, errors=None, translation_map=None): + if encoding is None: + encoding = PREFERRED_ENCODING + try: + output = output.encode(encoding) + except (UnicodeDecodeError, UnicodeEncodeError): + if translation_map is not None: + if six.PY2: + output = unicode.translate( + to_text(output, encoding=encoding, errors=errors), translation_map + ) + else: + output = output.translate(translation_map) + else: + output = to_text(output, encoding=encoding, errors=errors) + except AttributeError: + pass + return output + + +def decode_for_output(output, target_stream=None, translation_map=None): """Given a string, decode it for output to a terminal :param str output: A string to print to a terminal + :param target_stream: A stream to write to, we will encode to target this stream if possible. + :param dict translation_map: A mapping of unicode character ordinals to replacement strings. 
:return: A re-encoded string using the preferred encoding :rtype: str """ if not isinstance(output, six.string_types): return output + encoding = None + if target_stream is not None: + encoding = getattr(target_stream, "encoding", None) + encoding = get_output_encoding(encoding) try: - output = output.encode(PREFERRED_ENCODING) - except AttributeError: - pass - output = output.decode(PREFERRED_ENCODING) - return output + output = _encode(output, encoding=encoding, translation_map=translation_map) + except (UnicodeDecodeError, UnicodeEncodeError): + output = _encode(output, encoding=encoding, errors="replace", + translation_map=translation_map) + return to_text(output, encoding=encoding, errors="replace") + + +def get_canonical_encoding_name(name): + # type: (str) -> str + """ + Given an encoding name, get the canonical name from a codec lookup. + + :param str name: The name of the codec to lookup + :return: The canonical version of the codec name + :rtype: str + """ + + import codecs + try: + codec = codecs.lookup(name) + except LookupError: + return name + else: + return codec.name + + +def get_wrapped_stream(stream): + """ + Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream. + + :param stream: A stream instance to wrap + :returns: A new, wrapped stream + :rtype: :class:`StreamWrapper` + """ + + if stream is None: + raise TypeError("must provide a stream to wrap") + encoding = getattr(stream, "encoding", None) + encoding = get_output_encoding(encoding) + return StreamWrapper(stream, encoding, "replace", line_buffering=True) + + +class StreamWrapper(io.TextIOWrapper): + + """ + This wrapper class will wrap a provided stream and supply an interface + for compatibility. 
+ """ + + def __init__(self, stream, encoding, errors, line_buffering=True, **kwargs): + self._stream = stream = _StreamProvider(stream) + io.TextIOWrapper.__init__( + self, stream, encoding, errors, line_buffering=line_buffering, **kwargs + ) + + # borrowed from click's implementation of stream wrappers, see + # https://github.com/pallets/click/blob/6cafd32/click/_compat.py#L64 + if six.PY2: + def write(self, x): + if isinstance(x, (str, buffer, bytearray)): + try: + self.flush() + except Exception: + pass + return self.buffer.write(str(x)) + return io.TextIOWrapper.write(self, x) + + def writelines(self, lines): + for line in lines: + self.write(line) + + def __del__(self): + try: + self.detach() + except Exception: + pass + + def isatty(self): + return self._stream.isatty() + + +# More things borrowed from click, this is because we are using `TextIOWrapper` instead of +# just a normal StringIO +class _StreamProvider(object): + def __init__(self, stream): + self._stream = stream + super(_StreamProvider, self).__init__() + + def __getattr__(self, name): + return getattr(self._stream, name) + + def read1(self, size): + fn = getattr(self._stream, "read1", None) + if fn is not None: + return fn(size) + if six.PY2: + return self._stream.readline(size) + return self._stream.read(size) + + def readable(self): + fn = getattr(self._stream, "readable", None) + if fn is not None: + return fn() + try: + self._stream.read(0) + except Exception: + return False + return True + + def writable(self): + fn = getattr(self._stream, "writable", None) + if fn is not None: + return fn() + try: + self._stream.write(b"") + except Exception: + return False + return True + + def seekable(self): + fn = getattr(self._stream, "seekable", None) + if fn is not None: + return fn() + try: + self._stream.seek(self._stream.tell()) + except Exception: + return False + return True diff --git a/pipenv/vendor/vistir/path.py b/pipenv/vendor/vistir/path.py index 6e9a7f654f..d3c8befe8a 100644 --- 
a/pipenv/vendor/vistir/path.py +++ b/pipenv/vendor/vistir/path.py @@ -23,6 +23,8 @@ TemporaryDirectory, _fs_encoding, finalize, + fs_decode, + fs_encode ) @@ -195,9 +197,8 @@ def is_readonly_path(fn): Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)` """ - from .compat import to_native_string - fn = to_native_string(fn) + fn = fs_encode(fn) if os.path.exists(fn): file_stat = os.stat(fn).st_mode return not bool(file_stat & stat.S_IWRITE) or not os.access(fn, os.W_OK) @@ -212,20 +213,19 @@ def mkdir_p(newdir, mode=0o777): :raises: OSError if a file is encountered along the way """ # http://code.activestate.com/recipes/82465-a-friendly-mkdir/ - from .misc import to_bytes, to_text - newdir = to_bytes(newdir, "utf-8") + newdir = fs_encode(newdir) if os.path.exists(newdir): if not os.path.isdir(newdir): raise OSError( "a file with the same name as the desired dir, '{0}', already exists.".format( - newdir + fs_decode(newdir) ) ) else: - head, tail = os.path.split(to_bytes(newdir, encoding="utf-8")) + head, tail = os.path.split(newdir) # Make sure the tail doesn't point to the asame place as the head - curdir = to_bytes(".", encoding="utf-8") + curdir = fs_encode(".") tail_and_head_match = ( os.path.relpath(tail, start=os.path.basename(head)) == curdir ) @@ -234,7 +234,7 @@ def mkdir_p(newdir, mode=0o777): if os.path.exists(target) and os.path.isfile(target): raise OSError( "A file with the same name as the desired dir, '{0}', already exists.".format( - to_text(newdir, encoding="utf-8") + fs_decode(newdir) ) ) os.makedirs(os.path.join(head, tail), mode) @@ -296,9 +296,7 @@ def set_write_bit(fn): :param str fn: The target filename or path """ - from .compat import to_native_string - - fn = to_native_string(fn) + fn = fs_encode(fn) if not os.path.exists(fn): return file_stat = os.stat(fn).st_mode @@ -330,9 +328,7 @@ def rmtree(directory, ignore_errors=False, onerror=None): Setting `ignore_errors=True` may cause this to silently fail to 
delete the path """ - from .compat import to_native_string - - directory = to_native_string(directory) + directory = fs_encode(directory) if onerror is None: onerror = handle_remove_readonly try: @@ -341,9 +337,8 @@ def rmtree(directory, ignore_errors=False, onerror=None): ) except (IOError, OSError, FileNotFoundError) as exc: # Ignore removal failures where the file doesn't exist - if exc.errno == errno.ENOENT: - pass - raise + if exc.errno != errno.ENOENT: + raise def handle_remove_readonly(func, path, exc): @@ -361,7 +356,7 @@ def handle_remove_readonly(func, path, exc): """ # Check for read-only attribute from .compat import ( - ResourceWarning, FileNotFoundError, PermissionError, to_native_string + ResourceWarning, FileNotFoundError, PermissionError ) PERM_ERRORS = (errno.EACCES, errno.EPERM, errno.ENOENT) @@ -370,7 +365,6 @@ def handle_remove_readonly(func, path, exc): ) # split the initial exception out into its type, exception, and traceback exc_type, exc_exception, exc_tb = exc - path = to_native_string(path) if is_readonly_path(path): # Apply write permission and call original function set_write_bit(path) diff --git a/pipenv/vendor/vistir/spin.py b/pipenv/vendor/vistir/spin.py index a2455b7d1f..a9275055cf 100644 --- a/pipenv/vendor/vistir/spin.py +++ b/pipenv/vendor/vistir/spin.py @@ -1,4 +1,5 @@ # -*- coding=utf-8 -*- +from __future__ import absolute_import, print_function import functools import os @@ -8,41 +9,72 @@ import time import colorama -import cursor import six from .compat import to_native_string from .termcolors import COLOR_MAP, COLORS, colored, DISABLE_COLORS +from .misc import decode_for_output from io import StringIO try: import yaspin + import cursor except ImportError: yaspin = None Spinners = None + SpinBase = None + cursor = None else: - from yaspin.spinners import Spinners + import yaspin.spinners + import yaspin.core + Spinners = yaspin.spinners.Spinners + SpinBase = yaspin.core.Yaspin + +if os.name == "nt": + def handler(signum, 
frame, spinner): + """Signal handler, used to gracefully shut down the ``spinner`` instance + when specified signal is received by the process running the ``spinner``. + + ``signum`` and ``frame`` are mandatory arguments. Check ``signal.signal`` + function for more details. + """ + spinner.fail() + spinner.stop() + sys.exit(0) + +else: + def handler(signum, frame, spinner): + """Signal handler, used to gracefully shut down the ``spinner`` instance + when specified signal is received by the process running the ``spinner``. -handler = None -if yaspin and os.name == "nt": - handler = yaspin.signal_handlers.default_handler -elif yaspin and os.name != "nt": - handler = yaspin.signal_handlers.fancy_handler + ``signum`` and ``frame`` are mandatory arguments. Check ``signal.signal`` + function for more details. + """ + spinner.red.fail("✘") + spinner.stop() + sys.exit(0) CLEAR_LINE = chr(27) + "[K" +TRANSLATION_MAP = { + 10004: u"OK", + 10008: u"x", +} + + +decode_output = functools.partial(decode_for_output, translation_map=TRANSLATION_MAP) + class DummySpinner(object): def __init__(self, text="", **kwargs): - super(DummySpinner, self).__init__() if DISABLE_COLORS: colorama.init() - from .misc import decode_for_output - self.text = to_native_string(decode_for_output(text)) if text else "" + self.text = to_native_string(decode_output(text)) if text else "" self.stdout = kwargs.get("stdout", sys.stdout) self.stderr = kwargs.get("stderr", sys.stderr) self.out_buff = StringIO() self.write_to_stdout = kwargs.get("write_to_stdout", False) + super(DummySpinner, self).__init__() def __enter__(self): if self.text and self.text != "None": @@ -50,11 +82,11 @@ def __enter__(self): self.write(self.text) return self - def __exit__(self, exc_type, exc_val, traceback): + def __exit__(self, exc_type, exc_val, tb): if exc_type: import traceback - from .misc import decode_for_output - self.write_err(decode_for_output(traceback.format_exception(*sys.exc_info()))) + formatted_tb = 
traceback.format_exception(exc_type, exc_val, tb) + self.write_err("".join(formatted_tb)) self._close_output_buffer() return False @@ -76,56 +108,63 @@ def _close_output_buffer(self): pass def fail(self, exitcode=1, text="FAIL"): - from .misc import decode_for_output - if text and text != "None": + if text is not None and text != "None": if self.write_to_stdout: - self.write(decode_for_output(text)) + self.write(text) else: - self.write_err(decode_for_output(text)) + self.write_err(text) self._close_output_buffer() def ok(self, text="OK"): - if text and text != "None": + if text is not None and text != "None": if self.write_to_stdout: - self.stdout.write(self.text) + self.write(text) else: - self.stderr.write(self.text) + self.write_err(text) self._close_output_buffer() return 0 def hide_and_write(self, text, target=None): if not target: target = self.stdout - from .misc import decode_for_output if text is None or isinstance(text, six.string_types) and text == "None": pass - target.write(decode_for_output("\r")) + target.write(decode_output("\r", target_stream=target)) self._hide_cursor(target=target) - target.write(decode_for_output("{0}\n".format(text))) + target.write(decode_output("{0}\n".format(text), target_stream=target)) target.write(CLEAR_LINE) self._show_cursor(target=target) def write(self, text=None): if not self.write_to_stdout: return self.write_err(text) - from .misc import decode_for_output if text is None or isinstance(text, six.string_types) and text == "None": pass - text = decode_for_output(text) - self.stdout.write(decode_for_output("\r")) - line = decode_for_output("{0}\n".format(text)) - self.stdout.write(line) - self.stdout.write(CLEAR_LINE) + if not self.stdout.closed: + stdout = self.stdout + else: + stdout = sys.stdout + text = decode_output(text, target_stream=stdout) + stdout.write(decode_output("\r", target_stream=stdout)) + line = decode_output("{0}\n".format(text), target_stream=stdout) + stdout.write(line) + stdout.write(CLEAR_LINE) 
def write_err(self, text=None): - from .misc import decode_for_output if text is None or isinstance(text, six.string_types) and text == "None": pass - text = decode_for_output(text) - self.stderr.write(decode_for_output("\r")) - line = decode_for_output("{0}\n".format(text)) - self.stderr.write(line) - self.stderr.write(CLEAR_LINE) + if not self.stderr.closed: + stderr = self.stderr + else: + if sys.stderr.closed: + print(text) + return + stderr = sys.stderr + text = decode_output(text, target_stream=stderr) + stderr.write(decode_output("\r", target_stream=stderr)) + line = decode_output("{0}\n".format(text), target_stream=stderr) + stderr.write(line) + stderr.write(CLEAR_LINE) @staticmethod def _hide_cursor(target=None): @@ -136,10 +175,11 @@ def _show_cursor(target=None): pass -base_obj = yaspin.core.Yaspin if yaspin is not None else DummySpinner +if SpinBase is None: + SpinBase = DummySpinner -class VistirSpinner(base_obj): +class VistirSpinner(SpinBase): "A spinner class for handling spinners on windows and posix." 
def __init__(self, *args, **kwargs): @@ -182,6 +222,8 @@ def __init__(self, *args, **kwargs): self.write_to_stdout = write_to_stdout self.is_dummy = bool(yaspin is None) super(VistirSpinner, self).__init__(*args, **kwargs) + if DISABLE_COLORS: + colorama.deinit() def ok(self, text="OK", err=False): """Set Ok (success) finalizer to a spinner.""" @@ -204,38 +246,40 @@ def fail(self, text="FAIL", err=False): def hide_and_write(self, text, target=None): if not target: target = self.stdout - from .misc import decode_for_output if text is None or isinstance(text, six.string_types) and text == "None": pass - target.write(decode_for_output("\r")) + target.write(decode_output("\r")) self._hide_cursor(target=target) - target.write(decode_for_output("{0}\n".format(text))) + target.write(decode_output("{0}\n".format(text))) target.write(CLEAR_LINE) self._show_cursor(target=target) def write(self, text): if not self.write_to_stdout: return self.write_err(text) - from .misc import to_text - sys.stdout.write("\r") - self.stdout.write(CLEAR_LINE) + stdout = self.stdout + if self.stdout.closed: + stdout = sys.stdout + stdout.write(decode_output("\r", target_stream=stdout)) + stdout.write(decode_output(CLEAR_LINE, target_stream=stdout)) if text is None: text = "" - text = to_native_string("{0}\n".format(text)) - self.stdout.write(text) - self.out_buff.write(to_text(text)) + text = decode_output("{0}\n".format(text), target_stream=stdout) + stdout.write(text) + self.out_buff.write(decode_output(text, target_stream=self.out_buff)) def write_err(self, text): """Write error text in the terminal without breaking the spinner.""" - from .misc import to_text - - self.stderr.write("\r") - self.stderr.write(CLEAR_LINE) + stderr = self.stderr + if self.stderr.closed: + stderr = sys.stderr + stderr.write(decode_output("\r", target_stream=stderr)) + stderr.write(decode_output(CLEAR_LINE, target_stream=stderr)) if text is None: text = "" - text = to_native_string("{0}\n".format(text)) + text = 
decode_output("{0}\n".format(text), target_stream=stderr) self.stderr.write(text) - self.out_buff.write(to_text(text)) + self.out_buff.write(decode_output(text, target_stream=self.out_buff)) def start(self): if self._sigmap: @@ -270,26 +314,22 @@ def stop(self): if target.isatty(): self._show_cursor(target=target) - if self.stderr and self.stderr != sys.stderr: - self.stderr.close() - if self.stdout and self.stdout != sys.stdout: - self.stdout.close() self.out_buff.close() def _freeze(self, final_text, err=False): """Stop spinner, compose last frame and 'freeze' it.""" if not final_text: final_text = "" - text = to_native_string(final_text) + target = self.stderr if err else self.stdout + if target.closed: + target = sys.stderr if err else sys.stdout + text = decode_output(final_text, target_stream=target) self._last_frame = self._compose_out(text, mode="last") # Should be stopped here, otherwise prints after # self._freeze call will mess up the spinner self.stop() - if err or not self.write_to_stdout: - self.stderr.write(self._last_frame) - else: - self.stdout.write(self._last_frame) + target.write(self._last_frame) def _compose_color_func(self): fn = functools.partial( @@ -303,19 +343,19 @@ def _compose_color_func(self): def _compose_out(self, frame, mode=None): # Ensure Unicode input - frame = to_native_string(frame) + frame = decode_output(frame) if self._text is None: self._text = "" - text = to_native_string(self._text) + text = decode_output(self._text) if self._color_func is not None: frame = self._color_func(frame) if self._side == "right": frame, text = text, frame # Mode if not mode: - out = to_native_string("\r{0} {1}".format(frame, text)) + out = decode_output("\r{0} {1}".format(frame, text)) else: - out = to_native_string("{0} {1}\n".format(frame, text)) + out = decode_output("{0} {1}\n".format(frame, text)) return out def _spin(self):