From 68029650bea6b3fbb5ae286d505b7e5f960df8af Mon Sep 17 00:00:00 2001 From: Arnon Yaari Date: Tue, 28 Dec 2021 12:57:11 +0200 Subject: [PATCH 001/113] Fix inconsistency in dependency resolution documentation --- docs/html/topics/dependency-resolution.md | 54 ++++++++++++----------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/docs/html/topics/dependency-resolution.md b/docs/html/topics/dependency-resolution.md index 6deb71366e5..a1e7dee8d33 100644 --- a/docs/html/topics/dependency-resolution.md +++ b/docs/html/topics/dependency-resolution.md @@ -173,22 +173,24 @@ When you get a `ResolutionImpossible` error, you might see something like this: ```{pip-cli} -$ pip install "pytest < 4.6" pytest-cov==2.12.1 +$ pip install package_coffee==0.44.1 package_tea==4.3.0 [regular pip output] -ERROR: Cannot install pytest-cov==2.12.1 and pytest<4.6 because these package versions have conflicting dependencies. +ERROR: Cannot install package_coffee==0.44.1 and package_tea==4.3.0 because these package versions have conflicting dependencies. The conflict is caused by: - The user requested pytest<4.6 - pytest-cov 2.12.1 depends on pytest>=4.6 + package_coffee 0.44.1 depends on package_water<3.0.0,>=2.4.2 + package_tea 4.3.0 depends on package_water==2.3.1 ``` -In this example, pip cannot install the packages requested because they are -asking for conflicting versions of pytest. +In this example, pip cannot install the packages you have requested, +because they each depend on different versions of the same package +(``package_water``): -- `pytest-cov` version `2.12.1`, requires `pytest` with a version or equal to - `4.6`. -- `package_tea` version `4.3.0` depends on version `2.3.1` of - `package_water` +- ``package_coffee`` version ``0.44.1`` depends on a version of + ``package_water`` that is less than ``3.0.0`` but greater than or equal to + ``2.4.2`` +- ``package_tea`` version ``4.3.0`` depends on version ``2.3.1`` of + ``package_water`` Sometimes these messages are straightforward to read, because they use commonly understood comparison operators to specify the required version @@ -197,16 +199,16 @@ commonly understood comparison operators to specify the required version However, Python packaging also supports some more complex ways for specifying package versions (e.g. `~=` or `*`): -| Operator | Description | Example | -| -------- | -------------------------------------------------------------- | --------------------------------------------------- | -| `>` | Any version greater than the specified version. | `>3.1`: any version greater than `3.1`. | -| `<` | Any version less than the specified version. | `<3.1`: any version less than `3.1`. | -| `<=` | Any version less than or equal to the specified version. | `<=3.1`: any version less than or equal to `3.1`. | -| `>=` | Any version greater than or equal to the specified version. | `>=3.1`: version `3.1` and greater. | -| `==` | Exactly the specified version. | `==3.1`: only `3.1`. | -| `!=` | Any version not equal to the specified version. | `!=3.1`: any version other than `3.1`. | -| `~=` | Any compatible{sup}`1` version. | `~=3.1`: any version compatible{sup}`1` with `3.1`. | -| `*` | Can be used at the end of a version number to represent _all_. | `==3.1.*`: any version that starts with `3.1`. | +| Operator | Description | Example | +| -------- | -------------------------------------------------------------- | ---------------------------------------------------- | +| `>` | Any version greater than the specified version. 
| `>3.1`: any version greater than `3.1`. | +| `<` | Any version less than the specified version. | `<3.1`: any version less than `3.1`. | +| `<=` | Any version less than or equal to the specified version. | `<=3.1`: any version less than or equal to `3.1`. | +| `>=` | Any version greater than or equal to the specified version. | `>=3.1`: any version greater than or equal to `3.1`. | +| `==` | Exactly the specified version. | `==3.1`: only version `3.1`. | +| `!=` | Any version not equal to the specified version. | `!=3.1`: any version other than `3.1`. | +| `~=` | Any compatible{sup}`1` version. | `~=3.1`: any version compatible{sup}`1` with `3.1`. | +| `*` | Can be used at the end of a version number to represent _all_. | `==3.1.*`: any version that starts with `3.1`. | {sup}`1` Compatible versions are higher versions that only differ in the final segment. `~=3.1.2` is equivalent to `>=3.1.2, ==3.1.*`. `~=3.1` is equivalent to `>=3.1, ==3.*`. @@ -235,7 +237,7 @@ package version. In our first example both `package_coffee` and `package_tea` have been _pinned_ to use specific versions -(`package_coffee==0.44.1b0 package_tea==4.3.0`). +(`package_coffee==0.44.1 package_tea==4.3.0`). To find a version of both `package_coffee` and `package_tea` that depend on the same version of `package_water`, you might consider: @@ -250,20 +252,20 @@ In the second case, pip will automatically find a version of both `package_coffee` and `package_tea` that depend on the same version of `package_water`, installing: -- `package_coffee 0.46.0b0`, which depends on `package_water 2.6.1` -- `package_tea 4.3.0` which _also_ depends on `package_water 2.6.1` +- `package_coffee 0.44.1`, which depends on `package_water 2.6.1` +- `package_tea 4.4.3` which _also_ depends on `package_water 2.6.1` If you want to prioritize one package over another, you can add version specifiers to _only_ the more important package: ```{pip-cli} -$ pip install package_coffee==0.44.1b0 package_tea +$ pip install package_coffee==0.44.1 package_tea ``` This will result in: -- `package_coffee 0.44.1b0`, which depends on `package_water 2.6.1` -- `package_tea 4.1.3` which also depends on `package_water 2.6.1` +- `package_coffee 0.44.1`, which depends on `package_water 2.6.1` +- `package_tea 4.4.3` which _also_ depends on `package_water 2.6.1` Now that you have resolved the issue, you can repin the compatible package versions as required. 
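The specifier semantics in the table above can be checked directly with the `packaging` library, the same implementation pip vendors. A minimal sketch, using made-up version numbers, of how the compatible-release operator expands:

```python
# Sketch of the specifier semantics from the table above, using the
# `packaging` library (pip vendors its own copy of it). The version
# numbers are purely illustrative.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# ~=3.1 is the compatible-release operator: equivalent to ">=3.1, ==3.*".
compatible = SpecifierSet("~=3.1")

for candidate in ["3.0.9", "3.1", "3.2.7", "4.0"]:
    print(candidate, Version(candidate) in compatible)
# -> 3.0.9 False, 3.1 True, 3.2.7 True, 4.0 False
```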
From 932d83e747d3cf3dacadcc257354b6d38954724c Mon Sep 17 00:00:00 2001 From: Arnon Yaari Date: Tue, 28 Dec 2021 14:48:43 +0200 Subject: [PATCH 002/113] add trivial news entry --- news/10751.trivial.rst | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 news/10751.trivial.rst diff --git a/news/10751.trivial.rst b/news/10751.trivial.rst new file mode 100644 index 00000000000..e69de29bb2d From 8802ce9ed6a1f77f69b95ca653683e71159ff8e3 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Thu, 29 Sep 2022 12:29:42 +0900 Subject: [PATCH 003/113] Fix typo in test_provider.py transative -> transitive --- tests/unit/resolution_resolvelib/test_provider.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/unit/resolution_resolvelib/test_provider.py b/tests/unit/resolution_resolvelib/test_provider.py index ab1dc74caa3..77d1d299a0e 100644 --- a/tests/unit/resolution_resolvelib/test_provider.py +++ b/tests/unit/resolution_resolvelib/test_provider.py @@ -50,29 +50,29 @@ def test_provider_known_depths(factory: Factory) -> None: ) assert provider._known_depths == {root_requirement_name: 1.0} - # Transative requirement is a dependency of root requirement + # Transitive requirement is a dependency of root requirement # theforefore has an inferred depth of 2 root_package_candidate = InstallationCandidate( root_requirement_name, "1.0", Link("https://{root_requirement_name}.com"), ) - transative_requirement_name = "my-transitive-package" + transitive_requirement_name = "my-transitive-package" - transative_package_information = build_requirement_information( - name=transative_requirement_name, parent=root_package_candidate + transitive_package_information = build_requirement_information( + name=transitive_requirement_name, parent=root_package_candidate ) provider.get_preference( - identifier=transative_requirement_name, + identifier=transitive_requirement_name, resolutions={}, candidates={}, information={ root_requirement_name: root_requirement_information, - transative_requirement_name: transative_package_information, + transitive_requirement_name: transitive_package_information, }, backtrack_causes=[], ) assert provider._known_depths == { - transative_requirement_name: 2.0, + transitive_requirement_name: 2.0, root_requirement_name: 1.0, } From 66903a32ff62fe5b51265ef49f01ce8fb1ec74dc Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 21 May 2023 00:01:30 +0200 Subject: [PATCH 004/113] Fix direct usage of pip.pyz example --- docs/html/installation.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index 036a91397a5..126d7fdbd1e 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -67,9 +67,17 @@ $ python pip.pyz --help If run directly: -```{pip-cli} -$ pip.pyz --help +````{tab} Unix/macOS +```shell +./pip.pyz +``` +```` + +````{tab} Windows +```shell +.\pip.pyz ``` +```` then the currently active Python interpreter will be used. 
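The direct-invocation behaviour documented in the patch above comes from Python's zipapp format: a `.pyz` file is a zip archive that may begin with an interpreter shebang, so on Unix it runs like any script once marked executable. A minimal sketch of building such an archive, assuming a hypothetical `myapp/` directory (this is not how pip.pyz itself is produced):

```python
# Hedged sketch of producing a .pyz zip application; "myapp" and the
# interpreter line are hypothetical, not pip's actual build setup.
import zipapp

# "myapp/" must contain a __main__.py entry point. The interpreter
# argument is written as a shebang at the front of the archive, which
# is what lets a Unix shell run "./myapp.pyz" after "chmod +x"; on
# Windows the .pyz extension must be associated with the Python
# launcher instead.
zipapp.create_archive(
    "myapp",
    target="myapp.pyz",
    interpreter="/usr/bin/env python3",
)
```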
From 5102ca458a23c1945c99f96b25c50c9ac8be6a31 Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 21 May 2023 00:10:59 +0200 Subject: [PATCH 005/113] Create 12043.doc.rst --- news/12043.doc.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/12043.doc.rst diff --git a/news/12043.doc.rst b/news/12043.doc.rst new file mode 100644 index 00000000000..2c77f9b59dc --- /dev/null +++ b/news/12043.doc.rst @@ -0,0 +1 @@ +Fix the direct usage of zipapp showing up as ``python -m pip.pyz`` rather than ``./pip.pyz`` / ``.\pip.pyz`` From 881da76e64923234b93a795b80f06356e266b90c Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 21 May 2023 00:16:12 +0200 Subject: [PATCH 006/113] Update installation.md --- docs/html/installation.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index 126d7fdbd1e..e047b70158b 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -68,14 +68,14 @@ $ python pip.pyz --help If run directly: ````{tab} Unix/macOS -```shell -./pip.pyz +```console +$ ./pip.pyz ``` ```` ````{tab} Windows -```shell -.\pip.pyz +```console +C:\> .\pip.pyz ``` ```` From 3db673f31e694f8b1eafb6126d0bf43bf79ddd22 Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 21 May 2023 15:29:51 +0200 Subject: [PATCH 007/113] Update installation.md --- docs/html/installation.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index e047b70158b..a179e145a92 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -67,8 +67,16 @@ $ python pip.pyz --help If run directly: -````{tab} Unix/macOS +````{tab} Linux ```console +$ chmod +x ./pip.pyz +$ ./pip.pyz +``` +```` + +````{tab} MacOS +```console +$ chmod +x ./pip.pyz $ ./pip.pyz ``` ```` From 39431b5db17dbd49e453ee78922b1ec271964f4d Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 28 May 2023 21:49:41 +0200 Subject: [PATCH 008/113] Use a single tab for both commands --- docs/html/installation.md | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index a179e145a92..fee8a24807f 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -61,34 +61,40 @@ also zip applications for specific pip versions, named `pip-X.Y.Z.pyz`. The zip application can be run using any supported version of Python: -```{pip-cli} +````{tab} Linux +```console $ python pip.pyz --help ``` If run directly: - -````{tab} Linux ```console $ chmod +x ./pip.pyz $ ./pip.pyz ``` + +then the currently active Python interpreter will be used. ```` ````{tab} MacOS ```console +$ python pip.pyz --help +``` + +If run directly: +```console $ chmod +x ./pip.pyz $ ./pip.pyz ``` + +then the currently active Python interpreter will be used. ```` ````{tab} Windows ```console -C:\> .\pip.pyz +C:> py pip.pyz --help ``` ```` -then the currently active Python interpreter will be used. - ## Alternative Methods Depending on how you installed Python, there might be other mechanisms From b1ea8c149905ec28956a54f4f11a13c5d8234ca3 Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 28 May 2023 22:47:30 +0200 Subject: [PATCH 009/113] Revert "Use a single tab for both commands" This reverts commit 39431b5db17dbd49e453ee78922b1ec271964f4d. 
--- docs/html/installation.md | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index fee8a24807f..a179e145a92 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -61,40 +61,34 @@ also zip applications for specific pip versions, named `pip-X.Y.Z.pyz`. The zip application can be run using any supported version of Python: -````{tab} Linux -```console +```{pip-cli} $ python pip.pyz --help ``` If run directly: + +````{tab} Linux ```console $ chmod +x ./pip.pyz $ ./pip.pyz ``` - -then the currently active Python interpreter will be used. ```` ````{tab} MacOS ```console -$ python pip.pyz --help -``` - -If run directly: -```console $ chmod +x ./pip.pyz $ ./pip.pyz ``` - -then the currently active Python interpreter will be used. ```` ````{tab} Windows ```console -C:> py pip.pyz --help +C:\> .\pip.pyz ``` ```` +then the currently active Python interpreter will be used. + ## Alternative Methods Depending on how you installed Python, there might be other mechanisms From cebb9f1bf9361fe5b569a2ffc4e6a1b48737d1a8 Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 28 May 2023 22:49:17 +0200 Subject: [PATCH 010/113] Add a note to Windows invocation --- docs/html/installation.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index a179e145a92..6403c9e564b 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -72,6 +72,8 @@ If run directly: $ chmod +x ./pip.pyz $ ./pip.pyz ``` + +then the currently active Python interpreter will be used. ```` ````{tab} MacOS @@ -79,15 +81,19 @@ $ ./pip.pyz $ chmod +x ./pip.pyz $ ./pip.pyz ``` + +then the currently active Python interpreter will be used. ```` ````{tab} Windows ```console C:\> .\pip.pyz ``` -```` then the currently active Python interpreter will be used. +You may need to configure your system to recognise the ``.pyz`` extension +before this will work. +```` ## Alternative Methods From b96b21de1e0015ed40fe81925a33269fa48cd0cd Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 28 May 2023 23:12:24 +0200 Subject: [PATCH 011/113] Add newline to make it look less weird when changing tabs? --- docs/html/installation.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/html/installation.md b/docs/html/installation.md index 6403c9e564b..91f4c9cf8b4 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -91,6 +91,7 @@ C:\> .\pip.pyz ``` then the currently active Python interpreter will be used. + You may need to configure your system to recognise the ``.pyz`` extension before this will work. ```` From 8fe45a1e08c4b4de8d86d584dff06bc801639be6 Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 28 May 2023 23:15:32 +0200 Subject: [PATCH 012/113] Try fixing italics *again* --- docs/html/installation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index 91f4c9cf8b4..3502f87e7a1 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -86,8 +86,8 @@ then the currently active Python interpreter will be used. ```` ````{tab} Windows -```console -C:\> .\pip.pyz +```shell +C:> .\pip.pyz ``` then the currently active Python interpreter will be used. 
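The commits above and below are searching for a fence name that Sphinx's highlighter accepts for a Windows prompt. The names being tried are Pygments lexer aliases, which can be confirmed with a short sketch (assuming the `pygments` package that Sphinx depends on):

```python
# Sketch: the fence names tried in these commits are Pygments lexer
# aliases -- "console" is a Unix shell session, "shell" is plain Bash,
# and "doscon" is an MS-DOS/Windows console session.
from pygments.lexers import get_lexer_by_name

for alias in ["console", "shell", "doscon"]:
    print(alias, get_lexer_by_name(alias))
```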
From 8901f8750c90c92c6722ab49771efc69927d61a4 Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 28 May 2023 23:19:11 +0200 Subject: [PATCH 013/113] Remove the code block syntax altogether --- docs/html/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index 3502f87e7a1..24fb3f8694b 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -86,7 +86,7 @@ then the currently active Python interpreter will be used. ```` ````{tab} Windows -```shell +``` C:> .\pip.pyz ``` From a3ce5aa1deb78c3e94bb58df42d61db50067fa45 Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 28 May 2023 23:26:38 +0200 Subject: [PATCH 014/113] Try doscon --- docs/html/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index 24fb3f8694b..2fc550dfdb6 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -86,7 +86,7 @@ then the currently active Python interpreter will be used. ```` ````{tab} Windows -``` +```doscon C:> .\pip.pyz ``` From 2d6e6bec03156784c9275316a42fa2bab0f8d98d Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 28 May 2023 23:29:48 +0200 Subject: [PATCH 015/113] Check one thing about the prompt --- docs/html/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index 2fc550dfdb6..07973ab6cde 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -87,7 +87,7 @@ then the currently active Python interpreter will be used. ````{tab} Windows ```doscon -C:> .\pip.pyz +C:\> .\pip.pyz ``` then the currently active Python interpreter will be used. From 67f79f2a3944218b87db9600a251a858d948889a Mon Sep 17 00:00:00 2001 From: Jakub Kuczys Date: Sun, 28 May 2023 23:32:35 +0200 Subject: [PATCH 016/113] Revert "Check one thing about the prompt" This reverts commit 2d6e6bec03156784c9275316a42fa2bab0f8d98d. --- docs/html/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/html/installation.md b/docs/html/installation.md index 07973ab6cde..2fc550dfdb6 100644 --- a/docs/html/installation.md +++ b/docs/html/installation.md @@ -87,7 +87,7 @@ then the currently active Python interpreter will be used. ````{tab} Windows ```doscon -C:\> .\pip.pyz +C:> .\pip.pyz ``` then the currently active Python interpreter will be used. From 57af702500e30c3b592e7c6cc9eff8ea742de779 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Sun, 2 Jul 2023 14:37:02 +0200 Subject: [PATCH 017/113] Add a warning to "Fallback behavior" doc snippet Add a warning noting that the "Fallback behavior" doc snippet does not provide a good example of what projects put in `pyproject.toml` and instead link to the respective setuptools documentation. In my experience, more than one project has either copied the snippets verbatim, or incorrectly included `wheel` in their `requires` based on it. 
--- docs/html/reference/build-system/pyproject-toml.md | 7 +++++++ news/12122.doc.rst | 3 +++ 2 files changed, 10 insertions(+) create mode 100644 news/12122.doc.rst diff --git a/docs/html/reference/build-system/pyproject-toml.md b/docs/html/reference/build-system/pyproject-toml.md index a42a3b8c484..525bca5021f 100644 --- a/docs/html/reference/build-system/pyproject-toml.md +++ b/docs/html/reference/build-system/pyproject-toml.md @@ -130,6 +130,13 @@ dealing with [the same challenges as pip has for legacy builds](build-output). ## Fallback Behaviour +```{warning} +The following snippet merely describes the fallback behavior. For valid +examples of `pyproject.toml` to use with setuptools, please refer to +[the setuptools documentation]( +https://setuptools.pypa.io/en/stable/userguide/quickstart.html#basic-use). +``` + If a project does not have a `pyproject.toml` file containing a `build-system` section, it will be assumed to have the following backend settings: diff --git a/news/12122.doc.rst b/news/12122.doc.rst new file mode 100644 index 00000000000..dbc0ebb53fd --- /dev/null +++ b/news/12122.doc.rst @@ -0,0 +1,3 @@ +Add a warning explaining that the snippet in "Fallback behavior" is not a valid +``pyproject.toml`` snippet for projects, and link to setuptools documentation +instead. From 170b2c18ae08e4f174fec539e3992e8f0c2c825d Mon Sep 17 00:00:00 2001 From: hauntsaninja Date: Sat, 23 Sep 2023 17:53:13 -0700 Subject: [PATCH 018/113] Use strict optional checking in legacy resolver The assert in _set_req_to_reinstall is definitely correct, the three call sites have guards The assert in _populate_link is a little trickier. It's not obvious to me how to prove it will never trigger. Note that if link were ever None, it could potentially cause AttributeError in Cache._get_cache_path_parts and direct_url_from_link The assert in _resolve_one is safe, if req_to_install.name was None it would raise TypeError in canonicalize_name I can't prove that req.name is not None in get_installation_order and the code would work fine if that were the case (since it's a defaultdict), so I opted to just ignore --- news/4CCE4788-B8B3-402E-9A88-2981AD074999.trivial.rst | 0 src/pip/_internal/resolution/legacy/resolver.py | 7 +++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 news/4CCE4788-B8B3-402E-9A88-2981AD074999.trivial.rst diff --git a/news/4CCE4788-B8B3-402E-9A88-2981AD074999.trivial.rst b/news/4CCE4788-B8B3-402E-9A88-2981AD074999.trivial.rst new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/pip/_internal/resolution/legacy/resolver.py b/src/pip/_internal/resolution/legacy/resolver.py index b17b7e4530b..9277e960dd3 100644 --- a/src/pip/_internal/resolution/legacy/resolver.py +++ b/src/pip/_internal/resolution/legacy/resolver.py @@ -11,7 +11,6 @@ """ # The following comment should be removed at some point in the future. -# mypy: strict-optional=False import logging import sys @@ -325,6 +324,7 @@ def _set_req_to_reinstall(self, req: InstallRequirement) -> None: """ # Don't uninstall the conflict if doing a user install and the # conflict is not a user install. 
+ assert req.satisfied_by is not None if not self.use_user_site or req.satisfied_by.in_usersite: req.should_reinstall = True req.satisfied_by = None @@ -423,6 +423,8 @@ def _populate_link(self, req: InstallRequirement) -> None: if self.wheel_cache is None or self.preparer.require_hashes: return + + assert req.link is not None, "_find_requirement_link unexpectedly returned None" cache_entry = self.wheel_cache.get_cache_entry( link=req.link, package_name=req.name, @@ -536,6 +538,7 @@ def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None: with indent_log(): # We add req_to_install before its dependencies, so that we # can refer to it when adding dependencies. + assert req_to_install.name is not None if not requirement_set.has_requirement(req_to_install.name): # 'unnamed' requirements will get added here # 'unnamed' requirements can only come from being directly @@ -591,7 +594,7 @@ def schedule(req: InstallRequirement) -> None: if req.constraint: return ordered_reqs.add(req) - for dep in self._discovered_dependencies[req.name]: + for dep in self._discovered_dependencies[req.name]: # type: ignore[index] schedule(dep) order.append(req) From e261330e65e8f24ca412710dd78af0d4d26d577d Mon Sep 17 00:00:00 2001 From: hauntsaninja Date: Mon, 25 Sep 2023 12:00:51 -0700 Subject: [PATCH 019/113] old comment --- src/pip/_internal/resolution/legacy/resolver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/pip/_internal/resolution/legacy/resolver.py b/src/pip/_internal/resolution/legacy/resolver.py index 9277e960dd3..b0fe2323b49 100644 --- a/src/pip/_internal/resolution/legacy/resolver.py +++ b/src/pip/_internal/resolution/legacy/resolver.py @@ -10,8 +10,6 @@ a. "first found, wins" (where the order is breadth first) """ -# The following comment should be removed at some point in the future. - import logging import sys from collections import defaultdict From 96424886eb3608fd1844a604bd4bedf04fde42c1 Mon Sep 17 00:00:00 2001 From: Richard Si Date: Mon, 15 Apr 2024 20:30:02 -0400 Subject: [PATCH 020/113] Gracefully skip VCS detection on broken PATH --- news/12567.bugfix.rst | 1 + src/pip/_internal/vcs/versioncontrol.py | 2 ++ tests/unit/test_vcs.py | 3 ++- 3 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 news/12567.bugfix.rst diff --git a/news/12567.bugfix.rst b/news/12567.bugfix.rst new file mode 100644 index 00000000000..55c6a93d6b9 --- /dev/null +++ b/news/12567.bugfix.rst @@ -0,0 +1 @@ +Gracefully skip VCS detection in pip freeze when PATH points to a non-directory path. 
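For context on the hunk that follows: when a PATH entry names a file rather than a directory, executable lookup on POSIX traverses it as if it were a directory and fails with ENOTDIR, which Python surfaces as `NotADirectoryError` rather than the `FileNotFoundError` pip already handled. A sketch of the failure mode, assuming POSIX semantics and using `/etc/passwd` as a stand-in for any non-directory PATH entry:

```python
# Illustrative reproduction of the failure this patch guards against
# (POSIX assumed; /etc/passwd is just some file that is not a directory).
import os
import subprocess

env = dict(os.environ, PATH="/etc/passwd")
try:
    subprocess.run(["git", "--version"], env=env)
except NotADirectoryError:
    # Lookup attempted "/etc/passwd/git" and hit ENOTDIR.
    print("PATH entry is not a directory")
except FileNotFoundError:
    print("command not found")
```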
diff --git a/src/pip/_internal/vcs/versioncontrol.py b/src/pip/_internal/vcs/versioncontrol.py index 7eef8ec898e..c0eb12ff428 100644 --- a/src/pip/_internal/vcs/versioncontrol.py +++ b/src/pip/_internal/vcs/versioncontrol.py @@ -652,6 +652,8 @@ def run_command( log_failed_cmd=log_failed_cmd, stdout_only=stdout_only, ) + except NotADirectoryError: + raise BadCommand(f"Cannot find command {cls.name!r} - invalid PATH") except FileNotFoundError: # errno.ENOENT = no such file or directory # In other words, the VCS executable isn't available diff --git a/tests/unit/test_vcs.py b/tests/unit/test_vcs.py index a52a6217e77..9fe63cc62e6 100644 --- a/tests/unit/test_vcs.py +++ b/tests/unit/test_vcs.py @@ -444,8 +444,9 @@ def test_version_control__get_url_rev_and_auth__no_revision(url: str) -> None: [ (FileNotFoundError, r"Cannot find command '{name}'"), (PermissionError, r"No permission to execute '{name}'"), + (NotADirectoryError, "Cannot find command '{name}' - invalid PATH"), ], - ids=["FileNotFoundError", "PermissionError"], + ids=["FileNotFoundError", "PermissionError", "NotADirectoryError"], ) def test_version_control__run_command__fails( vcs_cls: Type[VersionControl], exc_cls: Type[Exception], msg_re: str From 516007147dbe2be3f6d391272043300dc627a0ce Mon Sep 17 00:00:00 2001 From: Richard Si Date: Wed, 13 Mar 2024 11:27:03 -0400 Subject: [PATCH 021/113] Add codespell pre-commit hook --- .pre-commit-config.yaml | 7 +++++++ news/287f037c-108f-48dd-80a0-489921a6b2f3.trivial.rst | 1 + tools/codespell-ignore.txt | 0 3 files changed, 8 insertions(+) create mode 100644 news/287f037c-108f-48dd-80a0-489921a6b2f3.trivial.rst create mode 100644 tools/codespell-ignore.txt diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b932b146f18..939ddead334 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -54,6 +54,13 @@ repos: types: [file] exclude: NEWS.rst # The errors flagged in NEWS.rst are old. +- repo: https://github.com/codespell-project/codespell + rev: v2.2.6 + hooks: + - id: codespell + exclude: AUTHORS.txt|tests/data + args: ["--ignore-words", tools/codespell-ignore.txt] + - repo: local hooks: - id: news-fragment-filenames diff --git a/news/287f037c-108f-48dd-80a0-489921a6b2f3.trivial.rst b/news/287f037c-108f-48dd-80a0-489921a6b2f3.trivial.rst new file mode 100644 index 00000000000..446d4f9fe9a --- /dev/null +++ b/news/287f037c-108f-48dd-80a0-489921a6b2f3.trivial.rst @@ -0,0 +1 @@ +Add codespell pre-commit hook to catch common misspellings. diff --git a/tools/codespell-ignore.txt b/tools/codespell-ignore.txt new file mode 100644 index 00000000000..e69de29bb2d From 75b5cd38857a1a9ec202a7607a9391c345a39423 Mon Sep 17 00:00:00 2001 From: Richard Si Date: Mon, 15 Apr 2024 21:28:51 -0400 Subject: [PATCH 022/113] Address current codespell errors --- NEWS.rst | 2 +- src/pip/_internal/cache.py | 2 +- src/pip/_internal/resolution/resolvelib/candidates.py | 2 +- src/pip/_internal/utils/_jaraco_text.py | 2 +- tests/conftest.py | 2 +- tests/functional/test_install.py | 4 ++-- tests/functional/test_new_resolver_user.py | 2 +- tests/unit/test_collector.py | 2 +- tools/codespell-ignore.txt | 8 ++++++++ 9 files changed, 17 insertions(+), 9 deletions(-) diff --git a/NEWS.rst b/NEWS.rst index 6b1dff81e80..ce6d8e6dd3d 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -304,7 +304,7 @@ Improved Documentation - Cross-reference the ``--python`` flag from the ``--prefix`` flag, and mention limitations of ``--prefix`` regarding script installation. 
(`#11775 `_) -- Add SECURITY.md to make the policy offical. (`#11809 `_) +- Add SECURITY.md to make the policy official. (`#11809 `_) - Add username to Git over SSH example. (`#11838 `_) - Quote extras in the pip install docs to guard shells with default glob qualifiers, like zsh. (`#11842 `_) diff --git a/src/pip/_internal/cache.py b/src/pip/_internal/cache.py index f45ac23e95a..6b4512672db 100644 --- a/src/pip/_internal/cache.py +++ b/src/pip/_internal/cache.py @@ -44,7 +44,7 @@ def _get_cache_path_parts(self, link: Link) -> List[str]: """Get parts of part that must be os.path.joined with cache_dir""" # We want to generate an url to use as our cache key, we don't want to - # just re-use the URL because it might have other items in the fragment + # just reuse the URL because it might have other items in the fragment # and we don't care about those. key_parts = {"url": link.url_without_fragment} if link.hash_name is not None and link.hash is not None: diff --git a/src/pip/_internal/resolution/resolvelib/candidates.py b/src/pip/_internal/resolution/resolvelib/candidates.py index 4125cda2b7c..e0fa0bc6986 100644 --- a/src/pip/_internal/resolution/resolvelib/candidates.py +++ b/src/pip/_internal/resolution/resolvelib/candidates.py @@ -520,7 +520,7 @@ def _warn_invalid_extras( def _calculate_valid_requested_extras(self) -> FrozenSet[str]: """Get a list of valid extras requested by this candidate. - The user (or upstream dependant) may have specified extras that the + The user (or upstream dependent) may have specified extras that the candidate doesn't support. Any unsupported extras are dropped, and each cause a warning to be logged here. """ diff --git a/src/pip/_internal/utils/_jaraco_text.py b/src/pip/_internal/utils/_jaraco_text.py index e06947c051a..6ccf53b7ac5 100644 --- a/src/pip/_internal/utils/_jaraco_text.py +++ b/src/pip/_internal/utils/_jaraco_text.py @@ -88,7 +88,7 @@ def join_continuation(lines): ['foobarbaz'] Not sure why, but... - The character preceeding the backslash is also elided. + The character preceding the backslash is also elided. >>> list(join_continuation(['goo\\', 'dly'])) ['godly'] diff --git a/tests/conftest.py b/tests/conftest.py index 4ede5c70fc1..35101cef2c3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -495,7 +495,7 @@ def virtualenv_template( dist_info, venv.site / dist_info.name, dirs_exist_ok=True, symlinks=True ) # Create placeholder ``easy-install.pth``, as several tests depend on its - # existance. TODO: Ensure ``tests.lib.TestPipResult.files_updated`` correctly + # existence. TODO: Ensure ``tests.lib.TestPipResult.files_updated`` correctly # detects changed files. venv.site.joinpath("easy-install.pth").touch() diff --git a/tests/functional/test_install.py b/tests/functional/test_install.py index c013f2eaf72..b65212f929c 100644 --- a/tests/functional/test_install.py +++ b/tests/functional/test_install.py @@ -1731,7 +1731,7 @@ def test_install_builds_wheels(script: PipTestEnvironment, data: TestData) -> No assert "Building wheel for wheelb" in str(res), str(res) assert "Failed to build wheelbroken" in str(res), str(res) # Wheels are built for local directories, but not cached. 
- assert "Building wheel for requir" in str(res), str(res) + assert "Building wheel for require" in str(res), str(res) # into the cache assert wheels != [], str(res) assert wheels == [ @@ -1754,7 +1754,7 @@ def test_install_no_binary_builds_wheels( ) # Wheels are built for all requirements assert "Building wheel for wheelb" in str(res), str(res) - assert "Building wheel for requir" in str(res), str(res) + assert "Building wheel for require" in str(res), str(res) assert "Building wheel for upper" in str(res), str(res) # Wheelbroken failed to build assert "Failed to build wheelbroken" in str(res), str(res) diff --git a/tests/functional/test_new_resolver_user.py b/tests/functional/test_new_resolver_user.py index 4cd06311348..1660924f28c 100644 --- a/tests/functional/test_new_resolver_user.py +++ b/tests/functional/test_new_resolver_user.py @@ -27,7 +27,7 @@ def test_new_resolver_install_user_satisfied_by_global_site( script: PipTestEnvironment, ) -> None: """ - An install a matching version to user site should re-use a global site + An install a matching version to user site should reuse a global site installation if it satisfies. """ create_basic_wheel_for_package(script, "base", "1.0.0") diff --git a/tests/unit/test_collector.py b/tests/unit/test_collector.py index 90064776ef3..7e178a20191 100644 --- a/tests/unit/test_collector.py +++ b/tests/unit/test_collector.py @@ -911,7 +911,7 @@ def test_collect_sources__non_existing_path() -> None: index_url="ignored-by-no-index", extra_index_urls=[], no_index=True, - find_links=[os.path.join("this", "doesnt", "exist")], + find_links=[os.path.join("this", "does", "not", "exist")], ), ) sources = collector.collect_sources( diff --git a/tools/codespell-ignore.txt b/tools/codespell-ignore.txt index e69de29bb2d..95d223b73bd 100644 --- a/tools/codespell-ignore.txt +++ b/tools/codespell-ignore.txt @@ -0,0 +1,8 @@ +# An actual English word +lousily +# A contributor first name +wil +# Codebase variable or class names +uptodate +afile +failer From 38fd2bb224a61812aceeb56c201a30c82dc0f338 Mon Sep 17 00:00:00 2001 From: Richard Si Date: Mon, 15 Apr 2024 21:55:43 -0400 Subject: [PATCH 023/113] Address even more codespell errors (after merging in main) --- .../research-results/improving-pips-documentation.md | 2 +- docs/html/ux-research-design/research-results/personas.md | 2 +- .../research-results/pip-upgrade-conflict.md | 2 +- .../research-results/prioritizing-features.md | 4 ++-- .../ux-research-design/research-results/users-and-security.md | 2 +- tests/functional/test_cli.py | 2 +- tools/codespell-ignore.txt | 1 + 7 files changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/html/ux-research-design/research-results/improving-pips-documentation.md b/docs/html/ux-research-design/research-results/improving-pips-documentation.md index bec2120cda6..765a8f1069a 100644 --- a/docs/html/ux-research-design/research-results/improving-pips-documentation.md +++ b/docs/html/ux-research-design/research-results/improving-pips-documentation.md @@ -36,7 +36,7 @@ We also: 1. Asked for volunteers to participate in a diary study, documenting their experience solving pip problems. Unfortunately this was not completed due to lack of interest from the community. 2. Asked for user feedback on the pip documentation site: ![screenshot of user feedback mechanism on pip docs](https://i.imgur.com/WJVjl8N.png) - Unfortunatly, we did not gather any useful feedback via this effort + Unfortunately, we did not gather any useful feedback via this effort 3. 
[Installed analytics on the pip docs](https://github.com/pypa/pip/pull/9146). We are waiting for this to be merged and start providing useful data. ## Results diff --git a/docs/html/ux-research-design/research-results/personas.md b/docs/html/ux-research-design/research-results/personas.md index 06b84a8d4ef..e4f1a3df44b 100644 --- a/docs/html/ux-research-design/research-results/personas.md +++ b/docs/html/ux-research-design/research-results/personas.md @@ -180,7 +180,7 @@ Making software was as defined earlier as "are you working on something reusable > "I have written software, sometimes for business and personal reasons. At one point I worked on a django website project, that was being used by 1000s of people. I don't think any of my live projects are based. -> "Most of it is for sysadmin, automation. I lke to use python instead of shell scripting. I manage a server with wordpress sites. I wrote a script to update these sites, mailman list and sql DB management, and for different utilities." **- Participant 240313542** +> "Most of it is for sysadmin, automation. I [like] to use python instead of shell scripting. I manage a server with wordpress sites. I wrote a script to update these sites, mailman list and sql DB management, and for different utilities." **- Participant 240313542** > "I use Python for creating things - like outputs for data scientist, software engineer. I make software to look at patterns, and analyse stuff. I think I'm a maker because someone else is using - they are colleagues. Usually its non-technical colleagues. I produce outputs - make data understandable. They use the results, or a package it behind a flask app. Or analyse graphs." **- Participant 240426799** diff --git a/docs/html/ux-research-design/research-results/pip-upgrade-conflict.md b/docs/html/ux-research-design/research-results/pip-upgrade-conflict.md index 6f35a550ca0..9261a318518 100644 --- a/docs/html/ux-research-design/research-results/pip-upgrade-conflict.md +++ b/docs/html/ux-research-design/research-results/pip-upgrade-conflict.md @@ -64,5 +64,5 @@ From the 407 responses that answered "why" a particular solution was best, the f Based on the results of this research, the pip UX team has made the following recommendations to the development team: - While the current behaviour exists, [warn the user when conflicts are introduced](https://github.com/pypa/pip/issues/7744#issuecomment-717573440) -- [Change the current behaviour](https://github.com/pypa/pip/issues/9094), so that pip takes into account packages that are already installed when upgrading other packages. Show the user a warning when pip anticipates a depdenency conflict (as per option 4) +- [Change the current behaviour](https://github.com/pypa/pip/issues/9094), so that pip takes into account packages that are already installed when upgrading other packages. Show the user a warning when pip anticipates a dependency conflict (as per option 4) - Explore [the possibility of adding additional flags to the upgrade command](https://github.com/pypa/pip/issues/9095), to give users more control diff --git a/docs/html/ux-research-design/research-results/prioritizing-features.md b/docs/html/ux-research-design/research-results/prioritizing-features.md index 12e6b8b244e..3642042f333 100644 --- a/docs/html/ux-research-design/research-results/prioritizing-features.md +++ b/docs/html/ux-research-design/research-results/prioritizing-features.md @@ -85,9 +85,9 @@ Results varied by the amount of Python experience the user had. 
![Screenshot of Warn about broken dependencies](https://i.imgur.com/uNv2tnG.png) -#### Upgrade packages to the lastest version +#### Upgrade packages to the latest version -![Screenshot of Upgrade packages to the lastest version](https://i.imgur.com/pQgCLBO.png) +![Screenshot of Upgrade packages to the latest version](https://i.imgur.com/pQgCLBO.png) #### Install packages from an alternative package index, or indexes diff --git a/docs/html/ux-research-design/research-results/users-and-security.md b/docs/html/ux-research-design/research-results/users-and-security.md index d9b79a057ff..fbc8f492f3c 100644 --- a/docs/html/ux-research-design/research-results/users-and-security.md +++ b/docs/html/ux-research-design/research-results/users-and-security.md @@ -62,7 +62,7 @@ Participants who spent a lot of their time writing Python code - either for comm They thought about where the software would be used, who would use it, and possible attack surfaces. -> "On the basic point, I have to think about attack surfaces. If I am writing the thing (software), I have to give a crap. I have to answer the emails! In the code I push to[ pypi.org](http://pypi.org/) I think about it doubley. What could people do with this code? Whether I do a good job, that's different! I am aware of it when publishing it or making it publically available. Whether I do a good job, that's different! I am aware of it when publishing it or making it publically available. I rely on community resources - Python security related, I follow security people blogs, Twitter. I use Hypothesis for fuzz-testing. I also rely on having security policies in place and a reporting mechanism. I steer clear of crypto, I rely on other peoples. There's a certain amount of knowledge in the Python community, I am actively involved in it. If something happens, I will hear about it. I use Twitter, if something happens, in the morning it can take me awhile to figure out what's happened. I have a lot of trust in the ecosystem to be self healing. As long as you don't stray too far-off the reservation (into using odd or uncommon or new packages), it's a better sense of security." **- Participant (data scientist turned Python developer)** +> "On the basic point, I have to think about attack surfaces. If I am writing the thing (software), I have to give a crap. I have to answer the emails! In the code I push to[ pypi.org](http://pypi.org/) I think about it doubley. What could people do with this code? Whether I do a good job, that's different! I am aware of it when publishing it or making it [publicly] available. Whether I do a good job, that's different! I am aware of it when publishing it or making it [publicly] available. I rely on community resources - Python security related, I follow security people blogs, Twitter. I use Hypothesis for fuzz-testing. I also rely on having security policies in place and a reporting mechanism. I steer clear of crypto, I rely on other peoples. There's a certain amount of knowledge in the Python community, I am actively involved in it. If something happens, I will hear about it. I use Twitter, if something happens, in the morning it can take me awhile to figure out what's happened. I have a lot of trust in the ecosystem to be self healing. As long as you don't stray too far-off the reservation (into using odd or uncommon or new packages), it's a better sense of security." **- Participant (data scientist turned Python developer)** > Yes, because I'm liable for that. 
If the problem is my code, and I deliver something and they get attacked. I'm screwed. **- Participant (professional Python developer and trainer)** diff --git a/tests/functional/test_cli.py b/tests/functional/test_cli.py index de3b1259f7f..3c166152111 100644 --- a/tests/functional/test_cli.py +++ b/tests/functional/test_cli.py @@ -69,7 +69,7 @@ def test_no_network_imports(command: str, tmp_path: Path) -> None: This helps to reduce the startup time of these commands. Note: This won't catch lazy network imports, but it'll catch top-level - network imports which were accidently added (which is the most likely way + network imports which were accidentally added (which is the most likely way to regress anyway). """ file = tmp_path / f"imported_modules_for_{command}.txt" diff --git a/tools/codespell-ignore.txt b/tools/codespell-ignore.txt index 95d223b73bd..288f597353b 100644 --- a/tools/codespell-ignore.txt +++ b/tools/codespell-ignore.txt @@ -1,5 +1,6 @@ # An actual English word lousily +followings # A contributor first name wil # Codebase variable or class names From de278b1b505e6041fb743c2f233bca4999669e6d Mon Sep 17 00:00:00 2001 From: Xianpeng Shen Date: Tue, 16 Apr 2024 15:43:26 +0800 Subject: [PATCH 024/113] add render: shell to bug-report.yml --- .github/ISSUE_TEMPLATE/bug-report.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index e28e5408208..200c7e60856 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -66,6 +66,7 @@ body: the line containing the command with `$ `. Please also ensure that the "How to reproduce" section contains matching instructions for reproducing this. + render: shell - type: checkboxes attributes: From a4155610c608c20afdea04a9a68116ff37fd13e5 Mon Sep 17 00:00:00 2001 From: Xianpeng Shen Date: Tue, 16 Apr 2024 16:17:09 +0800 Subject: [PATCH 025/113] add news file --- news/12630.trivial.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/12630.trivial.rst diff --git a/news/12630.trivial.rst b/news/12630.trivial.rst new file mode 100644 index 00000000000..8beb79ec366 --- /dev/null +++ b/news/12630.trivial.rst @@ -0,0 +1 @@ +Add ``render: shell`` to the bug report template to format output as code From db425ce4b49d9ff59954fb5c506bffc5e42f9c0f Mon Sep 17 00:00:00 2001 From: Peter Shen Date: Thu, 18 Apr 2024 10:47:31 +0800 Subject: [PATCH 026/113] update output description --- .github/ISSUE_TEMPLATE/bug-report.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 200c7e60856..e593031d559 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -60,7 +60,8 @@ body: label: Output description: >- Provide the output of the steps above, including the commands - themselves and pip's output/traceback etc. + themselves and pip's output/traceback etc. + (The output will auto-rendered as code, no need for backticks.) If you want to present output from multiple commands, please prefix the line containing the command with `$ `. 
Please also ensure that From 7b61a10379169b5f7e8eed042aa16ffd7d76ad7b Mon Sep 17 00:00:00 2001 From: shenxianpeng Date: Thu, 18 Apr 2024 03:00:06 +0000 Subject: [PATCH 027/113] fixed trim trailing whitespace --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index e593031d559..20e0a5dd991 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -60,7 +60,7 @@ body: label: Output description: >- Provide the output of the steps above, including the commands - themselves and pip's output/traceback etc. + themselves and pip's output/traceback etc. (The output will auto-rendered as code, no need for backticks.) If you want to present output from multiple commands, please prefix From 77d0ddc82acaeaafbe008f8860f51c146f1d40ee Mon Sep 17 00:00:00 2001 From: Richard Si Date: Fri, 19 Apr 2024 10:27:33 -0400 Subject: [PATCH 028/113] Replace captured_output() and get_url_scheme() with stdlib alternatives (#12639) Co-authored-by: Tzu-ping Chung --- ...e3-4844-4298-a46c-80768b38f652.trivial.rst | 1 + src/pip/_internal/operations/install/wheel.py | 6 ++- src/pip/_internal/req/req_file.py | 4 +- src/pip/_internal/utils/misc.py | 37 ------------------- src/pip/_internal/utils/urls.py | 7 ---- src/pip/_internal/vcs/versioncontrol.py | 5 +-- tests/unit/test_logging.py | 9 +++-- tests/unit/test_urls.py | 16 +------- 8 files changed, 14 insertions(+), 71 deletions(-) create mode 100644 news/c678d9e3-4844-4298-a46c-80768b38f652.trivial.rst diff --git a/news/c678d9e3-4844-4298-a46c-80768b38f652.trivial.rst b/news/c678d9e3-4844-4298-a46c-80768b38f652.trivial.rst new file mode 100644 index 00000000000..8837b629849 --- /dev/null +++ b/news/c678d9e3-4844-4298-a46c-80768b38f652.trivial.rst @@ -0,0 +1 @@ +Replace ``captured_output()`` and ``get_url_scheme()`` with stdlib alternatives. 
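The stdlib pattern adopted in the hunks below is `contextlib.redirect_stdout`, which performs the temporary `sys.stdout` swap that pip's hand-rolled `captured_output()` helper implemented. A minimal sketch of the equivalence (a `StringIO` target is used here for brevity; the patch itself passes pip's `StreamWrapper`):

```python
# Minimal sketch of the stdlib capture pattern replacing
# captured_stdout(): redirect_stdout temporarily rebinds sys.stdout.
import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print("hello")
assert buf.getvalue() == "hello\n"
```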
diff --git a/src/pip/_internal/operations/install/wheel.py b/src/pip/_internal/operations/install/wheel.py index f64c3e9c443..a02a193d226 100644 --- a/src/pip/_internal/operations/install/wheel.py +++ b/src/pip/_internal/operations/install/wheel.py @@ -51,7 +51,7 @@ from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl from pip._internal.models.scheme import SCHEME_KEYS, Scheme from pip._internal.utils.filesystem import adjacent_tmp_file, replace -from pip._internal.utils.misc import captured_stdout, ensure_dir, hash_file, partition +from pip._internal.utils.misc import StreamWrapper, ensure_dir, hash_file, partition from pip._internal.utils.unpacking import ( current_umask, is_within_directory, @@ -603,7 +603,9 @@ def pyc_output_path(path: str) -> str: # Compile all of the pyc files for the installed files if pycompile: - with captured_stdout() as stdout: + with contextlib.redirect_stdout( + StreamWrapper.from_stream(sys.stdout) + ) as stdout: with warnings.catch_warnings(): warnings.filterwarnings("ignore") for path in pyc_source_file_paths(): diff --git a/src/pip/_internal/req/req_file.py b/src/pip/_internal/req/req_file.py index 6f474bce49a..53ad8674cd8 100644 --- a/src/pip/_internal/req/req_file.py +++ b/src/pip/_internal/req/req_file.py @@ -26,7 +26,6 @@ from pip._internal.exceptions import InstallationError, RequirementsFileParseError from pip._internal.models.search_scope import SearchScope from pip._internal.utils.encoding import auto_decode -from pip._internal.utils.urls import get_url_scheme if TYPE_CHECKING: from pip._internal.index.package_finder import PackageFinder @@ -533,8 +532,7 @@ def get_file_content(url: str, session: "PipSession") -> Tuple[str, str]: :param url: File path or url. :param session: PipSession instance. """ - scheme = get_url_scheme(url) - + scheme = urllib.parse.urlsplit(url).scheme # Pip has special support for file:// URLs (LocalFSAdapter). if scheme in ["http", "https", "file"]: # Delay importing heavy network modules until absolutely necessary. diff --git a/src/pip/_internal/utils/misc.py b/src/pip/_internal/utils/misc.py index a5d17565812..82b6261322d 100644 --- a/src/pip/_internal/utils/misc.py +++ b/src/pip/_internal/utils/misc.py @@ -1,4 +1,3 @@ -import contextlib import errno import getpass import hashlib @@ -21,7 +20,6 @@ Any, BinaryIO, Callable, - ContextManager, Dict, Generator, Iterable, @@ -57,7 +55,6 @@ "normalize_path", "renames", "get_prog", - "captured_stdout", "ensure_dir", "remove_auth_from_url", "check_externally_managed", @@ -400,40 +397,6 @@ def encoding(self) -> str: # type: ignore return self.orig_stream.encoding -@contextlib.contextmanager -def captured_output(stream_name: str) -> Generator[StreamWrapper, None, None]: - """Return a context manager used by captured_stdout/stdin/stderr - that temporarily replaces the sys stream *stream_name* with a StringIO. - - Taken from Lib/support/__init__.py in the CPython repo. - """ - orig_stdout = getattr(sys, stream_name) - setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout)) - try: - yield getattr(sys, stream_name) - finally: - setattr(sys, stream_name, orig_stdout) - - -def captured_stdout() -> ContextManager[StreamWrapper]: - """Capture the output of sys.stdout: - - with captured_stdout() as stdout: - print('hello') - self.assertEqual(stdout.getvalue(), 'hello\n') - - Taken from Lib/support/__init__.py in the CPython repo. 
- """ - return captured_output("stdout") - - -def captured_stderr() -> ContextManager[StreamWrapper]: - """ - See captured_stdout(). - """ - return captured_output("stderr") - - # Simulates an enum def enum(*sequential: Any, **named: Any) -> Type[Any]: enums = dict(zip(sequential, range(len(sequential))), **named) diff --git a/src/pip/_internal/utils/urls.py b/src/pip/_internal/utils/urls.py index 6ba2e04f350..9f34f882a1a 100644 --- a/src/pip/_internal/utils/urls.py +++ b/src/pip/_internal/utils/urls.py @@ -2,17 +2,10 @@ import string import urllib.parse import urllib.request -from typing import Optional from .compat import WINDOWS -def get_url_scheme(url: str) -> Optional[str]: - if ":" not in url: - return None - return url.split(":", 1)[0].lower() - - def path_to_url(path: str) -> str: """ Convert a path to a file: URL. The path will be made absolute and have diff --git a/src/pip/_internal/vcs/versioncontrol.py b/src/pip/_internal/vcs/versioncontrol.py index ce9cf112d31..bd7d509a3ea 100644 --- a/src/pip/_internal/vcs/versioncontrol.py +++ b/src/pip/_internal/vcs/versioncontrol.py @@ -38,7 +38,6 @@ format_command_args, make_command, ) -from pip._internal.utils.urls import get_url_scheme __all__ = ["vcs"] @@ -52,8 +51,8 @@ def is_url(name: str) -> bool: """ Return true if the name looks like a URL. """ - scheme = get_url_scheme(name) - if scheme is None: + scheme = urllib.parse.urlsplit(name).scheme + if not scheme: return False return scheme in ["http", "https", "file", "ftp"] + vcs.all_schemes diff --git a/tests/unit/test_logging.py b/tests/unit/test_logging.py index 9d507d74277..f673ed29def 100644 --- a/tests/unit/test_logging.py +++ b/tests/unit/test_logging.py @@ -1,5 +1,7 @@ import logging import time +from contextlib import redirect_stderr, redirect_stdout +from io import StringIO from threading import Thread from unittest.mock import patch @@ -11,7 +13,6 @@ RichPipStreamHandler, indent_log, ) -from pip._internal.utils.misc import captured_stderr, captured_stdout logger = logging.getLogger(__name__) @@ -140,7 +141,7 @@ def test_broken_pipe_in_stderr_flush(self) -> None: """ record = self._make_log_record() - with captured_stderr() as stderr: + with redirect_stderr(StringIO()) as stderr: handler = RichPipStreamHandler(stream=stderr, no_color=True) with patch("sys.stderr.flush") as mock_flush: mock_flush.side_effect = BrokenPipeError() @@ -163,7 +164,7 @@ def test_broken_pipe_in_stdout_write(self) -> None: """ record = self._make_log_record() - with captured_stdout() as stdout: + with redirect_stdout(StringIO()) as stdout: handler = RichPipStreamHandler(stream=stdout, no_color=True) with patch("sys.stdout.write") as mock_write: mock_write.side_effect = BrokenPipeError() @@ -178,7 +179,7 @@ def test_broken_pipe_in_stdout_flush(self) -> None: """ record = self._make_log_record() - with captured_stdout() as stdout: + with redirect_stdout(StringIO()) as stdout: handler = RichPipStreamHandler(stream=stdout, no_color=True) with patch("sys.stdout.flush") as mock_flush: mock_flush.side_effect = BrokenPipeError() diff --git a/tests/unit/test_urls.py b/tests/unit/test_urls.py index 56ee80aa802..746d0222425 100644 --- a/tests/unit/test_urls.py +++ b/tests/unit/test_urls.py @@ -1,24 +1,10 @@ import os import sys import urllib.request -from typing import Optional import pytest -from pip._internal.utils.urls import get_url_scheme, path_to_url, url_to_path - - -@pytest.mark.parametrize( - "url,expected", - [ - ("http://localhost:8080/", "http"), - ("file:c:/path/to/file", "file"), - 
("file:/dev/null", "file"), - ("", None), - ], -) -def test_get_url_scheme(url: str, expected: Optional[str]) -> None: - assert get_url_scheme(url) == expected +from pip._internal.utils.urls import path_to_url, url_to_path @pytest.mark.skipif("sys.platform == 'win32'") From e84ffb5ea9b321bd09dd37e0a4ad7744f4766d33 Mon Sep 17 00:00:00 2001 From: "S. Guliaev" Date: Wed, 20 Apr 2022 21:11:07 +0300 Subject: [PATCH 029/113] fix svn and bazaar checkout in verbose mode --- news/11050.bugfix.rst | 1 + src/pip/_internal/vcs/bazaar.py | 12 ++++---- src/pip/_internal/vcs/subversion.py | 6 ++-- tests/unit/test_vcs.py | 43 +++++++++++++++++++++++++++++ 4 files changed, 53 insertions(+), 9 deletions(-) create mode 100644 news/11050.bugfix.rst diff --git a/news/11050.bugfix.rst b/news/11050.bugfix.rst new file mode 100644 index 00000000000..f3d1f1ad804 --- /dev/null +++ b/news/11050.bugfix.rst @@ -0,0 +1 @@ +Fix error on checkout for vcs and bazaar with verbose mode on. diff --git a/src/pip/_internal/vcs/bazaar.py b/src/pip/_internal/vcs/bazaar.py index 20a17ed0927..a67ee3cd447 100644 --- a/src/pip/_internal/vcs/bazaar.py +++ b/src/pip/_internal/vcs/bazaar.py @@ -44,14 +44,14 @@ def fetch_new( display_path(dest), ) if verbosity <= 0: - flag = "--quiet" + flags: Tuple[str, ...] = ("--quiet",) elif verbosity == 1: - flag = "" + flags = () else: - flag = f"-{'v'*verbosity}" - cmd_args = make_command( - "checkout", "--lightweight", flag, rev_options.to_args(), url, dest - ) + flags = ("-{'v'*verbosity}",) + cmd_args = make_command( + "checkout", "--lightweight", *flags, rev_options.to_args(), url, dest + ) self.run_command(cmd_args) def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: diff --git a/src/pip/_internal/vcs/subversion.py b/src/pip/_internal/vcs/subversion.py index 16d93a67b7b..1f05cbdecb6 100644 --- a/src/pip/_internal/vcs/subversion.py +++ b/src/pip/_internal/vcs/subversion.py @@ -288,12 +288,12 @@ def fetch_new( display_path(dest), ) if verbosity <= 0: - flag = "--quiet" + flags: Tuple[str, ...] 
= ("--quiet",) else: - flag = "" + flags = () cmd_args = make_command( "checkout", - flag, + *flags, self.get_remote_call_options(), rev_options.to_args(), url, diff --git a/tests/unit/test_vcs.py b/tests/unit/test_vcs.py index a52a6217e77..af5f348dc2d 100644 --- a/tests/unit/test_vcs.py +++ b/tests/unit/test_vcs.py @@ -777,6 +777,22 @@ def assert_call_args(self, args: CommandArgs) -> None: assert self.call_subprocess_mock.call_args[0][0] == args def test_obtain(self) -> None: + self.svn.obtain(self.dest, hide_url(self.url), verbosity=1) + self.assert_call_args( + [ + "svn", + "checkout", + "--non-interactive", + "--username", + "username", + "--password", + hide_value("password"), + hide_url("http://svn.example.com/"), + "/tmp/test", + ] + ) + + def test_obtain_quiet(self) -> None: self.svn.obtain(self.dest, hide_url(self.url), verbosity=0) self.assert_call_args( [ @@ -794,6 +810,18 @@ def test_obtain(self) -> None: ) def test_fetch_new(self) -> None: + self.svn.fetch_new(self.dest, hide_url(self.url), self.rev_options, verbosity=1) + self.assert_call_args( + [ + "svn", + "checkout", + "--non-interactive", + hide_url("svn+http://username:password@svn.example.com/"), + "/tmp/test", + ] + ) + + def test_fetch_new_quiet(self) -> None: self.svn.fetch_new(self.dest, hide_url(self.url), self.rev_options, verbosity=0) self.assert_call_args( [ @@ -807,6 +835,21 @@ def test_fetch_new(self) -> None: ) def test_fetch_new_revision(self) -> None: + rev_options = RevOptions(Subversion, "123") + self.svn.fetch_new(self.dest, hide_url(self.url), rev_options, verbosity=1) + self.assert_call_args( + [ + "svn", + "checkout", + "--non-interactive", + "-r", + "123", + hide_url("svn+http://username:password@svn.example.com/"), + "/tmp/test", + ] + ) + + def test_fetch_new_revision_quiet(self) -> None: rev_options = RevOptions(Subversion, "123") self.svn.fetch_new(self.dest, hide_url(self.url), rev_options, verbosity=0) self.assert_call_args( From 5a9cc838172ac82e2c68214532202e8ddfaaa4e6 Mon Sep 17 00:00:00 2001 From: "S. Guliaev" Date: Wed, 20 Apr 2022 22:29:44 +0300 Subject: [PATCH 030/113] fix typo --- news/11050.bugfix.rst => 11051.bugfix.rst | 0 src/pip/_internal/vcs/bazaar.py | 5 +++++ 2 files changed, 5 insertions(+) rename news/11050.bugfix.rst => 11051.bugfix.rst (100%) diff --git a/news/11050.bugfix.rst b/11051.bugfix.rst similarity index 100% rename from news/11050.bugfix.rst rename to 11051.bugfix.rst diff --git a/src/pip/_internal/vcs/bazaar.py b/src/pip/_internal/vcs/bazaar.py index a67ee3cd447..772852bb8bd 100644 --- a/src/pip/_internal/vcs/bazaar.py +++ b/src/pip/_internal/vcs/bazaar.py @@ -48,10 +48,15 @@ def fetch_new( elif verbosity == 1: flags = () else: +<<<<<<< HEAD flags = ("-{'v'*verbosity}",) cmd_args = make_command( "checkout", "--lightweight", *flags, rev_options.to_args(), url, dest ) +======= + flags = (f"-{'v'*verbosity}",) + cmd_args = make_command("branch", *flags, rev_options.to_args(), url, dest) +>>>>>>> 57d0a77e3 (fix typo) self.run_command(cmd_args) def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: From 8051eb4699b4b969b22fa839b03e2d293ae906c9 Mon Sep 17 00:00:00 2001 From: devsagul Date: Thu, 21 Apr 2022 08:36:03 +0300 Subject: [PATCH 031/113] Fix typo in news entry --- 11051.bugfix.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/11051.bugfix.rst b/11051.bugfix.rst index f3d1f1ad804..01aa5ea7b1d 100644 --- a/11051.bugfix.rst +++ b/11051.bugfix.rst @@ -1 +1 @@ -Fix error on checkout for vcs and bazaar with verbose mode on. 
+Fix error on checkout for subversion and bazaar with verbose mode on. From eb876640bb9b4bba456d5d48ef376f74255671b1 Mon Sep 17 00:00:00 2001 From: "S. Guliaev" Date: Thu, 21 Apr 2022 13:46:26 +0300 Subject: [PATCH 032/113] use lists for args in bazaar and subversion --- 11051.bugfix.rst => news/11050.bugfix.rst | 0 src/pip/_internal/vcs/bazaar.py | 17 ++++++----------- src/pip/_internal/vcs/subversion.py | 4 ++-- 3 files changed, 8 insertions(+), 13 deletions(-) rename 11051.bugfix.rst => news/11050.bugfix.rst (100%) diff --git a/11051.bugfix.rst b/news/11050.bugfix.rst similarity index 100% rename from 11051.bugfix.rst rename to news/11050.bugfix.rst diff --git a/src/pip/_internal/vcs/bazaar.py b/src/pip/_internal/vcs/bazaar.py index 772852bb8bd..c754b7cc5c0 100644 --- a/src/pip/_internal/vcs/bazaar.py +++ b/src/pip/_internal/vcs/bazaar.py @@ -44,19 +44,14 @@ def fetch_new( display_path(dest), ) if verbosity <= 0: - flags: Tuple[str, ...] = ("--quiet",) + flags = ["--quiet"] elif verbosity == 1: - flags = () + flags = [] else: -<<<<<<< HEAD - flags = ("-{'v'*verbosity}",) - cmd_args = make_command( - "checkout", "--lightweight", *flags, rev_options.to_args(), url, dest - ) -======= - flags = (f"-{'v'*verbosity}",) - cmd_args = make_command("branch", *flags, rev_options.to_args(), url, dest) ->>>>>>> 57d0a77e3 (fix typo) + flags = [f"-{'v'*verbosity}"] + cmd_args = make_command( + "checkout", "--lightweight", *flags, rev_options.to_args(), url, dest + ) self.run_command(cmd_args) def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: diff --git a/src/pip/_internal/vcs/subversion.py b/src/pip/_internal/vcs/subversion.py index 1f05cbdecb6..f359266d9c0 100644 --- a/src/pip/_internal/vcs/subversion.py +++ b/src/pip/_internal/vcs/subversion.py @@ -288,9 +288,9 @@ def fetch_new( display_path(dest), ) if verbosity <= 0: - flags: Tuple[str, ...] = ("--quiet",) + flags = ["--quiet"] else: - flags = () + flags = [] cmd_args = make_command( "checkout", *flags, From b03b495053e3c38b697bbed4415a4d5400671b95 Mon Sep 17 00:00:00 2001 From: Richard Si Date: Fri, 19 Apr 2024 18:01:27 -0400 Subject: [PATCH 033/113] Clearly document the legacy resolver as unsupported --- docs/html/user_guide.rst | 6 ++++++ news/b2631650-69cc-4747-8a50-24e574a0ae57.trivial.rst | 0 2 files changed, 6 insertions(+) create mode 100644 news/b2631650-69cc-4747-8a50-24e574a0ae57.trivial.rst diff --git a/docs/html/user_guide.rst b/docs/html/user_guide.rst index 2fa5552b1c5..1f5019e5c10 100644 --- a/docs/html/user_guide.rst +++ b/docs/html/user_guide.rst @@ -1140,6 +1140,12 @@ Since this work will not change user-visible behavior described in the pip documentation, this change is not covered by the :ref:`Deprecation Policy`. +.. attention:: + + The legacy resolver is deprecated and unsupported. New features, such + as :doc:`reference/installation-report`, may not function correctly + with the legacy resolver. 
**These issues will not be fixed.** + Context and followup -------------------- diff --git a/news/b2631650-69cc-4747-8a50-24e574a0ae57.trivial.rst b/news/b2631650-69cc-4747-8a50-24e574a0ae57.trivial.rst new file mode 100644 index 00000000000..e69de29bb2d From 8def0444f7e1fde204a2495ebc0e7d9eeecc7ada Mon Sep 17 00:00:00 2001 From: Richard Si Date: Fri, 19 Apr 2024 19:39:21 -0400 Subject: [PATCH 034/113] Bring attention to its future removal Co-authored-by: Pradyun Gedam --- docs/html/user_guide.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/html/user_guide.rst b/docs/html/user_guide.rst index 1f5019e5c10..aa2e3ad8e26 100644 --- a/docs/html/user_guide.rst +++ b/docs/html/user_guide.rst @@ -1143,8 +1143,9 @@ Policy`. .. attention:: The legacy resolver is deprecated and unsupported. New features, such - as :doc:`reference/installation-report`, may not function correctly - with the legacy resolver. **These issues will not be fixed.** + as :doc:`reference/installation-report`, will not work with the + legacy resolver and this resolver will be removed in a future + release. Context and followup -------------------- From 2b97317eb8eff5c79667921bf9d27eaec647b5db Mon Sep 17 00:00:00 2001 From: Richard Si Date: Sun, 14 Apr 2024 13:49:24 -0400 Subject: [PATCH 035/113] Bump pre-commit hooks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There was one new mypy error: src/pip/_internal/metadata/_json.py:82: error: Incompatible types in assignment (expression has type "Message | Any | str | list[Message | str]", target has type "str | list[str]") [assignment] result["description"] = payload ^~~~~~~ Found 1 error in 1 file (checked 294 source files) Co-authored-by: Stéphane Bidoul --- .pre-commit-config.yaml | 6 +++--- news/a870a527-a6ea-46fd-b4aa-c0b0d9b669b0.trivial.rst | 1 + src/pip/_internal/metadata/_json.py | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 news/a870a527-a6ea-46fd-b4aa-c0b0d9b669b0.trivial.rst diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 52650efd54c..bb46b772b0c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ exclude: 'src/pip/_vendor/' repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: check-builtin-literals - id: check-added-large-files @@ -22,13 +22,13 @@ repos: - id: black - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.6 + rev: v0.4.1 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.8.0 + rev: v1.9.0 hooks: - id: mypy exclude: tests/data diff --git a/news/a870a527-a6ea-46fd-b4aa-c0b0d9b669b0.trivial.rst b/news/a870a527-a6ea-46fd-b4aa-c0b0d9b669b0.trivial.rst new file mode 100644 index 00000000000..d9cbf94d387 --- /dev/null +++ b/news/a870a527-a6ea-46fd-b4aa-c0b0d9b669b0.trivial.rst @@ -0,0 +1 @@ +Bump pre-commit hooks. 
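For context on the mypy error quoted in the commit message above: a minimal
standalone sketch (illustrative only, not part of the patch; it assumes the
stub types quoted in the mypy output) of why the assignment needs a cast,
matching the _json.py fix in the diff that follows.

    # Sketch: Message.get_payload() is typed to return a union that includes
    # Message and list forms, so assigning it straight into a str-valued slot
    # fails under the newer mypy. cast() narrows the static type only;
    # runtime behaviour is unchanged.
    from email.message import Message
    from typing import Dict, List, Union, cast

    msg = Message()
    msg.set_payload("A long description.")

    result: Dict[str, Union[str, List[str]]] = {}
    payload = cast(str, msg.get_payload())  # narrowed from the union to str
    if payload:
        result["description"] = payload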
diff --git a/src/pip/_internal/metadata/_json.py b/src/pip/_internal/metadata/_json.py index 27362fc726c..9097dd58590 100644 --- a/src/pip/_internal/metadata/_json.py +++ b/src/pip/_internal/metadata/_json.py @@ -2,7 +2,7 @@ from email.header import Header, decode_header, make_header from email.message import Message -from typing import Any, Dict, List, Union +from typing import Any, Dict, List, Union, cast METADATA_FIELDS = [ # Name, Multiple-Use @@ -77,7 +77,7 @@ def sanitise_header(h: Union[Header, str]) -> str: value = value.split() result[key] = value - payload = msg.get_payload() + payload = cast(str, msg.get_payload()) if payload: result["description"] = payload From c4e03f35d9397aa1a8dd3f59af3d5d3048ccc5ef Mon Sep 17 00:00:00 2001 From: hauntsaninja Date: Sat, 20 Apr 2024 13:39:45 -0700 Subject: [PATCH 036/113] comment --- src/pip/_internal/resolution/legacy/resolver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pip/_internal/resolution/legacy/resolver.py b/src/pip/_internal/resolution/legacy/resolver.py index b0fe2323b49..cfdbb236468 100644 --- a/src/pip/_internal/resolution/legacy/resolver.py +++ b/src/pip/_internal/resolution/legacy/resolver.py @@ -49,7 +49,7 @@ logger = logging.getLogger(__name__) -DiscoveredDependencies = DefaultDict[str, List[InstallRequirement]] +DiscoveredDependencies = DefaultDict[Optional[str], List[InstallRequirement]] def _check_dist_requires_python( @@ -592,7 +592,7 @@ def schedule(req: InstallRequirement) -> None: if req.constraint: return ordered_reqs.add(req) - for dep in self._discovered_dependencies[req.name]: # type: ignore[index] + for dep in self._discovered_dependencies[req.name]: schedule(dep) order.append(req) From f5480f10c7198bbf74e741e97cb0c6420c0f0ce0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Thu, 28 Dec 2023 12:42:29 +0100 Subject: [PATCH 037/113] Cache is_satisfied_by --- .../resolution/resolvelib/provider.py | 2 ++ .../resolution/resolvelib/requirements.py | 36 +++++++++++++++++-- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/src/pip/_internal/resolution/resolvelib/provider.py b/src/pip/_internal/resolution/resolvelib/provider.py index 315fb9c8902..ad9bfb2b97b 100644 --- a/src/pip/_internal/resolution/resolvelib/provider.py +++ b/src/pip/_internal/resolution/resolvelib/provider.py @@ -1,5 +1,6 @@ import collections import math +from functools import lru_cache from typing import ( TYPE_CHECKING, Dict, @@ -236,6 +237,7 @@ def _eligible_for_upgrade(identifier: str) -> bool: incompatibilities=incompatibilities, ) + @lru_cache(maxsize=None) def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool: return requirement.is_satisfied_by(candidate) diff --git a/src/pip/_internal/resolution/resolvelib/requirements.py b/src/pip/_internal/resolution/resolvelib/requirements.py index 4af4a9f25a6..7f6e72bad18 100644 --- a/src/pip/_internal/resolution/resolvelib/requirements.py +++ b/src/pip/_internal/resolution/resolvelib/requirements.py @@ -1,8 +1,11 @@ +from typing import Any + from pip._vendor.packaging.specifiers import SpecifierSet from pip._vendor.packaging.utils import NormalizedName, canonicalize_name from pip._internal.req.constructors import install_req_drop_extras from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils.models import KeyBasedCompareMixin from .base import Candidate, CandidateLookup, Requirement, format_name @@ -17,6 +20,14 @@ def __str__(self) -> str: def __repr__(self) -> str: return 
f"{self.__class__.__name__}({self.candidate!r})" + def __hash__(self) -> int: + return hash(self.candidate) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, ExplicitRequirement): + return False + return self.candidate == other.candidate + @property def project_name(self) -> NormalizedName: # No need to canonicalize - the candidate did this @@ -37,11 +48,14 @@ def is_satisfied_by(self, candidate: Candidate) -> bool: return candidate == self.candidate -class SpecifierRequirement(Requirement): +class SpecifierRequirement(Requirement, KeyBasedCompareMixin): def __init__(self, ireq: InstallRequirement) -> None: assert ireq.link is None, "This is a link, not a specifier" self._ireq = ireq self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) + KeyBasedCompareMixin.__init__( + self, key=str(ireq), defining_class=SpecifierRequirement + ) def __str__(self) -> str: return str(self._ireq.req) @@ -97,6 +111,9 @@ def __init__(self, ireq: InstallRequirement) -> None: assert ireq.link is None, "This is a link, not a specifier" self._ireq = install_req_drop_extras(ireq) self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) + KeyBasedCompareMixin.__init__( + self, key=str(ireq), defining_class=SpecifierRequirement + ) class RequiresPythonRequirement(Requirement): @@ -104,6 +121,7 @@ class RequiresPythonRequirement(Requirement): def __init__(self, specifier: SpecifierSet, match: Candidate) -> None: self.specifier = specifier + self._specifier_string = str(specifier) # for faster __eq__ self._candidate = match def __str__(self) -> str: @@ -112,6 +130,17 @@ def __str__(self) -> str: def __repr__(self) -> str: return f"{self.__class__.__name__}({str(self.specifier)!r})" + def __hash__(self) -> int: + return hash((self._specifier_string, self._candidate)) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, RequiresPythonRequirement): + return False + return ( + self._specifier_string == other._specifier_string + and self._candidate == other._candidate + ) + @property def project_name(self) -> NormalizedName: return self._candidate.project_name @@ -136,11 +165,14 @@ def is_satisfied_by(self, candidate: Candidate) -> bool: return self.specifier.contains(candidate.version, prereleases=True) -class UnsatisfiableRequirement(Requirement): +class UnsatisfiableRequirement(Requirement, KeyBasedCompareMixin): """A requirement that cannot be satisfied.""" def __init__(self, name: NormalizedName) -> None: self._name = name + KeyBasedCompareMixin.__init__( + self, key=str(name), defining_class=UnsatisfiableRequirement + ) def __str__(self) -> str: return f"{self._name} (unavailable)" From e16867e20fcc7a77695fcd95f781d64ac1136638 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Thu, 28 Dec 2023 13:18:58 +0100 Subject: [PATCH 038/113] Use the caching variant of is_satified_by in Factory.find_candidates --- src/pip/_internal/resolution/resolvelib/factory.py | 4 +++- src/pip/_internal/resolution/resolvelib/provider.py | 1 + tests/unit/resolution_resolvelib/test_requirement.py | 10 ++++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/pip/_internal/resolution/resolvelib/factory.py b/src/pip/_internal/resolution/resolvelib/factory.py index 781c54fd83a..e36df15d459 100644 --- a/src/pip/_internal/resolution/resolvelib/factory.py +++ b/src/pip/_internal/resolution/resolvelib/factory.py @@ -3,6 +3,7 @@ import logging from typing import ( TYPE_CHECKING, + Callable, Dict, FrozenSet, Iterable, @@ -391,6 +392,7 @@ 
def find_candidates( incompatibilities: Mapping[str, Iterator[Candidate]], constraint: Constraint, prefers_installed: bool, + is_satisfied_by: Callable[[Requirement, Candidate], bool], ) -> Iterable[Candidate]: # Collect basic lookup information from the requirements. explicit_candidates: Set[Candidate] = set() @@ -456,7 +458,7 @@ def find_candidates( for c in explicit_candidates if id(c) not in incompat_ids and constraint.is_satisfied_by(c) - and all(req.is_satisfied_by(c) for req in requirements[identifier]) + and all(is_satisfied_by(req, c) for req in requirements[identifier]) ) def _make_requirements_from_install_req( diff --git a/src/pip/_internal/resolution/resolvelib/provider.py b/src/pip/_internal/resolution/resolvelib/provider.py index ad9bfb2b97b..fb0dd85f112 100644 --- a/src/pip/_internal/resolution/resolvelib/provider.py +++ b/src/pip/_internal/resolution/resolvelib/provider.py @@ -235,6 +235,7 @@ def _eligible_for_upgrade(identifier: str) -> bool: constraint=constraint, prefers_installed=(not _eligible_for_upgrade(identifier)), incompatibilities=incompatibilities, + is_satisfied_by=self.is_satisfied_by, ) @lru_cache(maxsize=None) diff --git a/tests/unit/resolution_resolvelib/test_requirement.py b/tests/unit/resolution_resolvelib/test_requirement.py index b8cd13cb566..b7b0395b037 100644 --- a/tests/unit/resolution_resolvelib/test_requirement.py +++ b/tests/unit/resolution_resolvelib/test_requirement.py @@ -23,6 +23,14 @@ # Editables +def _is_satisfied_by(requirement: Requirement, candidate: Candidate) -> bool: + """A helper function to check if a requirement is satisfied by a candidate. + + Used for mocking PipProvider.is_satified_by. + """ + return requirement.is_satisfied_by(candidate) + + @pytest.fixture def test_cases(data: TestData) -> Iterator[List[Tuple[str, str, int]]]: def _data_file(name: str) -> Path: @@ -80,6 +88,7 @@ def test_new_resolver_correct_number_of_matches( {}, Constraint.empty(), prefers_installed=False, + is_satisfied_by=_is_satisfied_by, ) assert sum(1 for _ in matches) == match_count @@ -98,6 +107,7 @@ def test_new_resolver_candidates_match_requirement( {}, Constraint.empty(), prefers_installed=False, + is_satisfied_by=_is_satisfied_by, ) for c in candidates: assert isinstance(c, Candidate) From 705ce7d5cf8300da8abe9456a245c9c4ac1c7173 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Thu, 28 Dec 2023 16:33:41 +0100 Subject: [PATCH 039/113] Optimize hashing and comparison of AlreadyInstalledCandidate --- .../_internal/resolution/resolvelib/candidates.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/pip/_internal/resolution/resolvelib/candidates.py b/src/pip/_internal/resolution/resolvelib/candidates.py index 49a1f660ca6..034fc343975 100644 --- a/src/pip/_internal/resolution/resolvelib/candidates.py +++ b/src/pip/_internal/resolution/resolvelib/candidates.py @@ -20,6 +20,7 @@ from pip._internal.req.req_install import InstallRequirement from pip._internal.utils.direct_url_helpers import direct_url_from_link from pip._internal.utils.misc import normalize_version_info +from pip._internal.utils.models import KeyBasedCompareMixin from .base import Candidate, CandidateVersion, Requirement, format_name @@ -324,7 +325,7 @@ def _prepare_distribution(self) -> BaseDistribution: return self._factory.preparer.prepare_editable_requirement(self._ireq) -class AlreadyInstalledCandidate(Candidate): +class AlreadyInstalledCandidate(Candidate, KeyBasedCompareMixin): is_installed = True source_link = None @@ 
-346,20 +347,16 @@ def __init__( skip_reason = "already satisfied" factory.preparer.prepare_installed_requirement(self._ireq, skip_reason) + KeyBasedCompareMixin.__init__( + self, key=(self.name, self.version), defining_class=self.__class__ + ) + def __str__(self) -> str: return str(self.dist) def __repr__(self) -> str: return f"{self.__class__.__name__}({self.dist!r})" - def __hash__(self) -> int: - return hash((self.__class__, self.name, self.version)) - - def __eq__(self, other: Any) -> bool: - if isinstance(other, self.__class__): - return self.name == other.name and self.version == other.version - return False - @property def project_name(self) -> NormalizedName: return self.dist.canonical_name From efab55099b400b0fd77a8cac2ace66e6e19d7ef0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 21 Apr 2024 13:35:49 +0200 Subject: [PATCH 040/113] Add news --- news/12453.feature.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/12453.feature.rst diff --git a/news/12453.feature.rst b/news/12453.feature.rst new file mode 100644 index 00000000000..704cd012a90 --- /dev/null +++ b/news/12453.feature.rst @@ -0,0 +1 @@ +Improve performance of resolution of large dependency trees, with more caching. From 38b56452ad23d68901409da30ee07ad2f0466581 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 21 Apr 2024 14:01:47 +0200 Subject: [PATCH 041/113] Don't use KeyBasedCompareMixin --- .../resolution/resolvelib/candidates.py | 15 +++++--- .../resolution/resolvelib/requirements.py | 38 +++++++++++++------ 2 files changed, 35 insertions(+), 18 deletions(-) diff --git a/src/pip/_internal/resolution/resolvelib/candidates.py b/src/pip/_internal/resolution/resolvelib/candidates.py index 034fc343975..140aaafcb8f 100644 --- a/src/pip/_internal/resolution/resolvelib/candidates.py +++ b/src/pip/_internal/resolution/resolvelib/candidates.py @@ -20,7 +20,6 @@ from pip._internal.req.req_install import InstallRequirement from pip._internal.utils.direct_url_helpers import direct_url_from_link from pip._internal.utils.misc import normalize_version_info -from pip._internal.utils.models import KeyBasedCompareMixin from .base import Candidate, CandidateVersion, Requirement, format_name @@ -325,7 +324,7 @@ def _prepare_distribution(self) -> BaseDistribution: return self._factory.preparer.prepare_editable_requirement(self._ireq) -class AlreadyInstalledCandidate(Candidate, KeyBasedCompareMixin): +class AlreadyInstalledCandidate(Candidate): is_installed = True source_link = None @@ -347,16 +346,20 @@ def __init__( skip_reason = "already satisfied" factory.preparer.prepare_installed_requirement(self._ireq, skip_reason) - KeyBasedCompareMixin.__init__( - self, key=(self.name, self.version), defining_class=self.__class__ - ) - def __str__(self) -> str: return str(self.dist) def __repr__(self) -> str: return f"{self.__class__.__name__}({self.dist!r})" + def __eq__(self, other: object) -> bool: + if not isinstance(other, AlreadyInstalledCandidate): + return NotImplemented + return self.name == other.name and self.version == other.version + + def __hash__(self) -> int: + return hash((self.name, self.version)) + @property def project_name(self) -> NormalizedName: return self.dist.canonical_name diff --git a/src/pip/_internal/resolution/resolvelib/requirements.py b/src/pip/_internal/resolution/resolvelib/requirements.py index 7f6e72bad18..f980a356f18 100644 --- a/src/pip/_internal/resolution/resolvelib/requirements.py +++ 
b/src/pip/_internal/resolution/resolvelib/requirements.py @@ -5,7 +5,6 @@ from pip._internal.req.constructors import install_req_drop_extras from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.models import KeyBasedCompareMixin from .base import Candidate, CandidateLookup, Requirement, format_name @@ -48,14 +47,11 @@ def is_satisfied_by(self, candidate: Candidate) -> bool: return candidate == self.candidate -class SpecifierRequirement(Requirement, KeyBasedCompareMixin): +class SpecifierRequirement(Requirement): def __init__(self, ireq: InstallRequirement) -> None: assert ireq.link is None, "This is a link, not a specifier" self._ireq = ireq self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) - KeyBasedCompareMixin.__init__( - self, key=str(ireq), defining_class=SpecifierRequirement - ) def __str__(self) -> str: return str(self._ireq.req) @@ -63,6 +59,14 @@ def __str__(self) -> str: def __repr__(self) -> str: return f"{self.__class__.__name__}({str(self._ireq.req)!r})" + def __eq__(self, other: object) -> bool: + if not isinstance(other, SpecifierRequirement): + return NotImplemented + return str(self._ireq) == str(other._ireq) + + def __hash__(self) -> int: + return hash(str(self._ireq)) + @property def project_name(self) -> NormalizedName: assert self._ireq.req, "Specifier-backed ireq is always PEP 508" @@ -111,9 +115,14 @@ def __init__(self, ireq: InstallRequirement) -> None: assert ireq.link is None, "This is a link, not a specifier" self._ireq = install_req_drop_extras(ireq) self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) - KeyBasedCompareMixin.__init__( - self, key=str(ireq), defining_class=SpecifierRequirement - ) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, SpecifierWithoutExtrasRequirement): + return NotImplemented + return str(self._ireq) == str(other._ireq) + + def __hash__(self) -> int: + return hash(str(self._ireq)) class RequiresPythonRequirement(Requirement): @@ -165,14 +174,11 @@ def is_satisfied_by(self, candidate: Candidate) -> bool: return self.specifier.contains(candidate.version, prereleases=True) -class UnsatisfiableRequirement(Requirement, KeyBasedCompareMixin): +class UnsatisfiableRequirement(Requirement): """A requirement that cannot be satisfied.""" def __init__(self, name: NormalizedName) -> None: self._name = name - KeyBasedCompareMixin.__init__( - self, key=str(name), defining_class=UnsatisfiableRequirement - ) def __str__(self) -> str: return f"{self._name} (unavailable)" @@ -180,6 +186,14 @@ def __str__(self) -> str: def __repr__(self) -> str: return f"{self.__class__.__name__}({str(self._name)!r})" + def __eq__(self, other: object) -> bool: + if not isinstance(other, UnsatisfiableRequirement): + return NotImplemented + return self._name == other._name + + def __hash__(self) -> int: + return hash(self._name) + @property def project_name(self) -> NormalizedName: return self._name From e42eb57f2ffaf2de3d6babc540e18b08c1088c81 Mon Sep 17 00:00:00 2001 From: Damian Shaw Date: Fri, 12 Apr 2024 20:49:19 -0400 Subject: [PATCH 042/113] Run Python 3.13 in CI tests --- .github/workflows/ci.yml | 4 +++- noxfile.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 57273ff45bd..17f6d45a7c8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -114,6 +114,7 @@ jobs: - "3.10" - "3.11" - "3.12" + - "3.13" steps: - uses: actions/checkout@v4 @@ -167,7 +168,8 @@ jobs: # - "3.9" 
# - "3.10" # - "3.11" - - "3.12" + - "3.12" # Comment out when 3.13 is final + - "3.13" group: [1, 2] steps: diff --git a/noxfile.py b/noxfile.py index dc74654da53..92d0e243838 100644 --- a/noxfile.py +++ b/noxfile.py @@ -67,7 +67,7 @@ def should_update_common_wheels() -> bool: # ----------------------------------------------------------------------------- # Development Commands # ----------------------------------------------------------------------------- -@nox.session(python=["3.8", "3.9", "3.10", "3.11", "3.12", "pypy3"]) +@nox.session(python=["3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "pypy3"]) def test(session: nox.Session) -> None: # Get the common wheels. if should_update_common_wheels(): From 6b7592d6f55bdb471b33db8d707fd6159e2bfa04 Mon Sep 17 00:00:00 2001 From: Damian Shaw Date: Sat, 13 Apr 2024 13:53:14 -0400 Subject: [PATCH 043/113] allow-prereleases --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 17f6d45a7c8..c10502b1e6f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -177,6 +177,7 @@ jobs: - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} + allow-prereleases: true # Temporarily required for Python 3.13 # We use C:\Temp (which is already available on the worker) # as a temporary directory for all of the tests because the From 2c77eacecd76124ead6c7178bdb61121a26da5d4 Mon Sep 17 00:00:00 2001 From: Damian Shaw Date: Sat, 13 Apr 2024 14:04:47 -0400 Subject: [PATCH 044/113] Update .github/workflows/ci.yml Co-authored-by: Hugo van Kemenade <1324225+hugovk@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c10502b1e6f..161ee729f04 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -177,7 +177,7 @@ jobs: - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - allow-prereleases: true # Temporarily required for Python 3.13 + allow-prereleases: true # We use C:\Temp (which is already available on the worker) # as a temporary directory for all of the tests because the From bc4d77d95d91133bf9264ec7cce19b0e9d726ccf Mon Sep 17 00:00:00 2001 From: Damian Shaw Date: Sun, 14 Apr 2024 14:28:24 -0400 Subject: [PATCH 045/113] For Python 3.13 source cffi from github --- tests/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/requirements.txt b/tests/requirements.txt index 5ecb21f6bf6..7fba866c745 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,3 +1,4 @@ +cffi @ https://github.com/python-cffi/cffi/archive/refs/heads/main.zip; python_version > "3.12" # Temporary workaround for Python 3.13 until next CFFI release cryptography freezegun installer From 9c08d7d9d6662f30c22e9e3ddc6cd6dabc92ba5c Mon Sep 17 00:00:00 2001 From: Damian Shaw Date: Sun, 14 Apr 2024 14:29:41 -0400 Subject: [PATCH 046/113] NEWS ENTRY --- news/12620.trivial.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/12620.trivial.rst diff --git a/news/12620.trivial.rst b/news/12620.trivial.rst new file mode 100644 index 00000000000..52fa571ed17 --- /dev/null +++ b/news/12620.trivial.rst @@ -0,0 +1 @@ +Enable Python 3.13 CI tests From 90295fb97ea38966cdb50ca11c284e818f2fad2d Mon Sep 17 00:00:00 2001 From: Damian Shaw Date: Wed, 24 Apr 2024 22:05:06 -0400 Subject: [PATCH 047/113] Fix test_subdirectory_fragment test --- tests/unit/test_cache.py | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/tests/unit/test_cache.py b/tests/unit/test_cache.py index d0fee69c39b..30cdb6ebece 100644 --- a/tests/unit/test_cache.py +++ b/tests/unit/test_cache.py @@ -13,11 +13,11 @@ def test_falsey_path_none() -> None: assert wc.cache_dir is None -def test_subdirectory_fragment() -> None: +def test_subdirectory_fragment(tmp_path: Path) -> None: """ Test the subdirectory URL fragment is part of the cache key. """ - wc = WheelCache("/tmp/.foo/") + wc = WheelCache(os.fspath(tmp_path)) link1 = Link("git+https://g.c/o/r#subdirectory=d1") link2 = Link("git+https://g.c/o/r#subdirectory=d2") assert wc.get_path_for_link(link1) != wc.get_path_for_link(link2) From e884c0021ea6902db9dc316820453fcd9cd42144 Mon Sep 17 00:00:00 2001 From: Richard Si Date: Fri, 26 Apr 2024 16:06:54 -0400 Subject: [PATCH 048/113] list: disable pip version self check unless given --outdated/--uptodate (#12646) --- news/11677.feature.rst | 1 + src/pip/_internal/commands/list.py | 4 ++++ tests/unit/test_commands.py | 22 ++++++++++++++++++++++ 3 files changed, 27 insertions(+) create mode 100644 news/11677.feature.rst diff --git a/news/11677.feature.rst b/news/11677.feature.rst new file mode 100644 index 00000000000..b68a0db876b --- /dev/null +++ b/news/11677.feature.rst @@ -0,0 +1 @@ +``pip list`` no longer performs the pip version check unless ``--outdated`` or ``--uptodate`` is given. diff --git a/src/pip/_internal/commands/list.py b/src/pip/_internal/commands/list.py index e551dda9a96..f510919910e 100644 --- a/src/pip/_internal/commands/list.py +++ b/src/pip/_internal/commands/list.py @@ -135,6 +135,10 @@ def add_options(self) -> None: self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, self.cmd_opts) + def handle_pip_version_check(self, options: Values) -> None: + if options.outdated or options.uptodate: + super().handle_pip_version_check(options) + def _build_package_finder( self, options: Values, session: PipSession ) -> PackageFinder: diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index 7a5c4e8319d..4f6366513d7 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -1,3 +1,4 @@ +import os from typing import Callable, List from unittest import mock @@ -104,6 +105,10 @@ def test_index_group_handle_pip_version_check( options.disable_pip_version_check = disable_pip_version_check options.no_index = no_index + # See test test_list_pip_version_check() below. + if command_name == "list": + expected_called = False + command.handle_pip_version_check(options) if expected_called: mock_version_check.assert_called_once() @@ -120,3 +125,20 @@ def is_requirement_command(command: Command) -> bool: return isinstance(command, RequirementCommand) check_commands(is_requirement_command, ["download", "install", "wheel"]) + + +@pytest.mark.parametrize("flag", ["", "--outdated", "--uptodate"]) +@mock.patch("pip._internal.cli.req_command.pip_self_version_check") +@mock.patch.dict(os.environ, {"PIP_DISABLE_PIP_VERSION_CHECK": "no"}) +def test_list_pip_version_check(version_check_mock: mock.Mock, flag: str) -> None: + """ + Ensure that pip list doesn't perform a version self-check unless given + --outdated or --uptodate (as they require hitting the network anyway). 
+ """ + command = create_command("list") + command.run = lambda *args, **kwargs: 0 # type: ignore[method-assign] + command.main([flag]) + if flag != "": + version_check_mock.assert_called_once() + else: + version_check_mock.assert_not_called() From f18bebdddbb04372169a617ebdd865974125c196 Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Mon, 29 Apr 2024 20:26:17 +0100 Subject: [PATCH 049/113] Pin GHA MacOS jobs to MacOS 12 (#12658) Co-authored-by: Pradyun Gedam --- .github/workflows/ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 57273ff45bd..207dc9b717c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -97,7 +97,7 @@ jobs: tests-unix: name: tests / ${{ matrix.python.key || matrix.python }} / ${{ matrix.os }} - runs-on: ${{ matrix.os }}-latest + runs-on: ${{ matrix.os }} needs: [packaging, determine-changes] if: >- @@ -107,7 +107,7 @@ jobs: strategy: fail-fast: true matrix: - os: [Ubuntu, MacOS] + os: [ubuntu-latest, macos-12] python: - "3.8" - "3.9" @@ -123,13 +123,13 @@ jobs: allow-prereleases: true - name: Install Ubuntu dependencies - if: matrix.os == 'Ubuntu' + if: matrix.os == 'ubuntu-latest' run: | sudo apt-get update sudo apt-get install bzr - name: Install MacOS dependencies - if: matrix.os == 'MacOS' + if: matrix.os == 'macos-12' run: brew install breezy - run: pip install nox From 3b14deb3fac93a5f791d7347c717f6469b629b18 Mon Sep 17 00:00:00 2001 From: Richard Si Date: Fri, 5 Jan 2024 14:49:27 -0500 Subject: [PATCH 050/113] Migrate uninstallation errors to diagnostic error format --- news/10421.feature.rst | 1 + src/pip/_internal/cli/base_command.py | 2 -- src/pip/_internal/exceptions.py | 46 +++++++++++++++++++++++--- src/pip/_internal/req/req_uninstall.py | 22 +++--------- tests/functional/test_uninstall.py | 22 ++++++------ 5 files changed, 57 insertions(+), 36 deletions(-) create mode 100644 news/10421.feature.rst diff --git a/news/10421.feature.rst b/news/10421.feature.rst new file mode 100644 index 00000000000..49bb72bc0d8 --- /dev/null +++ b/news/10421.feature.rst @@ -0,0 +1 @@ +Reword and improve presentation of uninstallation errors. 
diff --git a/src/pip/_internal/cli/base_command.py b/src/pip/_internal/cli/base_command.py index db9d5cc6624..09f8c75ff87 100644 --- a/src/pip/_internal/cli/base_command.py +++ b/src/pip/_internal/cli/base_command.py @@ -28,7 +28,6 @@ InstallationError, NetworkConnectionError, PreviousBuildDirError, - UninstallationError, ) from pip._internal.utils.filesystem import check_path_owner from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging @@ -192,7 +191,6 @@ def exc_logging_wrapper(*args: Any) -> int: return PREVIOUS_BUILD_DIR_ERROR except ( InstallationError, - UninstallationError, BadCommand, NetworkConnectionError, ) as exc: diff --git a/src/pip/_internal/exceptions.py b/src/pip/_internal/exceptions.py index 0609e450683..37d519166ea 100644 --- a/src/pip/_internal/exceptions.py +++ b/src/pip/_internal/exceptions.py @@ -184,10 +184,6 @@ class InstallationError(PipError): """General exception during installation""" -class UninstallationError(PipError): - """General exception during uninstallation""" - - class MissingPyProjectBuildRequires(DiagnosticPipError): """Raised when pyproject.toml has `build-system`, but no `build-system.requires`.""" @@ -726,3 +722,45 @@ def from_config( exc_info = logger.isEnabledFor(VERBOSE) logger.warning("Failed to read %s", config, exc_info=exc_info) return cls(None) + + +class UninstallMissingRecord(DiagnosticPipError): + reference = "uninstall-no-record-file" + + def __init__(self, *, distribution: "BaseDistribution") -> None: + installer = distribution.installer + if not installer or installer == "pip": + dep = f"{distribution.raw_name}=={distribution.version}" + hint = Text.assemble( + "You might be able to recover from this via: ", + (f"pip install --force-reinstall --no-deps {dep}", "green"), + ) + else: + hint = Text( + f"The package was installed by {installer}. " + "You should check if it can uninstall the package." + ) + + super().__init__( + message=Text(f"Cannot uninstall {distribution}"), + context=( + "The package's contents are unknown: " + f"no RECORD file was found for {distribution.raw_name}." + ), + hint_stmt=hint, + ) + + +class LegacyDistutilsInstall(DiagnosticPipError): + reference = "uninstall-distutils-installed-package" + + def __init__(self, *, distribution: "BaseDistribution") -> None: + super().__init__( + message=Text(f"Cannot uninstall {distribution}"), + context=( + "It is a distutils installed project and thus we cannot accurately " + "determine which files belong to it which would lead to only a partial " + "uninstall." + ), + hint_stmt=None, + ) diff --git a/src/pip/_internal/req/req_uninstall.py b/src/pip/_internal/req/req_uninstall.py index 3a9ae2b1097..9498fe67930 100644 --- a/src/pip/_internal/req/req_uninstall.py +++ b/src/pip/_internal/req/req_uninstall.py @@ -5,7 +5,7 @@ from importlib.util import cache_from_source from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Set, Tuple -from pip._internal.exceptions import UninstallationError +from pip._internal.exceptions import LegacyDistutilsInstall, UninstallMissingRecord from pip._internal.locations import get_bin_prefix, get_bin_user from pip._internal.metadata import BaseDistribution from pip._internal.utils.compat import WINDOWS @@ -61,7 +61,7 @@ def uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]: UninstallPathSet.add() takes care of the __pycache__ .py[co]. 
- If RECORD is not found, raises UninstallationError, + If RECORD is not found, raises an error, with possible information from the INSTALLER file. https://packaging.python.org/specifications/recording-installed-packages/ @@ -71,17 +71,7 @@ def uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]: entries = dist.iter_declared_entries() if entries is None: - msg = f"Cannot uninstall {dist}, RECORD file not found." - installer = dist.installer - if not installer or installer == "pip": - dep = f"{dist.raw_name}=={dist.version}" - msg += ( - " You might be able to recover from this via: " - f"'pip install --force-reinstall --no-deps {dep}'." - ) - else: - msg += f" Hint: The package was installed by {installer}." - raise UninstallationError(msg) + raise UninstallMissingRecord(distribution=dist) for entry in entries: path = os.path.join(location, entry) @@ -509,11 +499,7 @@ def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet": paths_to_remove.add(f"{path}.pyo") elif dist.installed_by_distutils: - raise UninstallationError( - f"Cannot uninstall {dist.raw_name!r}. It is a distutils installed " - "project and thus we cannot accurately determine which files belong " - "to it which would lead to only a partial uninstall." - ) + raise LegacyDistutilsInstall(distribution=dist) elif dist.installed_as_egg: # package installed by easy_install diff --git a/tests/functional/test_uninstall.py b/tests/functional/test_uninstall.py index af140e07159..5bf64119e34 100644 --- a/tests/functional/test_uninstall.py +++ b/tests/functional/test_uninstall.py @@ -65,10 +65,10 @@ def test_basic_uninstall_distutils(script: PipTestEnvironment) -> None: result = script.pip( "uninstall", "distutils_install", "-y", expect_stderr=True, expect_error=True ) + assert "Cannot uninstall distutils-install 0.1" in result.stderr assert ( - "Cannot uninstall 'distutils-install'. It is a distutils installed " - "project and thus we cannot accurately determine which files belong " - "to it which would lead to only a partial uninstall." + "It is a distutils installed project and thus we cannot accurately determine " + "which files belong to it which would lead to only a partial uninstall." ) in result.stderr @@ -590,18 +590,16 @@ def test_uninstall_without_record_fails( installer_path.write_text(installer + os.linesep) result2 = script.pip("uninstall", "simple.dist", "-y", expect_error=True) - expected_error_message = ( - "ERROR: Cannot uninstall simple.dist 0.1, RECORD file not found." - ) + assert "Cannot uninstall simple.dist 0.1" in result2.stderr + assert "no RECORD file was found for simple.dist" in result2.stderr if not isinstance(installer, str) or not installer.strip() or installer == "pip": - expected_error_message += ( - " You might be able to recover from this via: " - "'pip install --force-reinstall --no-deps " - "simple.dist==0.1'." + hint = ( + "You might be able to recover from this via: " + "pip install --force-reinstall --no-deps simple.dist==0.1" ) elif installer: - expected_error_message += f" Hint: The package was installed by {installer}." - assert result2.stderr.rstrip() == expected_error_message + hint = f"The package was installed by {installer}." 
+ assert f"hint: {hint}" in result2.stderr assert_all_changes(result.files_after, result2, ignore_changes) From f1c1d9778d43545fdf89bac4723b6592ef0eddc2 Mon Sep 17 00:00:00 2001 From: Damian Shaw Date: Sat, 27 Apr 2024 23:02:01 -0400 Subject: [PATCH 051/113] Cache hashes for specifier requirements and _InstallRequirementBackedCandidate --- .../resolution/resolvelib/candidates.py | 7 ++- .../resolution/resolvelib/requirements.py | 45 ++++++++++++++++--- 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/src/pip/_internal/resolution/resolvelib/candidates.py b/src/pip/_internal/resolution/resolvelib/candidates.py index 9d15b8fda0a..019ff60a0a5 100644 --- a/src/pip/_internal/resolution/resolvelib/candidates.py +++ b/src/pip/_internal/resolution/resolvelib/candidates.py @@ -154,6 +154,7 @@ def __init__( self._name = name self._version = version self.dist = self._prepare() + self._hash: Optional[int] = None def __str__(self) -> str: return f"{self.name} {self.version}" @@ -162,7 +163,11 @@ def __repr__(self) -> str: return f"{self.__class__.__name__}({str(self._link)!r})" def __hash__(self) -> int: - return hash((self.__class__, self._link)) + if self._hash is not None: + return self._hash + + self._hash = hash((self.__class__, self._link)) + return self._hash def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): diff --git a/src/pip/_internal/resolution/resolvelib/requirements.py b/src/pip/_internal/resolution/resolvelib/requirements.py index f980a356f18..b04f41b2191 100644 --- a/src/pip/_internal/resolution/resolvelib/requirements.py +++ b/src/pip/_internal/resolution/resolvelib/requirements.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, Optional from pip._vendor.packaging.specifiers import SpecifierSet from pip._vendor.packaging.utils import NormalizedName, canonicalize_name @@ -51,8 +51,18 @@ class SpecifierRequirement(Requirement): def __init__(self, ireq: InstallRequirement) -> None: assert ireq.link is None, "This is a link, not a specifier" self._ireq = ireq + self._equal_cache: Optional[str] = None + self._hash: Optional[int] = None self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) + @property + def _equal(self) -> str: + if self._equal_cache is not None: + return self._equal_cache + + self._equal_cache = str(self._ireq) + return self._equal_cache + def __str__(self) -> str: return str(self._ireq.req) @@ -62,10 +72,14 @@ def __repr__(self) -> str: def __eq__(self, other: object) -> bool: if not isinstance(other, SpecifierRequirement): return NotImplemented - return str(self._ireq) == str(other._ireq) + return self._equal == other._equal def __hash__(self) -> int: - return hash(str(self._ireq)) + if self._hash is not None: + return self._hash + + self._hash = hash(self._equal) + return self._hash @property def project_name(self) -> NormalizedName: @@ -114,15 +128,29 @@ class SpecifierWithoutExtrasRequirement(SpecifierRequirement): def __init__(self, ireq: InstallRequirement) -> None: assert ireq.link is None, "This is a link, not a specifier" self._ireq = install_req_drop_extras(ireq) + self._equal_cache: Optional[str] = None + self._hash: Optional[int] = None self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) + @property + def _equal(self) -> str: + if self._equal_cache is not None: + return self._equal_cache + + self._equal_cache = str(self._ireq) + return self._equal_cache + def __eq__(self, other: object) -> bool: if not isinstance(other, SpecifierWithoutExtrasRequirement): return 
NotImplemented - return str(self._ireq) == str(other._ireq) + return self._equal == other._equal def __hash__(self) -> int: - return hash(str(self._ireq)) + if self._hash is not None: + return self._hash + + self._hash = hash(self._equal) + return self._hash class RequiresPythonRequirement(Requirement): @@ -131,6 +159,7 @@ class RequiresPythonRequirement(Requirement): def __init__(self, specifier: SpecifierSet, match: Candidate) -> None: self.specifier = specifier self._specifier_string = str(specifier) # for faster __eq__ + self._hash: Optional[int] = None self._candidate = match def __str__(self) -> str: @@ -140,7 +169,11 @@ def __repr__(self) -> str: return f"{self.__class__.__name__}({str(self.specifier)!r})" def __hash__(self) -> int: - return hash((self._specifier_string, self._candidate)) + if self._hash is not None: + return self._hash + + self._hash = hash((self._specifier_string, self._candidate)) + return self._hash def __eq__(self, other: Any) -> bool: if not isinstance(other, RequiresPythonRequirement): From 1e510e3bb126f22550bb495361ec7e7f58e36f9d Mon Sep 17 00:00:00 2001 From: Damian Shaw Date: Sat, 27 Apr 2024 23:13:08 -0400 Subject: [PATCH 052/113] NEWS ENTRY --- news/12657.feature.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/12657.feature.rst diff --git a/news/12657.feature.rst b/news/12657.feature.rst new file mode 100644 index 00000000000..27e4966b9a3 --- /dev/null +++ b/news/12657.feature.rst @@ -0,0 +1 @@ +Further improve resolution performance of large dependency trees, by caching hash calculations. From 253b4323494828662aefb05378494ce5b88c0ea8 Mon Sep 17 00:00:00 2001 From: Ofek Lev Date: Tue, 30 Apr 2024 19:25:27 -0400 Subject: [PATCH 053/113] Fix specifier in tests/requirements.txt --- tests/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/requirements.txt b/tests/requirements.txt index 7fba866c745..8eb02d6ca5b 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,4 +1,4 @@ -cffi @ https://github.com/python-cffi/cffi/archive/refs/heads/main.zip; python_version > "3.12" # Temporary workaround for Python 3.13 until next CFFI release +cffi @ https://github.com/python-cffi/cffi/archive/refs/heads/main.zip ; python_version > "3.12" # Temporary workaround for Python 3.13 until next CFFI release cryptography freezegun installer From 06319f213f004baf82275525284512a201127da5 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 2 May 2024 13:27:42 +0100 Subject: [PATCH 054/113] Fix #12666 by pre-loading script wrapper code --- src/pip/_vendor/distlib/scripts.py | 26 ++++++++++++++++++++------ tests/functional/test_self_update.py | 22 ++++++++++++++++++++++ 2 files changed, 42 insertions(+), 6 deletions(-) create mode 100644 tests/functional/test_self_update.py diff --git a/src/pip/_vendor/distlib/scripts.py b/src/pip/_vendor/distlib/scripts.py index cfa45d2af18..e16292b8330 100644 --- a/src/pip/_vendor/distlib/scripts.py +++ b/src/pip/_vendor/distlib/scripts.py @@ -49,6 +49,24 @@ sys.exit(%(func)s()) ''' +# Pre-fetch the contents of all executable wrapper stubs. +# This is to address https://github.com/pypa/pip/issues/12666. +# When updating pip, we rename the old pip in place before installing the +# new version. If we try to fetch a wrapper *after* that rename, the finder +# machinery will be confused as the package is no longer available at the +# location where it was imported from. So we load everything into memory in +# advance. 
+ +# Issue 31: don't hardcode an absolute package name, but +# determine it relative to the current package +distlib_package = __name__.rsplit('.', 1)[0] + +WRAPPERS = { + r.name: r.bytes + for r in finder(distlib_package).iterator("") + if r.name.endswith(".exe") +} + def enquote_executable(executable): if ' ' in executable: @@ -409,15 +427,11 @@ def _get_launcher(self, kind): bits = '32' platform_suffix = '-arm' if get_platform() == 'win-arm64' else '' name = '%s%s%s.exe' % (kind, bits, platform_suffix) - # Issue 31: don't hardcode an absolute package name, but - # determine it relative to the current package - distlib_package = __name__.rsplit('.', 1)[0] - resource = finder(distlib_package).find(name) - if not resource: + if name not in WRAPPERS: msg = ('Unable to find resource %s in package %s' % (name, distlib_package)) raise ValueError(msg) - return resource.bytes + return WRAPPERS[name] # Public API follows diff --git a/tests/functional/test_self_update.py b/tests/functional/test_self_update.py new file mode 100644 index 00000000000..c507552208a --- /dev/null +++ b/tests/functional/test_self_update.py @@ -0,0 +1,22 @@ +# Check that pip can update itself correctly + +from typing import Any + + +def test_self_update_editable(script: Any, pip_src: Any) -> None: + # Test that if we have an environment with pip installed in non-editable + # mode, that pip can safely update itself to an editable install. + # See https://github.com/pypa/pip/issues/12666 for details. + + # Step 1. Install pip as non-editable. This is expected to succeed as + # the existing pip in the environment is installed in editable mode, so + # it only places a .pth file in the environment. + proc = script.pip("install", pip_src) + assert proc.returncode == 0 + # Step 2. Using the pip we just installed, install pip *again*, but + # in editable mode. This could fail, as we'll need to uninstall the running + # pip in order to install the new copy, and uninstalling pip while it's + # running could fail. This test is specifically to ensure that doesn't + # happen... + proc = script.pip("install", "-e", pip_src) + assert proc.returncode == 0 From e5a11b9afceb821adc266f28cbeb8556a2389210 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 2 May 2024 13:35:37 +0100 Subject: [PATCH 055/113] Make the distlib changes into a patch --- tools/vendoring/patches/distlib.patch | 47 +++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 tools/vendoring/patches/distlib.patch diff --git a/tools/vendoring/patches/distlib.patch b/tools/vendoring/patches/distlib.patch new file mode 100644 index 00000000000..de2834710a3 --- /dev/null +++ b/tools/vendoring/patches/distlib.patch @@ -0,0 +1,47 @@ +diff --git a/src/pip/_vendor/distlib/scripts.py b/src/pip/_vendor/distlib/scripts.py +index cfa45d2af..e16292b83 100644 +--- a/src/pip/_vendor/distlib/scripts.py ++++ b/src/pip/_vendor/distlib/scripts.py +@@ -49,6 +49,24 @@ if __name__ == '__main__': + sys.exit(%(func)s()) + ''' + ++# Pre-fetch the contents of all executable wrapper stubs. ++# This is to address https://github.com/pypa/pip/issues/12666. ++# When updating pip, we rename the old pip in place before installing the ++# new version. If we try to fetch a wrapper *after* that rename, the finder ++# machinery will be confused as the package is no longer available at the ++# location where it was imported from. So we load everything into memory in ++# advance. 
++ ++# Issue 31: don't hardcode an absolute package name, but ++# determine it relative to the current package ++distlib_package = __name__.rsplit('.', 1)[0] ++ ++WRAPPERS = { ++ r.name: r.bytes ++ for r in finder(distlib_package).iterator("") ++ if r.name.endswith(".exe") ++} ++ + + def enquote_executable(executable): + if ' ' in executable: +@@ -409,15 +427,11 @@ class ScriptMaker(object): + bits = '32' + platform_suffix = '-arm' if get_platform() == 'win-arm64' else '' + name = '%s%s%s.exe' % (kind, bits, platform_suffix) +- # Issue 31: don't hardcode an absolute package name, but +- # determine it relative to the current package +- distlib_package = __name__.rsplit('.', 1)[0] +- resource = finder(distlib_package).find(name) +- if not resource: ++ if name not in WRAPPERS: + msg = ('Unable to find resource %s in package %s' % + (name, distlib_package)) + raise ValueError(msg) +- return resource.bytes ++ return WRAPPERS[name] + + # Public API follows + From 8fe52159b4182e5658bd15624409e27e1611e655 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 2 May 2024 13:51:09 +0100 Subject: [PATCH 056/113] Add a news file --- news/12666.bugfix.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/12666.bugfix.rst diff --git a/news/12666.bugfix.rst b/news/12666.bugfix.rst new file mode 100644 index 00000000000..72a4a65155e --- /dev/null +++ b/news/12666.bugfix.rst @@ -0,0 +1 @@ +Fix intermittent "cannot locate t64.exe" errors when upgrading pip. From 80ebc8ae258d13ad2a05d5dfca0b986df1c18830 Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Fri, 3 May 2024 20:58:44 +0100 Subject: [PATCH 057/113] Upgrade CacheControl to 0.14.0 --- news/CacheControl.vendor.rst | 1 + pyproject.toml | 1 - src/pip/_vendor/cachecontrol/__init__.py | 2 +- src/pip/_vendor/cachecontrol/adapter.py | 10 +-- .../_vendor/cachecontrol/caches/file_cache.py | 7 +- src/pip/_vendor/cachecontrol/controller.py | 7 +- src/pip/_vendor/cachecontrol/heuristics.py | 2 +- src/pip/_vendor/cachecontrol/serialize.py | 76 ++----------------- src/pip/_vendor/vendor.txt | 2 +- 9 files changed, 27 insertions(+), 81 deletions(-) create mode 100644 news/CacheControl.vendor.rst diff --git a/news/CacheControl.vendor.rst b/news/CacheControl.vendor.rst new file mode 100644 index 00000000000..f6132b8332e --- /dev/null +++ b/news/CacheControl.vendor.rst @@ -0,0 +1 @@ +Upgrade CacheControl to 0.14.0 diff --git a/pyproject.toml b/pyproject.toml index 74a7f71ca59..6d1f48c11a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -144,7 +144,6 @@ distro = [] setuptools = "pkg_resources" [tool.vendoring.license.fallback-urls] -CacheControl = "https://raw.githubusercontent.com/ionrock/cachecontrol/v0.12.6/LICENSE.txt" distlib = "https://bitbucket.org/pypa/distlib/raw/master/LICENSE.txt" webencodings = "https://github.com/SimonSapin/python-webencodings/raw/master/LICENSE" diff --git a/src/pip/_vendor/cachecontrol/__init__.py b/src/pip/_vendor/cachecontrol/__init__.py index 4d20bc9b12a..b34b0fcbd40 100644 --- a/src/pip/_vendor/cachecontrol/__init__.py +++ b/src/pip/_vendor/cachecontrol/__init__.py @@ -8,7 +8,7 @@ """ __author__ = "Eric Larson" __email__ = "eric@ionrock.org" -__version__ = "0.13.1" +__version__ = "0.14.0" from pip._vendor.cachecontrol.adapter import CacheControlAdapter from pip._vendor.cachecontrol.controller import CacheController diff --git a/src/pip/_vendor/cachecontrol/adapter.py b/src/pip/_vendor/cachecontrol/adapter.py index 3e83e308dba..fbb4ecc8876 100644 --- a/src/pip/_vendor/cachecontrol/adapter.py +++ 
b/src/pip/_vendor/cachecontrol/adapter.py @@ -125,21 +125,21 @@ def build_response( else: # Wrap the response file with a wrapper that will cache the # response when the stream has been consumed. - response._fp = CallbackFileWrapper( # type: ignore[attr-defined] - response._fp, # type: ignore[attr-defined] + response._fp = CallbackFileWrapper( # type: ignore[assignment] + response._fp, # type: ignore[arg-type] functools.partial( self.controller.cache_response, request, response ), ) if response.chunked: - super_update_chunk_length = response._update_chunk_length # type: ignore[attr-defined] + super_update_chunk_length = response._update_chunk_length def _update_chunk_length(self: HTTPResponse) -> None: super_update_chunk_length() if self.chunk_left == 0: - self._fp._close() # type: ignore[attr-defined] + self._fp._close() # type: ignore[union-attr] - response._update_chunk_length = types.MethodType( # type: ignore[attr-defined] + response._update_chunk_length = types.MethodType( # type: ignore[method-assign] _update_chunk_length, response ) diff --git a/src/pip/_vendor/cachecontrol/caches/file_cache.py b/src/pip/_vendor/cachecontrol/caches/file_cache.py index 1fd28013084..e6e3a57947f 100644 --- a/src/pip/_vendor/cachecontrol/caches/file_cache.py +++ b/src/pip/_vendor/cachecontrol/caches/file_cache.py @@ -6,7 +6,8 @@ import hashlib import os from textwrap import dedent -from typing import IO, TYPE_CHECKING +from typing import IO, TYPE_CHECKING, Union +from pathlib import Path from pip._vendor.cachecontrol.cache import BaseCache, SeparateBodyBaseCache from pip._vendor.cachecontrol.controller import CacheController @@ -63,7 +64,7 @@ class _FileCacheMixin: def __init__( self, - directory: str, + directory: str | Path, forever: bool = False, filemode: int = 0o0600, dirmode: int = 0o0700, @@ -79,7 +80,7 @@ def __init__( """ NOTE: In order to use the FileCache you must have filelock installed. You can install it via pip: - pip install filelock + pip install cachecontrol[filecache] """ ) raise ImportError(notice) diff --git a/src/pip/_vendor/cachecontrol/controller.py b/src/pip/_vendor/cachecontrol/controller.py index 586b9f97b80..d7dd86e5f70 100644 --- a/src/pip/_vendor/cachecontrol/controller.py +++ b/src/pip/_vendor/cachecontrol/controller.py @@ -142,6 +142,11 @@ def _load_from_cache(self, request: PreparedRequest) -> HTTPResponse | None: """ Load a cached response, or return None if it's not available. """ + # We do not support caching of partial content: so if the request contains a + # Range header then we don't want to load anything from the cache. 
+ if "Range" in request.headers: + return None + cache_url = request.url assert cache_url is not None cache_data = self.cache.get(cache_url) @@ -480,7 +485,7 @@ def update_cached_response( cached_response.headers.update( { k: v - for k, v in response.headers.items() # type: ignore[no-untyped-call] + for k, v in response.headers.items() if k.lower() not in excluded_headers } ) diff --git a/src/pip/_vendor/cachecontrol/heuristics.py b/src/pip/_vendor/cachecontrol/heuristics.py index b9d72ca4ac5..f6e5634e385 100644 --- a/src/pip/_vendor/cachecontrol/heuristics.py +++ b/src/pip/_vendor/cachecontrol/heuristics.py @@ -68,7 +68,7 @@ def update_headers(self, response: HTTPResponse) -> dict[str, str]: if "expires" not in response.headers: date = parsedate(response.headers["date"]) - expires = expire_after(timedelta(days=1), date=datetime(*date[:6], tzinfo=timezone.utc)) # type: ignore[misc] + expires = expire_after(timedelta(days=1), date=datetime(*date[:6], tzinfo=timezone.utc)) # type: ignore[index,misc] headers["expires"] = datetime_to_header(expires) headers["cache-control"] = "public" return headers diff --git a/src/pip/_vendor/cachecontrol/serialize.py b/src/pip/_vendor/cachecontrol/serialize.py index f9e967c3c34..a49487a1493 100644 --- a/src/pip/_vendor/cachecontrol/serialize.py +++ b/src/pip/_vendor/cachecontrol/serialize.py @@ -32,13 +32,13 @@ def dumps( # also update the response with a new file handler to be # sure it acts as though it was never read. body = response.read(decode_content=False) - response._fp = io.BytesIO(body) # type: ignore[attr-defined] + response._fp = io.BytesIO(body) # type: ignore[assignment] response.length_remaining = len(body) data = { "response": { "body": body, # Empty bytestring if body is stored separately - "headers": {str(k): str(v) for k, v in response.headers.items()}, # type: ignore[no-untyped-call] + "headers": {str(k): str(v) for k, v in response.headers.items()}, "status": response.status, "version": response.version, "reason": str(response.reason), @@ -72,31 +72,14 @@ def loads( if not data: return None - # Determine what version of the serializer the data was serialized - # with - try: - ver, data = data.split(b",", 1) - except ValueError: - ver = b"cc=0" - - # Make sure that our "ver" is actually a version and isn't a false - # positive from a , being in the data stream. - if ver[:3] != b"cc=": - data = ver + data - ver = b"cc=0" - - # Get the version number out of the cc=N - verstr = ver.split(b"=", 1)[-1].decode("ascii") - - # Dispatch to the actual load method for the given version - try: - return getattr(self, f"_loads_v{verstr}")(request, data, body_file) # type: ignore[no-any-return] - - except AttributeError: - # This is a version we don't have a loads function for, so we'll - # just treat it as a miss and return None + # Previous versions of this library supported other serialization + # formats, but these have all been removed. + if not data.startswith(f"cc={self.serde_version},".encode()): return None + data = data[5:] + return self._loads_v4(request, data, body_file) + def prepare_response( self, request: PreparedRequest, @@ -149,49 +132,6 @@ def prepare_response( return HTTPResponse(body=body, preload_content=False, **cached["response"]) - def _loads_v0( - self, - request: PreparedRequest, - data: bytes, - body_file: IO[bytes] | None = None, - ) -> None: - # The original legacy cache data. This doesn't contain enough - # information to construct everything we need, so we'll treat this as - # a miss. 
- return None - - def _loads_v1( - self, - request: PreparedRequest, - data: bytes, - body_file: IO[bytes] | None = None, - ) -> HTTPResponse | None: - # The "v1" pickled cache format. This is no longer supported - # for security reasons, so we treat it as a miss. - return None - - def _loads_v2( - self, - request: PreparedRequest, - data: bytes, - body_file: IO[bytes] | None = None, - ) -> HTTPResponse | None: - # The "v2" compressed base64 cache format. - # This has been removed due to age and poor size/performance - # characteristics, so we treat it as a miss. - return None - - def _loads_v3( - self, - request: PreparedRequest, - data: bytes, - body_file: IO[bytes] | None = None, - ) -> None: - # Due to Python 2 encoding issues, it's impossible to know for sure - # exactly how to load v3 entries, thus we'll treat these as a miss so - # that they get rewritten out as v4 entries. - return None - def _loads_v4( self, request: PreparedRequest, diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index f00a8c02db4..bb962664d08 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -1,4 +1,4 @@ -CacheControl==0.13.1 # Make sure to update the license in pyproject.toml for this. +CacheControl==0.14.0 colorama==0.4.6 distlib==0.3.8 distro==1.9.0 From 531bf756080b5b97d8c4601c683b3558dd2414ef Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Fri, 3 May 2024 20:59:26 +0100 Subject: [PATCH 058/113] Upgrade platformdirs to 4.2.1 --- news/platformdirs.vendor.rst | 2 +- src/pip/_vendor/platformdirs/__init__.py | 76 ++++++++++++------------ src/pip/_vendor/platformdirs/__main__.py | 2 +- src/pip/_vendor/platformdirs/android.py | 27 +++++---- src/pip/_vendor/platformdirs/api.py | 33 ++++++---- src/pip/_vendor/platformdirs/macos.py | 11 ++-- src/pip/_vendor/platformdirs/unix.py | 37 +++++++----- src/pip/_vendor/platformdirs/version.py | 4 +- src/pip/_vendor/platformdirs/windows.py | 35 ++++++----- src/pip/_vendor/vendor.txt | 2 +- 10 files changed, 129 insertions(+), 100 deletions(-) diff --git a/news/platformdirs.vendor.rst b/news/platformdirs.vendor.rst index fb749d1ab8d..4c1af68710e 100644 --- a/news/platformdirs.vendor.rst +++ b/news/platformdirs.vendor.rst @@ -1 +1 @@ -Upgrade platformdirs to 4.2.0 +Upgrade platformdirs to 4.2.1 diff --git a/src/pip/_vendor/platformdirs/__init__.py b/src/pip/_vendor/platformdirs/__init__.py index 3da5ac1474f..d58dd2b7dde 100644 --- a/src/pip/_vendor/platformdirs/__init__.py +++ b/src/pip/_vendor/platformdirs/__init__.py @@ -1,6 +1,8 @@ """ -Utilities for determining application-specific dirs. See for details and -usage. +Utilities for determining application-specific dirs. + +See for details and usage. 
+ """ from __future__ import annotations @@ -20,22 +22,22 @@ def _set_platform_dir_class() -> type[PlatformDirsABC]: if sys.platform == "win32": - from pip._vendor.platformdirs.windows import Windows as Result + from pip._vendor.platformdirs.windows import Windows as Result # noqa: PLC0415 elif sys.platform == "darwin": - from pip._vendor.platformdirs.macos import MacOS as Result + from pip._vendor.platformdirs.macos import MacOS as Result # noqa: PLC0415 else: - from pip._vendor.platformdirs.unix import Unix as Result + from pip._vendor.platformdirs.unix import Unix as Result # noqa: PLC0415 if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system": if os.getenv("SHELL") or os.getenv("PREFIX"): return Result - from pip._vendor.platformdirs.android import _android_folder + from pip._vendor.platformdirs.android import _android_folder # noqa: PLC0415 if _android_folder() is not None: - from pip._vendor.platformdirs.android import Android + from pip._vendor.platformdirs.android import Android # noqa: PLC0415 - return Android # return to avoid redefinition of result + return Android # return to avoid redefinition of a result return Result @@ -507,7 +509,7 @@ def user_log_path( def user_documents_path() -> Path: - """:returns: documents path tied to the user""" + """:returns: documents a path tied to the user""" return PlatformDirs().user_documents_path @@ -585,41 +587,41 @@ def site_runtime_path( __all__ = [ - "__version__", - "__version_info__", - "PlatformDirs", "AppDirs", + "PlatformDirs", "PlatformDirsABC", - "user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "user_documents_dir", - "user_downloads_dir", - "user_pictures_dir", - "user_videos_dir", - "user_music_dir", - "user_desktop_dir", - "user_runtime_dir", - "site_data_dir", - "site_config_dir", + "__version__", + "__version_info__", "site_cache_dir", + "site_cache_path", + "site_config_dir", + "site_config_path", + "site_data_dir", + "site_data_path", "site_runtime_dir", - "user_data_path", - "user_config_path", + "site_runtime_path", + "user_cache_dir", "user_cache_path", - "user_state_path", - "user_log_path", + "user_config_dir", + "user_config_path", + "user_data_dir", + "user_data_path", + "user_desktop_dir", + "user_desktop_path", + "user_documents_dir", "user_documents_path", + "user_downloads_dir", "user_downloads_path", - "user_pictures_path", - "user_videos_path", + "user_log_dir", + "user_log_path", + "user_music_dir", "user_music_path", - "user_desktop_path", + "user_pictures_dir", + "user_pictures_path", + "user_runtime_dir", "user_runtime_path", - "site_data_path", - "site_config_path", - "site_cache_path", - "site_runtime_path", + "user_state_dir", + "user_state_path", + "user_videos_dir", + "user_videos_path", ] diff --git a/src/pip/_vendor/platformdirs/__main__.py b/src/pip/_vendor/platformdirs/__main__.py index 61342265b60..fa8a677a336 100644 --- a/src/pip/_vendor/platformdirs/__main__.py +++ b/src/pip/_vendor/platformdirs/__main__.py @@ -24,7 +24,7 @@ def main() -> None: - """Run main entry point.""" + """Run the main entry point.""" app_name = "MyApp" app_author = "MyCompany" diff --git a/src/pip/_vendor/platformdirs/android.py b/src/pip/_vendor/platformdirs/android.py index 4acdb63833f..fefafd32977 100644 --- a/src/pip/_vendor/platformdirs/android.py +++ b/src/pip/_vendor/platformdirs/android.py @@ -13,10 +13,11 @@ class Android(PlatformDirsABC): """ - Follows the guidance `from here `_. 
Makes use of the - `appname `, - `version `, - `ensure_exists `. + Follows the guidance `from here `_. + + Makes use of the `appname `, `version + `, `ensure_exists `. + """ @property @@ -44,7 +45,7 @@ def site_config_dir(self) -> str: @property def user_cache_dir(self) -> str: - """:return: cache directory tied to the user, e.g. e.g. ``/data/user///cache/``""" + """:return: cache directory tied to the user, e.g.,``/data/user///cache/``""" return self._append_app_name_and_version(cast(str, _android_folder()), "cache") @property @@ -119,13 +120,13 @@ def site_runtime_dir(self) -> str: def _android_folder() -> str | None: """:return: base folder for the Android OS or None if it cannot be found""" try: - # First try to get path to android app via pyjnius - from jnius import autoclass + # First try to get a path to android app via pyjnius + from jnius import autoclass # noqa: PLC0415 context = autoclass("android.content.Context") result: str | None = context.getFilesDir().getParentFile().getAbsolutePath() except Exception: # noqa: BLE001 - # if fails find an android folder looking path on the sys.path + # if fails find an android folder looking a path on the sys.path pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files") for path in sys.path: if pattern.match(path): @@ -141,7 +142,7 @@ def _android_documents_folder() -> str: """:return: documents folder for the Android OS""" # Get directories with pyjnius try: - from jnius import autoclass + from jnius import autoclass # noqa: PLC0415 context = autoclass("android.content.Context") environment = autoclass("android.os.Environment") @@ -157,7 +158,7 @@ def _android_downloads_folder() -> str: """:return: downloads folder for the Android OS""" # Get directories with pyjnius try: - from jnius import autoclass + from jnius import autoclass # noqa: PLC0415 context = autoclass("android.content.Context") environment = autoclass("android.os.Environment") @@ -173,7 +174,7 @@ def _android_pictures_folder() -> str: """:return: pictures folder for the Android OS""" # Get directories with pyjnius try: - from jnius import autoclass + from jnius import autoclass # noqa: PLC0415 context = autoclass("android.content.Context") environment = autoclass("android.os.Environment") @@ -189,7 +190,7 @@ def _android_videos_folder() -> str: """:return: videos folder for the Android OS""" # Get directories with pyjnius try: - from jnius import autoclass + from jnius import autoclass # noqa: PLC0415 context = autoclass("android.content.Context") environment = autoclass("android.os.Environment") @@ -205,7 +206,7 @@ def _android_music_folder() -> str: """:return: music folder for the Android OS""" # Get directories with pyjnius try: - from jnius import autoclass + from jnius import autoclass # noqa: PLC0415 context = autoclass("android.content.Context") environment = autoclass("android.os.Environment") diff --git a/src/pip/_vendor/platformdirs/api.py b/src/pip/_vendor/platformdirs/api.py index 031a38a3d36..c50caa648a6 100644 --- a/src/pip/_vendor/platformdirs/api.py +++ b/src/pip/_vendor/platformdirs/api.py @@ -11,10 +11,10 @@ from typing import Iterator, Literal -class PlatformDirsABC(ABC): +class PlatformDirsABC(ABC): # noqa: PLR0904 """Abstract base class for platform directories.""" - def __init__( # noqa: PLR0913 + def __init__( # noqa: PLR0913, PLR0917 self, appname: str | None = None, appauthor: str | None | Literal[False] = None, @@ -34,34 +34,47 @@ def __init__( # noqa: PLR0913 :param multipath: See `multipath`. :param opinion: See `opinion`. 
:param ensure_exists: See `ensure_exists`. + """ self.appname = appname #: The name of application. self.appauthor = appauthor """ - The name of the app author or distributing body for this application. Typically, it is the owning company name. - Defaults to `appname`. You may pass ``False`` to disable it. + The name of the app author or distributing body for this application. + + Typically, it is the owning company name. Defaults to `appname`. You may pass ``False`` to disable it. + """ self.version = version """ - An optional version path element to append to the path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this would typically be ``.``. + An optional version path element to append to the path. + + You might want to use this if you want multiple versions of your app to be able to run independently. If used, + this would typically be ``.``. + """ self.roaming = roaming """ - Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup - for roaming profiles, this user data will be synced on login (see - `here `_). + Whether to use the roaming appdata directory on Windows. + + That means that for users on a Windows network setup for roaming profiles, this user data will be synced on + login (see + `here `_). + """ self.multipath = multipath """ An optional parameter which indicates that the entire list of data dirs should be returned. + By default, the first item would only be returned. + """ self.opinion = opinion #: A flag to indicating to use opinionated values. self.ensure_exists = ensure_exists """ Optionally create the directory (and any missing parents) upon access if it does not exist. + By default, no directories are created. + """ def _append_app_name_and_version(self, *base: str) -> str: @@ -200,7 +213,7 @@ def user_log_path(self) -> Path: @property def user_documents_path(self) -> Path: - """:return: documents path tied to the user""" + """:return: documents a path tied to the user""" return Path(self.user_documents_dir) @property diff --git a/src/pip/_vendor/platformdirs/macos.py b/src/pip/_vendor/platformdirs/macos.py index b7b48808ca7..eb1ba5df1da 100644 --- a/src/pip/_vendor/platformdirs/macos.py +++ b/src/pip/_vendor/platformdirs/macos.py @@ -10,11 +10,14 @@ class MacOS(PlatformDirsABC): """ - Platform directories for the macOS operating system. Follows the guidance from `Apple documentation - `_. + Platform directories for the macOS operating system. + + Follows the guidance from + `Apple documentation `_. Makes use of the `appname `, `version `, `ensure_exists `. + """ @property @@ -28,7 +31,7 @@ def site_data_dir(self) -> str: :return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``. If we're using a Python binary managed by `Homebrew `_, the directory will be under the Homebrew prefix, e.g. ``/opt/homebrew/share/$appname/$version``. - If `multipath ` is enabled and we're in Homebrew, + If `multipath ` is enabled, and we're in Homebrew, the response is a multi-path string separated by ":", e.g. ``/opt/homebrew/share/$appname/$version:/Library/Application Support/$appname/$version`` """ @@ -60,7 +63,7 @@ def site_cache_dir(self) -> str: :return: cache directory shared by users, e.g. ``/Library/Caches/$appname/$version``. If we're using a Python binary managed by `Homebrew `_, the directory will be under the Homebrew prefix, e.g. ``/opt/homebrew/var/cache/$appname/$version``. 
- If `multipath ` is enabled and we're in Homebrew, + If `multipath ` is enabled, and we're in Homebrew, the response is a multi-path string separated by ":", e.g. ``/opt/homebrew/var/cache/$appname/$version:/Library/Caches/$appname/$version`` """ diff --git a/src/pip/_vendor/platformdirs/unix.py b/src/pip/_vendor/platformdirs/unix.py index ca4728e6079..9500ade614c 100644 --- a/src/pip/_vendor/platformdirs/unix.py +++ b/src/pip/_vendor/platformdirs/unix.py @@ -6,13 +6,13 @@ import sys from configparser import ConfigParser from pathlib import Path -from typing import Iterator +from typing import Iterator, NoReturn from .api import PlatformDirsABC if sys.platform == "win32": - def getuid() -> int: + def getuid() -> NoReturn: msg = "should only be used on Unix" raise RuntimeError(msg) @@ -20,17 +20,17 @@ def getuid() -> int: from os import getuid -class Unix(PlatformDirsABC): +class Unix(PlatformDirsABC): # noqa: PLR0904 """ - On Unix/Linux, we follow the - `XDG Basedir Spec `_. The spec allows - overriding directories with environment variables. The examples show are the default values, alongside the name of - the environment variable that overrides them. Makes use of the - `appname `, - `version `, - `multipath `, - `opinion `, - `ensure_exists `. + On Unix/Linux, we follow the `XDG Basedir Spec `_. + + The spec allows overriding directories with environment variables. The examples shown are the default values, + alongside the name of the environment variable that overrides them. Makes use of the `appname + `, `version `, `multipath + `, `opinion `, `ensure_exists + `. + """ @property @@ -205,17 +205,17 @@ def site_runtime_dir(self) -> str: @property def site_data_path(self) -> Path: - """:return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``""" + """:return: data path shared by users. Only return the first item, even if ``multipath`` is set to ``True``""" return self._first_item_as_path_if_multipath(self.site_data_dir) @property def site_config_path(self) -> Path: - """:return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``""" + """:return: config path shared by the users, returns the first item, even if ``multipath`` is set to ``True``""" return self._first_item_as_path_if_multipath(self.site_config_dir) @property def site_cache_path(self) -> Path: - """:return: cache path shared by users. Only return first item, even if ``multipath`` is set to ``True``""" + """:return: cache path shared by users. Only return the first item, even if ``multipath`` is set to ``True``""" return self._first_item_as_path_if_multipath(self.site_cache_dir) def _first_item_as_path_if_multipath(self, directory: str) -> Path: @@ -246,7 +246,12 @@ def _get_user_media_dir(env_var: str, fallback_tilde_path: str) -> str: def _get_user_dirs_folder(key: str) -> str | None: - """Return directory from user-dirs.dirs config file. See https://freedesktop.org/wiki/Software/xdg-user-dirs/.""" + """ + Return directory from user-dirs.dirs config file. + + See https://freedesktop.org/wiki/Software/xdg-user-dirs/. 
+ + """ user_dirs_config_path = Path(Unix().user_config_dir) / "user-dirs.dirs" if user_dirs_config_path.exists(): parser = ConfigParser() diff --git a/src/pip/_vendor/platformdirs/version.py b/src/pip/_vendor/platformdirs/version.py index cc1e34568ad..c418cd0c9af 100644 --- a/src/pip/_vendor/platformdirs/version.py +++ b/src/pip/_vendor/platformdirs/version.py @@ -12,5 +12,5 @@ __version_tuple__: VERSION_TUPLE version_tuple: VERSION_TUPLE -__version__ = version = '4.2.0' -__version_tuple__ = version_tuple = (4, 2, 0) +__version__ = version = '4.2.1' +__version_tuple__ = version_tuple = (4, 2, 1) diff --git a/src/pip/_vendor/platformdirs/windows.py b/src/pip/_vendor/platformdirs/windows.py index c62d0c8d2ba..d7bc96091a2 100644 --- a/src/pip/_vendor/platformdirs/windows.py +++ b/src/pip/_vendor/platformdirs/windows.py @@ -2,7 +2,6 @@ from __future__ import annotations -import ctypes import os import sys from functools import lru_cache @@ -16,15 +15,13 @@ class Windows(PlatformDirsABC): """ - `MSDN on where to store app data files - `_. - Makes use of the - `appname `, - `appauthor `, - `version `, - `roaming `, - `opinion `, - `ensure_exists `. + `MSDN on where to store app data files `_. + + Makes use of the `appname `, `appauthor + `, `version `, `roaming + `, `opinion `, `ensure_exists + `. + """ @property @@ -165,7 +162,7 @@ def get_win_folder_from_env_vars(csidl_name: str) -> str: def get_win_folder_if_csidl_name_not_env_var(csidl_name: str) -> str | None: - """Get folder for a CSIDL name that does not exist as an environment variable.""" + """Get a folder for a CSIDL name that does not exist as an environment variable.""" if csidl_name == "CSIDL_PERSONAL": return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents") # noqa: PTH118 @@ -189,6 +186,7 @@ def get_win_folder_from_registry(csidl_name: str) -> str: This is a fallback technique at best. I'm not sure if using the registry for these guarantees us the correct answer for all CSIDL_* names. + """ shell_folder_name = { "CSIDL_APPDATA": "AppData", @@ -205,7 +203,7 @@ def get_win_folder_from_registry(csidl_name: str) -> str: raise ValueError(msg) if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows raise NotImplementedError - import winreg + import winreg # noqa: PLC0415 key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders") directory, _ = winreg.QueryValueEx(key, shell_folder_name) @@ -218,6 +216,8 @@ def get_win_folder_via_ctypes(csidl_name: str) -> str: # Use 'CSIDL_PROFILE' (40) and append the default folder 'Downloads' instead. 
# https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid + import ctypes # noqa: PLC0415 + csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, @@ -250,10 +250,15 @@ def get_win_folder_via_ctypes(csidl_name: str) -> str: def _pick_get_win_folder() -> Callable[[str], str]: - if hasattr(ctypes, "windll"): - return get_win_folder_via_ctypes try: - import winreg # noqa: F401 + import ctypes # noqa: PLC0415 + except ImportError: + pass + else: + if hasattr(ctypes, "windll"): + return get_win_folder_via_ctypes + try: + import winreg # noqa: PLC0415, F401 except ImportError: return get_win_folder_from_env_vars else: diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index bb962664d08..fc60f5cae04 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -4,7 +4,7 @@ distlib==0.3.8 distro==1.9.0 msgpack==1.0.8 packaging==21.3 -platformdirs==4.2.0 +platformdirs==4.2.1 pyparsing==3.1.0 pyproject-hooks==1.0.0 requests==2.31.0 From f404164d7918b6939e5dceff1b4c41c3e8e1591b Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Fri, 3 May 2024 20:59:46 +0100 Subject: [PATCH 059/113] Upgrade pyparsing to 3.1.2 --- news/pyparsing.vendor.rst | 1 + src/pip/_vendor/pyparsing/__init__.py | 7 +- src/pip/_vendor/pyparsing/actions.py | 21 +- src/pip/_vendor/pyparsing/common.py | 9 +- src/pip/_vendor/pyparsing/core.py | 964 +++++++++--------- src/pip/_vendor/pyparsing/diagram/__init__.py | 2 +- src/pip/_vendor/pyparsing/exceptions.py | 81 +- src/pip/_vendor/pyparsing/helpers.py | 86 +- src/pip/_vendor/pyparsing/results.py | 211 ++-- src/pip/_vendor/pyparsing/testing.py | 118 ++- src/pip/_vendor/pyparsing/unicode.py | 13 +- src/pip/_vendor/pyparsing/util.py | 13 +- src/pip/_vendor/vendor.txt | 2 +- 13 files changed, 740 insertions(+), 788 deletions(-) create mode 100644 news/pyparsing.vendor.rst diff --git a/news/pyparsing.vendor.rst b/news/pyparsing.vendor.rst new file mode 100644 index 00000000000..d6aab47079f --- /dev/null +++ b/news/pyparsing.vendor.rst @@ -0,0 +1 @@ +Upgrade pyparsing to 3.1.2 diff --git a/src/pip/_vendor/pyparsing/__init__.py b/src/pip/_vendor/pyparsing/__init__.py index 88bc10ac18a..83937ebf1f1 100644 --- a/src/pip/_vendor/pyparsing/__init__.py +++ b/src/pip/_vendor/pyparsing/__init__.py @@ -120,8 +120,8 @@ def __repr__(self): return f"{__name__}.{type(self).__name__}({', '.join('{}={!r}'.format(*nv) for nv in zip(self._fields, self))})" -__version_info__ = version_info(3, 1, 0, "final", 1) -__version_time__ = "18 Jun 2023 14:05 UTC" +__version_info__ = version_info(3, 1, 2, "final", 1) +__version_time__ = "06 Mar 2024 07:08 UTC" __version__ = __version_info__.__version__ __versionTime__ = __version_time__ __author__ = "Paul McGuire " @@ -319,4 +319,7 @@ def __repr__(self): "unicodeString", "withAttribute", "withClass", + "common", + "unicode", + "testing", ] diff --git a/src/pip/_vendor/pyparsing/actions.py b/src/pip/_vendor/pyparsing/actions.py index ca6e4c6afb4..ce51b3957ca 100644 --- a/src/pip/_vendor/pyparsing/actions.py +++ b/src/pip/_vendor/pyparsing/actions.py @@ -111,7 +111,6 @@ def with_attribute(*args, **attr_dict):
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
- ''' div,div_end = make_html_tags("div") @@ -199,19 +198,9 @@ def with_class(classname, namespace=""): # pre-PEP8 compatibility symbols # fmt: off -@replaced_by_pep8(replace_with) -def replaceWith(): ... - -@replaced_by_pep8(remove_quotes) -def removeQuotes(): ... - -@replaced_by_pep8(with_attribute) -def withAttribute(): ... - -@replaced_by_pep8(with_class) -def withClass(): ... - -@replaced_by_pep8(match_only_at_col) -def matchOnlyAtCol(): ... - +replaceWith = replaced_by_pep8("replaceWith", replace_with) +removeQuotes = replaced_by_pep8("removeQuotes", remove_quotes) +withAttribute = replaced_by_pep8("withAttribute", with_attribute) +withClass = replaced_by_pep8("withClass", with_class) +matchOnlyAtCol = replaced_by_pep8("matchOnlyAtCol", match_only_at_col) # fmt: on diff --git a/src/pip/_vendor/pyparsing/common.py b/src/pip/_vendor/pyparsing/common.py index 7a666b276df..74faa46085a 100644 --- a/src/pip/_vendor/pyparsing/common.py +++ b/src/pip/_vendor/pyparsing/common.py @@ -206,7 +206,7 @@ class pyparsing_common: scientific notation and returns a float""" # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).setName("number").streamline() + number = (sci_real | real | signed_integer).set_name("number").streamline() """any numeric expression, returns the corresponding Python type""" fnumber = ( @@ -216,6 +216,13 @@ class pyparsing_common: ) """any int or real number, returned as float""" + ieee_float = ( + Regex(r"(?i)[+-]?((\d+\.?\d*(e[+-]?\d+)?)|nan|inf(inity)?)") + .set_name("ieee_float") + .set_parse_action(convert_to_float) + ) + """any floating-point literal (int, real number, infinity, or NaN), returned as float""" + identifier = Word(identchars, identbodychars).set_name("identifier") """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" diff --git a/src/pip/_vendor/pyparsing/core.py b/src/pip/_vendor/pyparsing/core.py index 8d5a856ecd6..fc403862354 100644 --- a/src/pip/_vendor/pyparsing/core.py +++ b/src/pip/_vendor/pyparsing/core.py @@ -571,6 +571,7 @@ def set_results_name( Example:: + integer = Word(nums) date_str = (integer.set_results_name("year") + '/' + integer.set_results_name("month") + '/' + integer.set_results_name("day")) @@ -610,9 +611,8 @@ def breaker(instring, loc, doActions=True, callPreParse=True): breaker._originalParseMethod = _parseMethod # type: ignore [attr-defined] self._parse = breaker # type: ignore [assignment] - else: - if hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod # type: ignore [attr-defined, assignment] + elif hasattr(self._parse, "_originalParseMethod"): + self._parse = self._parse._originalParseMethod # type: ignore [attr-defined, assignment] return self def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": @@ -692,13 +692,15 @@ def is_valid_date(instring, loc, toks): """ if list(fns) == [None]: self.parseAction = [] - else: - if not all(callable(fn) for fn in fns): - raise TypeError("parse actions must be callable") - self.parseAction = [_trim_arity(fn) for fn in fns] - self.callDuringTry = kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) + return self + + if not all(callable(fn) for fn in fns): + raise TypeError("parse actions must be callable") + self.parseAction = [_trim_arity(fn) for fn in fns] + self.callDuringTry = kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self def add_parse_action(self, *fns: ParseAction, 
**kwargs) -> "ParserElement": @@ -771,6 +773,7 @@ def _skipIgnorables(self, instring: str, loc: int) -> int: return loc exprsFound = True ignore_expr_fns = [e._parse for e in self.ignoreExprs] + last_loc = loc while exprsFound: exprsFound = False for ignore_fn in ignore_expr_fns: @@ -780,6 +783,10 @@ def _skipIgnorables(self, instring: str, loc: int) -> int: exprsFound = True except ParseException: pass + # check if all ignore exprs matched but didn't actually advance the parse location + if loc == last_loc: + break + last_loc = loc return loc def preParse(self, instring: str, loc: int) -> int: @@ -939,11 +946,9 @@ class to help type checking not_in_cache: bool - def get(self, *args): - ... + def get(self, *args): ... - def set(self, *args): - ... + def set(self, *args): ... # argument cache for optimizing repeated calls when backtracking through recursive expressions packrat_cache = ( @@ -1075,11 +1080,13 @@ def enable_left_recursion( elif cache_size_limit > 0: ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) # type: ignore[assignment] else: - raise NotImplementedError("Memo size of %s" % cache_size_limit) + raise NotImplementedError(f"Memo size of {cache_size_limit}") ParserElement._left_recursion_enabled = True @staticmethod - def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: + def enable_packrat( + cache_size_limit: Union[int, None] = 128, *, force: bool = False + ) -> None: """ Enables "packrat" parsing, which adds memoizing to the parsing logic. Repeated parse attempts at the same string location (which happens @@ -1114,13 +1121,16 @@ def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: ParserElement.disable_memoization() elif ParserElement._left_recursion_enabled: raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = _UnboundedCache() - else: - ParserElement.packrat_cache = _FifoCache(cache_size_limit) # type: ignore[assignment] - ParserElement._parse = ParserElement._parseCache + + if ParserElement._packratEnabled: + return + + ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = _UnboundedCache() + else: + ParserElement.packrat_cache = _FifoCache(cache_size_limit) # type: ignore[assignment] + ParserElement._parse = ParserElement._parseCache def parse_string( self, instring: str, parse_all: bool = False, *, parseAll: bool = False @@ -1278,9 +1288,9 @@ def scan_string( except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) def transform_string(self, instring: str, *, debug: bool = False) -> str: """ @@ -1310,23 +1320,27 @@ def transform_string(self, instring: str, *, debug: bool = False) -> str: try: for t, s, e in self.scan_string(instring, debug=debug): out.append(instring[lastE:s]) - if t: - if isinstance(t, ParseResults): - out += t.as_list() - elif isinstance(t, Iterable) and not isinstance(t, str_type): - out.extend(t) - else: - out.append(t) lastE = e + + if not t: + continue + + if isinstance(t, ParseResults): + out += t.as_list() + elif isinstance(t, Iterable) and not isinstance(t, str_type): + out.extend(t) + else: + 
out.append(t) + out.append(instring[lastE:]) out = [o for o in out if o] return "".join([str(s) for s in _flatten(out)]) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) def search_string( self, @@ -1364,9 +1378,9 @@ def search_string( except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) def split( self, @@ -1495,9 +1509,12 @@ def __mul__(self, other) -> "ParserElement": elif isinstance(other, tuple) and other[:1] == (Ellipsis,): other = ((0,) + other[1:] + (None,))[:2] + if not isinstance(other, (int, tuple)): + return NotImplemented + if isinstance(other, int): minElements, optElements = other, 0 - elif isinstance(other, tuple): + else: other = tuple(o if o is not Ellipsis else None for o in other) other = (other + (None, None))[:2] if other[0] is None: @@ -1514,8 +1531,6 @@ def __mul__(self, other) -> "ParserElement": optElements -= minElements else: return NotImplemented - else: - return NotImplemented if minElements < 0: raise ValueError("cannot multiply ParserElement by negative value") @@ -1704,8 +1719,8 @@ def __call__(self, name: typing.Optional[str] = None) -> "ParserElement": """ if name is not None: return self._setResultsName(name) - else: - return self.copy() + + return self.copy() def suppress(self) -> "ParserElement": """ @@ -1763,7 +1778,7 @@ def ignore(self, other: "ParserElement") -> "ParserElement": Example:: - patt = Word(alphas)[1, ...] + patt = Word(alphas)[...] 
patt.parse_string('ablaj /* comment */ lskjd') # -> ['ablaj'] @@ -1771,8 +1786,6 @@ def ignore(self, other: "ParserElement") -> "ParserElement": patt.parse_string('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] """ - import typing - if isinstance(other, str_type): other = Suppress(other) @@ -1880,11 +1893,14 @@ def set_name(self, name: str) -> "ParserElement": Example:: - Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) - Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + integer = Word(nums) + integer.parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) + + integer.set_name("integer") + integer.parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) """ self.customName = name - self.errmsg = "Expected " + self.name + self.errmsg = f"Expected {self.name}" if __diag__.enable_debug_on_named_expressions: self.set_debug() return self @@ -1950,9 +1966,9 @@ def parse_file( except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) def __eq__(self, other): if self is other: @@ -2130,6 +2146,7 @@ def run_tests( success = True NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) BOM = "\ufeff" + nlstr = "\n" for t in tests: if comment_specified and comment.matches(t, False) or comments and not t: comments.append( @@ -2139,7 +2156,7 @@ def run_tests( if not t: continue out = [ - "\n" + "\n".join(comments) if comments else "", + f"{nlstr}{nlstr.join(comments) if comments else ''}", pyparsing_test.with_line_numbers(t) if with_line_numbers else t, ] comments = [] @@ -2148,9 +2165,9 @@ def run_tests( t = NL.transform_string(t.lstrip(BOM)) result = self.parse_string(t, parse_all=parseAll) except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" + fatal = "(FATAL) " if isinstance(pe, ParseFatalException) else "" out.append(pe.explain()) - out.append("FAIL: " + str(pe)) + out.append(f"FAIL: {fatal}{pe}") if ParserElement.verbose_stacktrace: out.extend(traceback.format_tb(pe.__traceback__)) success = success and failureTests @@ -2237,91 +2254,42 @@ def create_diagram( show_groups=show_groups, diagram_kwargs=kwargs, ) - if isinstance(output_html, (str, Path)): - with open(output_html, "w", encoding="utf-8") as diag_file: - diag_file.write(railroad_to_html(railroad, embed=embed, **kwargs)) - else: + if not isinstance(output_html, (str, Path)): # we were passed a file-like object, just write to it output_html.write(railroad_to_html(railroad, embed=embed, **kwargs)) + return + + with open(output_html, "w", encoding="utf-8") as diag_file: + diag_file.write(railroad_to_html(railroad, embed=embed, **kwargs)) # Compatibility synonyms # fmt: off - @staticmethod - @replaced_by_pep8(inline_literals_using) - def inlineLiteralsUsing(): ... - - @staticmethod - @replaced_by_pep8(set_default_whitespace_chars) - def setDefaultWhitespaceChars(): ... - - @replaced_by_pep8(set_results_name) - def setResultsName(self): ... - - @replaced_by_pep8(set_break) - def setBreak(self): ... - - @replaced_by_pep8(set_parse_action) - def setParseAction(self): ... - - @replaced_by_pep8(add_parse_action) - def addParseAction(self): ... 
- - @replaced_by_pep8(add_condition) - def addCondition(self): ... - - @replaced_by_pep8(set_fail_action) - def setFailAction(self): ... - - @replaced_by_pep8(try_parse) - def tryParse(self): ... - - @staticmethod - @replaced_by_pep8(enable_left_recursion) - def enableLeftRecursion(): ... - - @staticmethod - @replaced_by_pep8(enable_packrat) - def enablePackrat(): ... - - @replaced_by_pep8(parse_string) - def parseString(self): ... - - @replaced_by_pep8(scan_string) - def scanString(self): ... - - @replaced_by_pep8(transform_string) - def transformString(self): ... - - @replaced_by_pep8(search_string) - def searchString(self): ... - - @replaced_by_pep8(ignore_whitespace) - def ignoreWhitespace(self): ... - - @replaced_by_pep8(leave_whitespace) - def leaveWhitespace(self): ... - - @replaced_by_pep8(set_whitespace_chars) - def setWhitespaceChars(self): ... - - @replaced_by_pep8(parse_with_tabs) - def parseWithTabs(self): ... - - @replaced_by_pep8(set_debug_actions) - def setDebugActions(self): ... - - @replaced_by_pep8(set_debug) - def setDebug(self): ... - - @replaced_by_pep8(set_name) - def setName(self): ... - - @replaced_by_pep8(parse_file) - def parseFile(self): ... - - @replaced_by_pep8(run_tests) - def runTests(self): ... - + inlineLiteralsUsing = replaced_by_pep8("inlineLiteralsUsing", inline_literals_using) + setDefaultWhitespaceChars = replaced_by_pep8( + "setDefaultWhitespaceChars", set_default_whitespace_chars + ) + setResultsName = replaced_by_pep8("setResultsName", set_results_name) + setBreak = replaced_by_pep8("setBreak", set_break) + setParseAction = replaced_by_pep8("setParseAction", set_parse_action) + addParseAction = replaced_by_pep8("addParseAction", add_parse_action) + addCondition = replaced_by_pep8("addCondition", add_condition) + setFailAction = replaced_by_pep8("setFailAction", set_fail_action) + tryParse = replaced_by_pep8("tryParse", try_parse) + enableLeftRecursion = replaced_by_pep8("enableLeftRecursion", enable_left_recursion) + enablePackrat = replaced_by_pep8("enablePackrat", enable_packrat) + parseString = replaced_by_pep8("parseString", parse_string) + scanString = replaced_by_pep8("scanString", scan_string) + transformString = replaced_by_pep8("transformString", transform_string) + searchString = replaced_by_pep8("searchString", search_string) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + setWhitespaceChars = replaced_by_pep8("setWhitespaceChars", set_whitespace_chars) + parseWithTabs = replaced_by_pep8("parseWithTabs", parse_with_tabs) + setDebugActions = replaced_by_pep8("setDebugActions", set_debug_actions) + setDebug = replaced_by_pep8("setDebug", set_debug) + setName = replaced_by_pep8("setName", set_name) + parseFile = replaced_by_pep8("parseFile", parse_file) + runTests = replaced_by_pep8("runTests", run_tests) canParseNext = can_parse_next resetCache = reset_cache defaultName = default_name @@ -2351,7 +2319,7 @@ def must_skip(t): def show_skip(t): if t._skipped.as_list()[-1:] == [""]: t.pop("_skipped") - t["_skipped"] = "missing <" + repr(self.anchor) + ">" + t["_skipped"] = f"missing <{self.anchor!r}>" return ( self.anchor + skipper().add_parse_action(must_skip) @@ -2402,9 +2370,9 @@ class Literal(Token): Example:: - Literal('blah').parse_string('blah') # -> ['blah'] - Literal('blah').parse_string('blahfooblah') # -> ['blah'] - Literal('blah').parse_string('bla') # -> Exception: Expected "blah" + Literal('abc').parse_string('abc') # -> 
['abc'] + Literal('abc').parse_string('abcdef') # -> ['abc'] + Literal('abc').parse_string('ab') # -> Exception: Expected "abc" For case-insensitive matching, use :class:`CaselessLiteral`. @@ -2434,7 +2402,7 @@ def __init__(self, match_string: str = "", *, matchString: str = ""): self.match = match_string self.matchLen = len(match_string) self.firstMatchChar = match_string[:1] - self.errmsg = "Expected " + self.name + self.errmsg = f"Expected {self.name}" self.mayReturnEmpty = False self.mayIndexError = False @@ -2548,40 +2516,37 @@ def parseImpl(self, instring, loc, doActions=True): or instring[loc + self.matchLen].upper() not in self.identChars ): return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ", was immediately followed by keyword character" - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - else: - if ( - instring[loc] == self.firstMatchChar - and self.matchLen == 1 - or instring.startswith(self.match, loc) - ): - if loc == 0 or instring[loc - 1] not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ( - ", keyword was immediately followed by keyword character" - ) - errloc = loc + self.matchLen + # followed by keyword char + errmsg += ", was immediately followed by keyword character" + errloc = loc + self.matchLen else: # preceded by keyword char errmsg += ", keyword was immediately preceded by keyword character" errloc = loc - 1 # else no match just raise plain exception + elif ( + instring[loc] == self.firstMatchChar + and self.matchLen == 1 + or instring.startswith(self.match, loc) + ): + if loc == 0 or instring[loc - 1] not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen] not in self.identChars + ): + return loc + self.matchLen, self.match + + # followed by keyword char + errmsg += ", keyword was immediately followed by keyword character" + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + raise ParseException(instring, errloc, errmsg, self) @staticmethod @@ -2613,7 +2578,7 @@ def __init__(self, match_string: str = "", *, matchString: str = ""): super().__init__(match_string.upper()) # Preserve the defining literal. 
self.returnString = match_string - self.errmsg = "Expected " + self.name + self.errmsg = f"Expected {self.name}" def parseImpl(self, instring, loc, doActions=True): if instring[loc : loc + self.matchLen].upper() == self.match: @@ -2788,7 +2753,7 @@ class Word(Token): integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) + capitalized_word = Word(alphas.upper(), alphas.lower()) # hostnames are alphanumeric, with leading alpha, and '-' hostname = Word(alphas, alphanums + '-') @@ -2865,7 +2830,7 @@ def __init__( self.maxLen = exact self.minLen = exact - self.errmsg = "Expected " + self.name + self.errmsg = f"Expected {self.name}" self.mayIndexError = False self.asKeyword = asKeyword if self.asKeyword: @@ -2879,7 +2844,7 @@ def __init__( re_leading_fragment = f"[{_collapse_string_to_ranges(self.initChars)}]" if self.bodyChars == self.initChars: - if max == 0: + if max == 0 and self.minLen == 1: repeat = "+" elif max == 1: repeat = "" @@ -2895,19 +2860,17 @@ def __init__( repeat = "" else: re_body_fragment = f"[{_collapse_string_to_ranges(self.bodyChars)}]" - if max == 0: + if max == 0 and self.minLen == 1: repeat = "*" elif max == 2: repeat = "?" if min <= 1 else "" else: if min != max: - repeat = f"{{{min - 1 if min > 0 else 0},{max - 1}}}" + repeat = f"{{{min - 1 if min > 0 else ''},{max - 1 if max > 0 else ''}}}" else: - repeat = f"{{{min - 1 if min > 0 else 0}}}" + repeat = f"{{{min - 1 if min > 0 else ''}}}" - self.reString = ( - f"{re_leading_fragment}" f"{re_body_fragment}" f"{repeat}" - ) + self.reString = f"{re_leading_fragment}{re_body_fragment}{repeat}" if self.asKeyword: self.reString = rf"\b{self.reString}\b" @@ -2924,10 +2887,11 @@ def _generateDefaultName(self) -> str: def charsAsStr(s): max_repr_len = 16 s = _collapse_string_to_ranges(s, re_escape=False) + if len(s) > max_repr_len: return s[: max_repr_len - 3] + "..." 
- else: - return s + + return s if self.initChars != self.bodyChars: base = f"W:({charsAsStr(self.initChars)}, {charsAsStr(self.bodyChars)})" @@ -2965,14 +2929,11 @@ def parseImpl(self, instring, loc, doActions=True): throwException = True elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: throwException = True - elif self.asKeyword: - if ( - start > 0 - and instring[start - 1] in bodychars - or loc < instrlen - and instring[loc] in bodychars - ): - throwException = True + elif self.asKeyword and ( + (start > 0 and instring[start - 1] in bodychars) + or (loc < instrlen and instring[loc] in bodychars) + ): + throwException = True if throwException: raise ParseException(instring, loc, self.errmsg, self) @@ -3072,7 +3033,7 @@ def __init__( "Regex may only be constructed with a string or a compiled RE object" ) - self.errmsg = "Expected " + self.name + self.errmsg = f"Expected {self.name}" self.mayIndexError = False self.asGroupList = asGroupList self.asMatch = asMatch @@ -3085,11 +3046,11 @@ def __init__( def re(self): if self._re: return self._re - else: - try: - return re.compile(self.pattern, self.flags) - except re.error: - raise ValueError(f"invalid pattern ({self.pattern!r}) passed to Regex") + + try: + return re.compile(self.pattern, self.flags) + except re.error: + raise ValueError(f"invalid pattern ({self.pattern!r}) passed to Regex") @cached_property def re_match(self): @@ -3110,9 +3071,10 @@ def parseImpl(self, instring, loc, doActions=True): loc = result.end() ret = ParseResults(result.group()) d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v + + for k, v in d.items(): + ret[k] = v + return loc, ret def parseImplAsGroupList(self, instring, loc, doActions=True): @@ -3204,6 +3166,7 @@ class QuotedString(Token): [['This is the "quote"']] [['This is the quote with "embedded" quotes']] """ + ws_map = dict(((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))) def __init__( @@ -3224,148 +3187,164 @@ def __init__( convertWhitespaceEscapes: bool = True, ): super().__init__() - escChar = escChar or esc_char - escQuote = escQuote or esc_quote - unquoteResults = unquoteResults and unquote_results - endQuoteChar = endQuoteChar or end_quote_char - convertWhitespaceEscapes = ( + esc_char = escChar or esc_char + esc_quote = escQuote or esc_quote + unquote_results = unquoteResults and unquote_results + end_quote_char = endQuoteChar or end_quote_char + convert_whitespace_escapes = ( convertWhitespaceEscapes and convert_whitespace_escapes ) quote_char = quoteChar or quote_char - # remove white space from quote chars - wont work anyway + # remove white space from quote chars quote_char = quote_char.strip() if not quote_char: raise ValueError("quote_char cannot be the empty string") - if endQuoteChar is None: - endQuoteChar = quote_char + if end_quote_char is None: + end_quote_char = quote_char else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: + end_quote_char = end_quote_char.strip() + if not end_quote_char: raise ValueError("end_quote_char cannot be the empty string") - self.quoteChar: str = quote_char - self.quoteCharLen: int = len(quote_char) - self.firstQuoteChar: str = quote_char[0] - self.endQuoteChar: str = endQuoteChar - self.endQuoteCharLen: int = len(endQuoteChar) - self.escChar: str = escChar or "" - self.escQuote: str = escQuote or "" - self.unquoteResults: bool = unquoteResults - self.convertWhitespaceEscapes: bool = convertWhitespaceEscapes + self.quote_char: str = quote_char + self.quote_char_len: int = len(quote_char) 
+ self.first_quote_char: str = quote_char[0] + self.end_quote_char: str = end_quote_char + self.end_quote_char_len: int = len(end_quote_char) + self.esc_char: str = esc_char or "" + self.has_esc_char: bool = esc_char is not None + self.esc_quote: str = esc_quote or "" + self.unquote_results: bool = unquote_results + self.convert_whitespace_escapes: bool = convert_whitespace_escapes self.multiline = multiline + self.re_flags = re.RegexFlag(0) - sep = "" - inner_pattern = "" + # fmt: off + # build up re pattern for the content between the quote delimiters + inner_pattern = [] - if escQuote: - inner_pattern += rf"{sep}(?:{re.escape(escQuote)})" - sep = "|" + if esc_quote: + inner_pattern.append(rf"(?:{re.escape(esc_quote)})") - if escChar: - inner_pattern += rf"{sep}(?:{re.escape(escChar)}.)" - sep = "|" - self.escCharReplacePattern = re.escape(escChar) + "(.)" + if esc_char: + inner_pattern.append(rf"(?:{re.escape(esc_char)}.)") - if len(self.endQuoteChar) > 1: - inner_pattern += ( - f"{sep}(?:" + if len(self.end_quote_char) > 1: + inner_pattern.append( + "(?:" + "|".join( - f"(?:{re.escape(self.endQuoteChar[:i])}(?!{re.escape(self.endQuoteChar[i:])}))" - for i in range(len(self.endQuoteChar) - 1, 0, -1) + f"(?:{re.escape(self.end_quote_char[:i])}(?!{re.escape(self.end_quote_char[i:])}))" + for i in range(len(self.end_quote_char) - 1, 0, -1) ) + ")" ) - sep = "|" - - self.flags = re.RegexFlag(0) - if multiline: - self.flags = re.MULTILINE | re.DOTALL - inner_pattern += ( - rf"{sep}(?:[^{_escape_regex_range_chars(self.endQuoteChar[0])}" - rf"{(_escape_regex_range_chars(escChar) if escChar is not None else '')}])" + if self.multiline: + self.re_flags |= re.MULTILINE | re.DOTALL + inner_pattern.append( + rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}" + rf"{(_escape_regex_range_chars(esc_char) if self.has_esc_char else '')}])" ) else: - inner_pattern += ( - rf"{sep}(?:[^{_escape_regex_range_chars(self.endQuoteChar[0])}\n\r" - rf"{(_escape_regex_range_chars(escChar) if escChar is not None else '')}])" + inner_pattern.append( + rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}\n\r" + rf"{(_escape_regex_range_chars(esc_char) if self.has_esc_char else '')}])" ) self.pattern = "".join( [ - re.escape(self.quoteChar), + re.escape(self.quote_char), "(?:", - inner_pattern, + '|'.join(inner_pattern), ")*", - re.escape(self.endQuoteChar), + re.escape(self.end_quote_char), ] ) - if self.unquoteResults: - if self.convertWhitespaceEscapes: + if self.unquote_results: + if self.convert_whitespace_escapes: self.unquote_scan_re = re.compile( - rf"({'|'.join(re.escape(k) for k in self.ws_map)})|({re.escape(self.escChar)}.)|(\n|.)", - flags=self.flags, + rf"({'|'.join(re.escape(k) for k in self.ws_map)})" + rf"|({re.escape(self.esc_char)}.)" + rf"|(\n|.)", + flags=self.re_flags, ) else: self.unquote_scan_re = re.compile( - rf"({re.escape(self.escChar)}.)|(\n|.)", flags=self.flags + rf"({re.escape(self.esc_char)}.)" + rf"|(\n|.)", + flags=self.re_flags ) + # fmt: on try: - self.re = re.compile(self.pattern, self.flags) + self.re = re.compile(self.pattern, self.re_flags) self.reString = self.pattern self.re_match = self.re.match except re.error: raise ValueError(f"invalid pattern {self.pattern!r} passed to Regex") - self.errmsg = "Expected " + self.name + self.errmsg = f"Expected {self.name}" self.mayIndexError = False self.mayReturnEmpty = True def _generateDefaultName(self) -> str: - if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type): - return f"string enclosed 
in {self.quoteChar!r}" + if self.quote_char == self.end_quote_char and isinstance( + self.quote_char, str_type + ): + return f"string enclosed in {self.quote_char!r}" - return f"quoted string, starting with {self.quoteChar} ending with {self.endQuoteChar}" + return f"quoted string, starting with {self.quote_char} ending with {self.end_quote_char}" def parseImpl(self, instring, loc, doActions=True): + # check first character of opening quote to see if that is a match + # before doing the more complicated regex match result = ( - instring[loc] == self.firstQuoteChar + instring[loc] == self.first_quote_char and self.re_match(instring, loc) or None ) if not result: raise ParseException(instring, loc, self.errmsg, self) + # get ending loc and matched string from regex matching result loc = result.end() ret = result.group() - if self.unquoteResults: + if self.unquote_results: # strip off quotes - ret = ret[self.quoteCharLen : -self.endQuoteCharLen] + ret = ret[self.quote_char_len : -self.end_quote_char_len] if isinstance(ret, str_type): - if self.convertWhitespaceEscapes: + # fmt: off + if self.convert_whitespace_escapes: + # as we iterate over matches in the input string, + # collect from whichever match group of the unquote_scan_re + # regex matches (only 1 group will match at any given time) ret = "".join( - self.ws_map[match.group(1)] - if match.group(1) - else match.group(2)[-1] - if match.group(2) + # match group 1 matches \t, \n, etc. + self.ws_map[match.group(1)] if match.group(1) + # match group 2 matches escaped characters + else match.group(2)[-1] if match.group(2) + # match group 3 matches any character else match.group(3) for match in self.unquote_scan_re.finditer(ret) ) else: ret = "".join( - match.group(1)[-1] if match.group(1) else match.group(2) + # match group 1 matches escaped characters + match.group(1)[-1] if match.group(1) + # match group 2 matches any character + else match.group(2) for match in self.unquote_scan_re.finditer(ret) ) + # fmt: on # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) + if self.esc_quote: + ret = ret.replace(self.esc_quote, self.end_quote_char) return loc, ret @@ -3407,8 +3386,8 @@ def __init__( if min < 1: raise ValueError( - "cannot specify a minimum length < 1; use " - "Opt(CharsNotIn()) if zero-length char group is permitted" + "cannot specify a minimum length < 1; use" + " Opt(CharsNotIn()) if zero-length char group is permitted" ) self.minLen = min @@ -3422,7 +3401,7 @@ def __init__( self.maxLen = exact self.minLen = exact - self.errmsg = "Expected " + self.name + self.errmsg = f"Expected {self.name}" self.mayReturnEmpty = self.minLen == 0 self.mayIndexError = False @@ -3495,7 +3474,7 @@ def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = ) # self.leave_whitespace() self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name + self.errmsg = f"Expected {self.name}" self.minLen = min @@ -3544,16 +3523,19 @@ def __init__(self, colno: int): self.col = colno def preParse(self, instring: str, loc: int) -> int: - if col(loc, instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - while ( - loc < instrlen - and instring[loc].isspace() - and col(loc, instring) != self.col - ): - loc += 1 + if col(loc, instring) == self.col: + return loc + + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + while ( + loc < instrlen + and instring[loc].isspace() + and col(loc, 
instring) != self.col + ): + loc += 1 + return loc def parseImpl(self, instring, loc, doActions=True): @@ -3599,12 +3581,14 @@ def __init__(self): def preParse(self, instring: str, loc: int) -> int: if loc == 0: return loc - else: - ret = self.skipper.preParse(instring, loc) - if "\n" in self.orig_whiteChars: - while instring[ret : ret + 1] == "\n": - ret = self.skipper.preParse(instring, ret + 1) - return ret + + ret = self.skipper.preParse(instring, loc) + + if "\n" in self.orig_whiteChars: + while instring[ret : ret + 1] == "\n": + ret = self.skipper.preParse(instring, ret + 1) + + return ret def parseImpl(self, instring, loc, doActions=True): if col(loc, instring) == 1: @@ -3645,10 +3629,10 @@ def __init__(self): self.errmsg = "Expected start of text" def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) + # see if entire string up to here is just whitespace and ignoreables + if loc != 0 and loc != self.preParse(instring, 0): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] @@ -3664,12 +3648,12 @@ def __init__(self): def parseImpl(self, instring, loc, doActions=True): if loc < len(instring): raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): + if loc == len(instring): return loc + 1, [] - elif loc > len(instring): + if loc > len(instring): return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) + + raise ParseException(instring, loc, self.errmsg, self) class WordStart(PositionToken): @@ -3802,7 +3786,7 @@ def ignore(self, other) -> ParserElement: return self def _generateDefaultName(self) -> str: - return f"{self.__class__.__name__}:({str(self.exprs)})" + return f"{type(self).__name__}:({self.exprs})" def streamline(self) -> ParserElement: if self.streamlined: @@ -3841,7 +3825,7 @@ def streamline(self) -> ParserElement: self.mayReturnEmpty |= other.mayReturnEmpty self.mayIndexError |= other.mayIndexError - self.errmsg = "Expected " + str(self) + self.errmsg = f"Expected {self}" return self @@ -3863,38 +3847,36 @@ def copy(self) -> ParserElement: return ret def _setResultsName(self, name, listAllMatches=False): - if ( + if not ( __diag__.warn_ungrouped_named_tokens_in_collection and Diagnostics.warn_ungrouped_named_tokens_in_collection not in self.suppress_warnings_ ): - for e in self.exprs: - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection + return super()._setResultsName(name, listAllMatches) + + for e in self.exprs: + if ( + isinstance(e, ParserElement) + and e.resultsName + and ( + Diagnostics.warn_ungrouped_named_tokens_in_collection not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) + ) + ): + warning = ( + "warn_ungrouped_named_tokens_in_collection:" + f" setting results name {name!r} on {type(self).__name__} expression" + f" collides with {e.resultsName!r} on contained expression" + ) + warnings.warn(warning, stacklevel=3) + break return super()._setResultsName(name, listAllMatches) # Compatibility synonyms # fmt: off - @replaced_by_pep8(leave_whitespace) - def leaveWhitespace(self): ... 
- - @replaced_by_pep8(ignore_whitespace) - def ignoreWhitespace(self): ... + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) # fmt: on @@ -3931,18 +3913,18 @@ def __init__( if exprs and Ellipsis in exprs: tmp = [] for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg: ParserElement = typing.cast( - ParseExpression, (Empty() + exprs[i + 1]) - ).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception( - "cannot construct And with sequence ending in ..." - ) - else: + if expr is not Ellipsis: tmp.append(expr) + continue + + if i < len(exprs) - 1: + skipto_arg: ParserElement = typing.cast( + ParseExpression, (Empty() + exprs[i + 1]) + ).exprs[-1] + tmp.append(SkipTo(skipto_arg)("_skipped*")) + continue + + raise Exception("cannot construct And with sequence ending in ...") exprs[:] = tmp super().__init__(exprs, savelist) if self.exprs: @@ -3961,25 +3943,24 @@ def __init__( def streamline(self) -> ParserElement: # collapse any _PendingSkip's - if self.exprs: - if any( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1] - ): - deleted_expr_marker = NoMatch() - for i, e in enumerate(self.exprs[:-1]): - if e is deleted_expr_marker: - continue - if ( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - ): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = deleted_expr_marker - self.exprs = [e for e in self.exprs if e is not deleted_expr_marker] + if self.exprs and any( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1] + ): + deleted_expr_marker = NoMatch() + for i, e in enumerate(self.exprs[:-1]): + if e is deleted_expr_marker: + continue + if ( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + ): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = deleted_expr_marker + self.exprs = [e for e in self.exprs if e is not deleted_expr_marker] super().streamline() @@ -4058,7 +4039,7 @@ def _generateDefaultName(self) -> str: # strip off redundant inner {}'s while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": inner = inner[1:-1] - return "{" + inner + "}" + return f"{{{inner}}}" class Or(ParseExpression): @@ -4179,10 +4160,8 @@ def parseImpl(self, instring, loc, doActions=True): if maxExcLoc == loc: maxException.msg = self.errmsg raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) + + raise ParseException(instring, loc, "no defined alternatives to match", self) def __ixor__(self, other): if isinstance(other, str_type): @@ -4192,7 +4171,7 @@ def __ixor__(self, other): return self.append(other) # Or([self, other]) def _generateDefaultName(self) -> str: - return "{" + " ^ ".join(str(e) for e in self.exprs) + "}" + return f"{{{' ^ '.join(str(e) for e in self.exprs)}}}" def _setResultsName(self, name, listAllMatches=False): if ( @@ -4206,17 +4185,14 @@ def _setResultsName(self, name, listAllMatches=False): not in e.suppress_warnings_ for e in self.exprs ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - 
"warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, + warning = ( + "warn_multiple_tokens_in_named_alternation:" + f" setting results name {name!r} on {type(self).__name__} expression" + " will return a list of all parsed tokens in an And alternative," + " in prior versions only the first token was returned; enclose" + " contained argument in Group" ) + warnings.warn(warning, stacklevel=3) return super()._setResultsName(name, listAllMatches) @@ -4269,11 +4245,7 @@ def parseImpl(self, instring, loc, doActions=True): for e in self.exprs: try: - return e._parse( - instring, - loc, - doActions, - ) + return e._parse(instring, loc, doActions) except ParseFatalException as pfe: pfe.__traceback__ = None pfe.parser_element = e @@ -4295,10 +4267,8 @@ def parseImpl(self, instring, loc, doActions=True): if maxExcLoc == loc: maxException.msg = self.errmsg raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) + + raise ParseException(instring, loc, "no defined alternatives to match", self) def __ior__(self, other): if isinstance(other, str_type): @@ -4308,7 +4278,7 @@ def __ior__(self, other): return self.append(other) # MatchFirst([self, other]) def _generateDefaultName(self) -> str: - return "{" + " | ".join(str(e) for e in self.exprs) + "}" + return f"{{{' | '.join(str(e) for e in self.exprs)}}}" def _setResultsName(self, name, listAllMatches=False): if ( @@ -4322,17 +4292,14 @@ def _setResultsName(self, name, listAllMatches=False): not in e.suppress_warnings_ for e in self.exprs ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, + warning = ( + "warn_multiple_tokens_in_named_alternation:" + f" setting results name {name!r} on {type(self).__name__} expression" + " will return a list of all parsed tokens in an And alternative," + " in prior versions only the first token was returned; enclose" + " contained argument in Group" ) + warnings.warn(warning, stacklevel=3) return super()._setResultsName(name, listAllMatches) @@ -4508,7 +4475,7 @@ def parseImpl(self, instring, loc, doActions=True): return loc, total_results def _generateDefaultName(self) -> str: - return "{" + " & ".join(str(e) for e in self.exprs) + "}" + return f"{{{' & '.join(str(e) for e in self.exprs)}}}" class ParseElementEnhance(ParserElement): @@ -4543,15 +4510,17 @@ def recurse(self) -> List[ParserElement]: return [self.expr] if self.expr is not None else [] def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - try: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - except ParseBaseException as pbe: - pbe.msg = self.errmsg - raise - else: + if self.expr is None: raise ParseException(instring, loc, "No expression defined", self) + try: + return self.expr._parse(instring, loc, doActions, callPreParse=False) + except ParseBaseException as pbe: + if not isinstance(self, Forward) or self.customName is not None: + if self.errmsg: + pbe.msg = self.errmsg + raise + def leave_whitespace(self, recursive: bool = True) -> ParserElement: super().leave_whitespace(recursive) @@ -4571,15 +4540,11 @@ def ignore_whitespace(self, recursive: bool = True) -> ParserElement: return self def ignore(self, other) -> 
ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: + if not isinstance(other, Suppress) or other not in self.ignoreExprs: super().ignore(other) if self.expr is not None: self.expr.ignore(self.ignoreExprs[-1]) + return self def streamline(self) -> ParserElement: @@ -4609,15 +4574,12 @@ def validate(self, validateTrace=None) -> None: self._checkRecursion([]) def _generateDefaultName(self) -> str: - return f"{self.__class__.__name__}:({str(self.expr)})" + return f"{type(self).__name__}:({self.expr})" # Compatibility synonyms # fmt: off - @replaced_by_pep8(leave_whitespace) - def leaveWhitespace(self): ... - - @replaced_by_pep8(ignore_whitespace) - def ignoreWhitespace(self): ... + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) # fmt: on @@ -4827,7 +4789,7 @@ def __init__( retreat = 0 self.exact = True self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) + self.errmsg = f"not preceded by {expr}" self.skipWhitespace = False self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) @@ -4837,23 +4799,24 @@ def parseImpl(self, instring, loc=0, doActions=True): raise ParseException(instring, loc, self.errmsg) start = loc - self.retreat _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[max(0, loc - self.retreat) : loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat + 1) + 1): - try: - # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) - _, ret = test_expr._parse( - instring_slice, len(instring_slice) - offset - ) - except ParseBaseException as pbe: - last_expr = pbe - else: - break + return loc, ret + + # retreat specified a maximum lookbehind window, iterate + test_expr = self.expr + StringEnd() + instring_slice = instring[max(0, loc - self.retreat) : loc] + last_expr = ParseException(instring, loc, self.errmsg) + + for offset in range(1, min(loc, self.retreat + 1) + 1): + try: + # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) + _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset) + except ParseBaseException as pbe: + last_expr = pbe else: - raise last_expr + break + else: + raise last_expr + return loc, ret @@ -4931,7 +4894,7 @@ def __init__(self, expr: Union[ParserElement, str]): self.skipWhitespace = False self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + str(self.expr) + self.errmsg = f"Found unwanted token, {self.expr}" def parseImpl(self, instring, loc, doActions=True): if self.expr.can_parse_next(instring, loc, do_actions=doActions): @@ -4939,7 +4902,7 @@ def parseImpl(self, instring, loc, doActions=True): return loc, [] def _generateDefaultName(self) -> str: - return "~{" + str(self.expr) + "}" + return f"~{{{self.expr}}}" class _MultipleMatch(ParseElementEnhance): @@ -5002,19 +4965,18 @@ def _setResultsName(self, name, listAllMatches=False): if ( isinstance(e, ParserElement) and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ + and ( + Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ) ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - 
"collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, + warning = ( + "warn_ungrouped_named_tokens_in_collection:" + f" setting results name {name!r} on {type(self).__name__} expression" + f" collides with {e.resultsName!r} on contained expression" ) + warnings.warn(warning, stacklevel=3) + break return super()._setResultsName(name, listAllMatches) @@ -5048,7 +5010,7 @@ class OneOrMore(_MultipleMatch): """ def _generateDefaultName(self) -> str: - return "{" + str(self.expr) + "}..." + return f"{{{self.expr}}}..." class ZeroOrMore(_MultipleMatch): @@ -5082,7 +5044,7 @@ def parseImpl(self, instring, loc, doActions=True): return loc, ParseResults([], name=self.resultsName) def _generateDefaultName(self) -> str: - return "[" + str(self.expr) + "]..." + return f"[{self.expr}]..." class DelimitedList(ParseElementEnhance): @@ -5117,12 +5079,11 @@ def __init__( expr = ParserElement._literalStringClass(expr) expr = typing.cast(ParserElement, expr) - if min is not None: - if min < 1: - raise ValueError("min must be greater than 0") - if max is not None: - if min is not None and max < min: - raise ValueError("max must be greater than, or equal to min") + if min is not None and min < 1: + raise ValueError("min must be greater than 0") + + if max is not None and min is not None and max < min: + raise ValueError("max must be greater than, or equal to min") self.content = expr self.raw_delim = str(delim) @@ -5147,7 +5108,8 @@ def __init__( super().__init__(delim_list_expr, savelist=True) def _generateDefaultName(self) -> str: - return "{0} [{1} {0}]...".format(self.content.streamline(), self.raw_delim) + content_expr = self.content.streamline() + return f"{content_expr} [{self.raw_delim} {content_expr}]..." 
class _NullToken: @@ -5229,7 +5191,7 @@ def _generateDefaultName(self) -> str: # strip off redundant inner {}'s while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": inner = inner[1:-1] - return "[" + inner + "]" + return f"[{inner}]" Optional = Opt @@ -5308,8 +5270,7 @@ def __init__( ): super().__init__(other) failOn = failOn or fail_on - if ignore is not None: - self.ignore(ignore) + self.ignoreExpr = ignore self.mayReturnEmpty = True self.mayIndexError = False self.includeMatch = include @@ -5319,6 +5280,20 @@ def __init__( else: self.failOn = failOn self.errmsg = "No match found for " + str(self.expr) + self.ignorer = Empty().leave_whitespace() + self._update_ignorer() + + def _update_ignorer(self): + # rebuild internal ignore expr from current ignore exprs and assigned ignoreExpr + self.ignorer.ignoreExprs.clear() + for e in self.expr.ignoreExprs: + self.ignorer.ignore(e) + if self.ignoreExpr: + self.ignorer.ignore(self.ignoreExpr) + + def ignore(self, expr): + super().ignore(expr) + self._update_ignorer() def parseImpl(self, instring, loc, doActions=True): startloc = loc @@ -5327,7 +5302,7 @@ def parseImpl(self, instring, loc, doActions=True): self_failOn_canParseNext = ( self.failOn.canParseNext if self.failOn is not None else None ) - self_preParse = self.preParse if self.callPreparse else None + ignorer_try_parse = self.ignorer.try_parse if self.ignorer.ignoreExprs else None tmploc = loc while tmploc <= instrlen: @@ -5336,9 +5311,18 @@ def parseImpl(self, instring, loc, doActions=True): if self_failOn_canParseNext(instring, tmploc): break - if self_preParse is not None: - # skip grammar-ignored expressions - tmploc = self_preParse(instring, tmploc) + if ignorer_try_parse is not None: + # advance past ignore expressions + prev_tmploc = tmploc + while 1: + try: + tmploc = ignorer_try_parse(instring, tmploc) + except ParseBaseException: + break + # see if all ignorers matched, but didn't actually ignore anything + if tmploc == prev_tmploc: + break + prev_tmploc = tmploc try: self_expr_parse(instring, tmploc, doActions=False, callPreParse=False) @@ -5542,14 +5526,13 @@ def parseImpl(self, instring, loc, doActions=True): del memo[peek_key] return prev_loc, prev_peek.copy() # the match did get better: see if we can improve further - else: - if doActions: - try: - memo[act_key] = super().parseImpl(instring, loc, True) - except ParseException as e: - memo[peek_key] = memo[act_key] = (new_loc, e) - raise - prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek + if doActions: + try: + memo[act_key] = super().parseImpl(instring, loc, True) + except ParseException as e: + memo[peek_key] = memo[act_key] = (new_loc, e) + raise + prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek def leave_whitespace(self, recursive: bool = True) -> ParserElement: self.skipWhitespace = False @@ -5593,7 +5576,7 @@ def _generateDefaultName(self) -> str: else: retString = "None" finally: - return self.__class__.__name__ + ": " + retString + return f"{type(self).__name__}: {retString}" def copy(self) -> ParserElement: if self.expr is not None: @@ -5604,29 +5587,26 @@ def copy(self) -> ParserElement: return ret def _setResultsName(self, name, list_all_matches=False): + # fmt: off if ( __diag__.warn_name_set_on_empty_Forward - and Diagnostics.warn_name_set_on_empty_Forward - not in self.suppress_warnings_ + and Diagnostics.warn_name_set_on_empty_Forward not in self.suppress_warnings_ + and self.expr is None ): - if self.expr is None: - warnings.warn( - "{}: setting results name {!r} on {} expression " 
- "that has no contained expression".format( - "warn_name_set_on_empty_Forward", name, type(self).__name__ - ), - stacklevel=3, - ) + warning = ( + "warn_name_set_on_empty_Forward:" + f" setting results name {name!r} on {type(self).__name__} expression" + " that has no contained expression" + ) + warnings.warn(warning, stacklevel=3) + # fmt: on return super()._setResultsName(name, list_all_matches) # Compatibility synonyms # fmt: off - @replaced_by_pep8(leave_whitespace) - def leaveWhitespace(self): ... - - @replaced_by_pep8(ignore_whitespace) - def ignoreWhitespace(self): ... + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) # fmt: on @@ -5730,8 +5710,8 @@ def postParse(self, instring, loc, tokenlist): if isinstance(tokenlist, ParseResults) else list(tokenlist) ) - else: - return [tokenlist] + + return [tokenlist] class Dict(TokenConverter): @@ -5817,8 +5797,8 @@ def postParse(self, instring, loc, tokenlist): if self._asPythonDict: return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() - else: - return [tokenlist] if self.resultsName else tokenlist + + return [tokenlist] if self.resultsName else tokenlist class Suppress(TokenConverter): @@ -5860,14 +5840,14 @@ def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): def __add__(self, other) -> "ParserElement": if isinstance(self.expr, _PendingSkip): return Suppress(SkipTo(other)) + other - else: - return super().__add__(other) + + return super().__add__(other) def __sub__(self, other) -> "ParserElement": if isinstance(self.expr, _PendingSkip): return Suppress(SkipTo(other)) - other - else: - return super().__sub__(other) + + return super().__sub__(other) def postParse(self, instring, loc, tokenlist): return [] @@ -5907,7 +5887,7 @@ def z(*paArgs): thisFunc = f.__name__ s, l, t = paArgs[-3:] if len(paArgs) > 3: - thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc + thisFunc = f"{type(paArgs[0]).__name__}.{thisFunc}" sys.stderr.write(f">>entering {thisFunc}(line: {line(l, s)!r}, {l}, {t!r})\n") try: ret = f(*paArgs) @@ -5975,8 +5955,8 @@ def srange(s: str) -> str: - any combination of the above (``'aeiouy'``, ``'a-zA-Z0-9_$'``, etc.) """ - _expanded = ( - lambda p: p + _expanded = lambda p: ( + p if not isinstance(p, ParseResults) else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) ) @@ -6100,16 +6080,8 @@ def autoname_elements() -> None: lineEnd = line_end stringStart = string_start stringEnd = string_end - -@replaced_by_pep8(null_debug_action) -def nullDebugAction(): ... - -@replaced_by_pep8(trace_parse_action) -def traceParseAction(): ... - -@replaced_by_pep8(condition_as_parse_action) -def conditionAsParseAction(): ... - -@replaced_by_pep8(token_map) -def tokenMap(): ... 
+nullDebugAction = replaced_by_pep8("nullDebugAction", null_debug_action) +traceParseAction = replaced_by_pep8("traceParseAction", trace_parse_action) +conditionAsParseAction = replaced_by_pep8("conditionAsParseAction", condition_as_parse_action) +tokenMap = replaced_by_pep8("tokenMap", token_map) # fmt: on diff --git a/src/pip/_vendor/pyparsing/diagram/__init__.py b/src/pip/_vendor/pyparsing/diagram/__init__.py index 83f9018ee93..6074d2bfd0f 100644 --- a/src/pip/_vendor/pyparsing/diagram/__init__.py +++ b/src/pip/_vendor/pyparsing/diagram/__init__.py @@ -473,7 +473,7 @@ def _to_diagram_element( :param show_groups: bool flag indicating whether to show groups using bounding box """ exprs = element.recurse() - name = name_hint or element.customName or element.__class__.__name__ + name = name_hint or element.customName or type(element).__name__ # Python's id() is used to provide a unique identifier for elements el_id = id(element) diff --git a/src/pip/_vendor/pyparsing/exceptions.py b/src/pip/_vendor/pyparsing/exceptions.py index 12219f124ae..1aaea56f54d 100644 --- a/src/pip/_vendor/pyparsing/exceptions.py +++ b/src/pip/_vendor/pyparsing/exceptions.py @@ -14,11 +14,13 @@ from .unicode import pyparsing_unicode as ppu -class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic): +class _ExceptionWordUnicodeSet( + ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic +): pass -_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums) +_extract_alphanums = _collapse_string_to_ranges(_ExceptionWordUnicodeSet.alphanums) _exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") @@ -86,41 +88,39 @@ def explain_exception(exc, depth=16): ret.append(" " * (exc.column - 1) + "^") ret.append(f"{type(exc).__name__}: {exc}") - if depth > 0: - callers = inspect.getinnerframes(exc.__traceback__, context=depth) - seen = set() - for i, ff in enumerate(callers[-depth:]): - frm = ff[0] - - f_self = frm.f_locals.get("self", None) - if isinstance(f_self, ParserElement): - if not frm.f_code.co_name.startswith( - ("parseImpl", "_parseNoCache") - ): - continue - if id(f_self) in seen: - continue - seen.add(id(f_self)) - - self_type = type(f_self) - ret.append( - f"{self_type.__module__}.{self_type.__name__} - {f_self}" - ) - - elif f_self is not None: - self_type = type(f_self) - ret.append(f"{self_type.__module__}.{self_type.__name__}") + if depth <= 0: + return "\n".join(ret) - else: - code = frm.f_code - if code.co_name in ("wrapper", ""): - continue + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen = set() + for ff in callers[-depth:]: + frm = ff[0] + + f_self = frm.f_locals.get("self", None) + if isinstance(f_self, ParserElement): + if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")): + continue + if id(f_self) in seen: + continue + seen.add(id(f_self)) + + self_type = type(f_self) + ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}") + + elif f_self is not None: + self_type = type(f_self) + ret.append(f"{self_type.__module__}.{self_type.__name__}") + + else: + code = frm.f_code + if code.co_name in ("wrapper", ""): + continue - ret.append(code.co_name) + ret.append(code.co_name) - depth -= 1 - if not depth: - break + depth -= 1 + if not depth: + break return "\n".join(ret) @@ -220,8 +220,10 @@ def explain(self, depth=16) -> str: Example:: + # an expression to parse 3 integers expr = pp.Word(pp.nums) * 3 try: + # a failing parse - the third integer is prefixed with "A" 
expr.parse_string("123 456 A789") except pp.ParseException as pe: print(pe.explain(depth=0)) @@ -244,8 +246,7 @@ def explain(self, depth=16) -> str: return self.explain_exception(self, depth) # fmt: off - @replaced_by_pep8(mark_input_line) - def markInputline(self): ... + markInputline = replaced_by_pep8("markInputline", mark_input_line) # fmt: on @@ -255,16 +256,16 @@ class ParseException(ParseBaseException): Example:: + integer = Word(nums).set_name("integer") try: - Word(nums).set_name("integer").parse_string("ABC") + integer.parse_string("ABC") except ParseException as pe: print(pe) - print("column: {}".format(pe.column)) + print(f"column: {pe.column}") prints:: - Expected integer (at char 0), (line:1, col:1) - column: 1 + Expected integer (at char 0), (line:1, col:1) column: 1 """ diff --git a/src/pip/_vendor/pyparsing/helpers.py b/src/pip/_vendor/pyparsing/helpers.py index 018f0d6ac86..dcfdb8fe4bf 100644 --- a/src/pip/_vendor/pyparsing/helpers.py +++ b/src/pip/_vendor/pyparsing/helpers.py @@ -74,7 +74,7 @@ def count_field_parse_action(s, l, t): intExpr = intExpr.copy() intExpr.set_name("arrayLen") intExpr.add_parse_action(count_field_parse_action, call_during_try=True) - return (intExpr + array_expr).set_name("(len) " + str(expr) + "...") + return (intExpr + array_expr).set_name(f"(len) {expr}...") def match_previous_literal(expr: ParserElement) -> ParserElement: @@ -95,15 +95,17 @@ def match_previous_literal(expr: ParserElement) -> ParserElement: rep = Forward() def copy_token_to_repeater(s, l, t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.as_list()) - rep << And(Literal(tt) for tt in tflat) - else: + if not t: rep << Empty() + return + + if len(t) == 1: + rep << t[0] + return + + # flatten t tokens + tflat = _flatten(t.as_list()) + rep << And(Literal(tt) for tt in tflat) expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) rep.set_name("(prev) " + str(expr)) @@ -230,7 +232,7 @@ def one_of( if isequal(other, cur): del symbols[i + j + 1] break - elif masks(cur, other): + if masks(cur, other): del symbols[i + j + 1] symbols.insert(i, other) break @@ -534,7 +536,9 @@ def nested_expr( ) else: ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.set_name("nested %s%s expression" % (opener, closer)) + ret.set_name(f"nested {opener}{closer} expression") + # don't override error message from content expressions + ret.errmsg = None return ret @@ -580,7 +584,7 @@ def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")) ) closeTag = Combine(Literal("", adjacent=False) - openTag.set_name("<%s>" % resname) + openTag.set_name(f"<{resname}>") # add start results name in parse action now that ungrouped names are not reported at two levels openTag.add_parse_action( lambda t: t.__setitem__( @@ -589,7 +593,7 @@ def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")) ) closeTag = closeTag( "end" + "".join(resname.replace(":", " ").title().split()) - ).set_name("" % resname) + ).set_name(f"") openTag.tag = resname closeTag.tag = resname openTag.tag_body = SkipTo(closeTag()) @@ -777,7 +781,7 @@ def parseImpl(self, instring, loc, doActions=True): rpar = Suppress(rpar) # if lpar and rpar are not suppressed, wrap in group - if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)): + if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)): lastExpr = base_expr | Group(lpar + ret + rpar) else: lastExpr = base_expr | (lpar + ret + rpar) @@ -787,7 
+791,7 @@ def parseImpl(self, instring, loc, doActions=True): pa: typing.Optional[ParseAction] opExpr1: ParserElement opExpr2: ParserElement - for i, operDef in enumerate(op_list): + for operDef in op_list: opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] # type: ignore[assignment] if isinstance(opExpr, str_type): opExpr = ParserElement._literalStringClass(opExpr) @@ -1058,43 +1062,17 @@ def delimited_list( cppStyleComment = cpp_style_comment javaStyleComment = java_style_comment pythonStyleComment = python_style_comment - -@replaced_by_pep8(DelimitedList) -def delimitedList(): ... - -@replaced_by_pep8(DelimitedList) -def delimited_list(): ... - -@replaced_by_pep8(counted_array) -def countedArray(): ... - -@replaced_by_pep8(match_previous_literal) -def matchPreviousLiteral(): ... - -@replaced_by_pep8(match_previous_expr) -def matchPreviousExpr(): ... - -@replaced_by_pep8(one_of) -def oneOf(): ... - -@replaced_by_pep8(dict_of) -def dictOf(): ... - -@replaced_by_pep8(original_text_for) -def originalTextFor(): ... - -@replaced_by_pep8(nested_expr) -def nestedExpr(): ... - -@replaced_by_pep8(make_html_tags) -def makeHTMLTags(): ... - -@replaced_by_pep8(make_xml_tags) -def makeXMLTags(): ... - -@replaced_by_pep8(replace_html_entity) -def replaceHTMLEntity(): ... - -@replaced_by_pep8(infix_notation) -def infixNotation(): ... +delimitedList = replaced_by_pep8("delimitedList", DelimitedList) +delimited_list = replaced_by_pep8("delimited_list", DelimitedList) +countedArray = replaced_by_pep8("countedArray", counted_array) +matchPreviousLiteral = replaced_by_pep8("matchPreviousLiteral", match_previous_literal) +matchPreviousExpr = replaced_by_pep8("matchPreviousExpr", match_previous_expr) +oneOf = replaced_by_pep8("oneOf", one_of) +dictOf = replaced_by_pep8("dictOf", dict_of) +originalTextFor = replaced_by_pep8("originalTextFor", original_text_for) +nestedExpr = replaced_by_pep8("nestedExpr", nested_expr) +makeHTMLTags = replaced_by_pep8("makeHTMLTags", make_html_tags) +makeXMLTags = replaced_by_pep8("makeXMLTags", make_xml_tags) +replaceHTMLEntity = replaced_by_pep8("replaceHTMLEntity", replace_html_entity) +infixNotation = replaced_by_pep8("infixNotation", infix_notation) # fmt: on diff --git a/src/pip/_vendor/pyparsing/results.py b/src/pip/_vendor/pyparsing/results.py index 0313049763b..3e5fe2089bd 100644 --- a/src/pip/_vendor/pyparsing/results.py +++ b/src/pip/_vendor/pyparsing/results.py @@ -173,42 +173,48 @@ def __init__( ): self._tokdict: Dict[str, _ParseResultsWithOffset] self._modal = modal - if name is not None and name != "": - if isinstance(name, int): - name = str(name) - if not modal: - self._all_names = {name} - self._name = name - if toklist not in self._null_values: - if isinstance(toklist, (str_type, type)): - toklist = [toklist] - if asList: - if isinstance(toklist, ParseResults): - self[name] = _ParseResultsWithOffset( - ParseResults(toklist._toklist), 0 - ) - else: - self[name] = _ParseResultsWithOffset( - ParseResults(toklist[0]), 0 - ) - self[name]._name = name - else: - try: - self[name] = toklist[0] - except (KeyError, TypeError, IndexError): - if toklist is not self: - self[name] = toklist - else: - self._name = name + + if name is None or name == "": + return + + if isinstance(name, int): + name = str(name) + + if not modal: + self._all_names = {name} + + self._name = name + + if toklist in self._null_values: + return + + if isinstance(toklist, (str_type, type)): + toklist = [toklist] + + if asList: + if isinstance(toklist, ParseResults): + self[name] = 
_ParseResultsWithOffset(ParseResults(toklist._toklist), 0) + else: + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) + self[name]._name = name + return + + try: + self[name] = toklist[0] + except (KeyError, TypeError, IndexError): + if toklist is not self: + self[name] = toklist + else: + self._name = name def __getitem__(self, i): if isinstance(i, (int, slice)): return self._toklist[i] - else: - if i not in self._all_names: - return self._tokdict[i][-1][0] - else: - return ParseResults([v[0] for v in self._tokdict[i]]) + + if i not in self._all_names: + return self._tokdict[i][-1][0] + + return ParseResults([v[0] for v in self._tokdict[i]]) def __setitem__(self, k, v, isinstance=isinstance): if isinstance(v, _ParseResultsWithOffset): @@ -226,27 +232,28 @@ def __setitem__(self, k, v, isinstance=isinstance): sub._parent = self def __delitem__(self, i): - if isinstance(i, (int, slice)): - mylen = len(self._toklist) - del self._toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i + 1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): - for j in removed: - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position - (position > j) - ) - else: + if not isinstance(i, (int, slice)): del self._tokdict[i] + return + + mylen = len(self._toklist) + del self._toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i + 1) + # get removed indices + removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for occurrences in self._tokdict.values(): + for j in removed: + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position - (position > j) + ) def __contains__(self, k) -> bool: return k in self._tokdict @@ -376,7 +383,7 @@ def insert_locn(locn, tokens): """ self._toklist.insert(index, ins_string) # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): + for occurrences in self._tokdict.values(): for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset( value, position + (position > index) @@ -652,58 +659,52 @@ def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: NL = "\n" out.append(indent + str(self.as_list()) if include_list else "") - if full: - if self.haskeys(): - items = sorted((str(k), v) for k, v in self.items()) - for k, v in items: - if out: - out.append(NL) - out.append(f"{indent}{(' ' * _depth)}- {k}: ") - if isinstance(v, ParseResults): - if v: - out.append( - v.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ) - ) - else: - out.append(str(v)) - else: - out.append(repr(v)) - if any(isinstance(vv, ParseResults) for vv in self): - v = self - for i, vv in enumerate(v): - if isinstance(vv, ParseResults): - out.append( - "\n{}{}[{}]:\n{}{}{}".format( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - vv.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ), - ) - ) - else: - out.append( - "\n%s%s[%d]:\n%s%s%s" - % ( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - str(vv), - ) - ) + if not full: + return "".join(out) + + if self.haskeys(): + items = sorted((str(k), v) for k, v in self.items()) + for k, 
v in items: + if out: + out.append(NL) + out.append(f"{indent}{(' ' * _depth)}- {k}: ") + if not isinstance(v, ParseResults): + out.append(repr(v)) + continue + + if not v: + out.append(str(v)) + continue + + out.append( + v.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ) + ) + if not any(isinstance(vv, ParseResults) for vv in self): + return "".join(out) + + v = self + incr = " " + nl = "\n" + for i, vv in enumerate(v): + if isinstance(vv, ParseResults): + vv_dump = vv.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ) + out.append( + f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv_dump}" + ) + else: + out.append( + f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv}" + ) return "".join(out) diff --git a/src/pip/_vendor/pyparsing/testing.py b/src/pip/_vendor/pyparsing/testing.py index 6a254c1c5e2..5654d47d62d 100644 --- a/src/pip/_vendor/pyparsing/testing.py +++ b/src/pip/_vendor/pyparsing/testing.py @@ -1,8 +1,10 @@ # testing.py from contextlib import contextmanager +import re import typing + from .core import ( ParserElement, ParseException, @@ -49,23 +51,23 @@ def save(self): self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - self._save_context[ - "literal_string_class" - ] = ParserElement._literalStringClass + self._save_context["literal_string_class"] = ( + ParserElement._literalStringClass + ) self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace self._save_context["packrat_enabled"] = ParserElement._packratEnabled if ParserElement._packratEnabled: - self._save_context[ - "packrat_cache_size" - ] = ParserElement.packrat_cache.size + self._save_context["packrat_cache_size"] = ( + ParserElement.packrat_cache.size + ) else: self._save_context["packrat_cache_size"] = None self._save_context["packrat_parse"] = ParserElement._parse - self._save_context[ - "recursion_enabled" - ] = ParserElement._left_recursion_enabled + self._save_context["recursion_enabled"] = ( + ParserElement._left_recursion_enabled + ) self._save_context["__diag__"] = { name: getattr(__diag__, name) for name in __diag__._all_names @@ -180,49 +182,52 @@ def assertRunTestResults( """ run_test_success, run_test_results = run_tests_report - if expected_parse_results is not None: - merged = [ - (*rpt, expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None + if expected_parse_results is None: + self.assertTrue( + run_test_success, msg=msg if msg is not None else "failed runTests" + ) + return + + merged = [ + (*rpt, expected) + for rpt, expected in zip(run_test_results, expected_parse_results) + ] + for test_string, result, expected in merged: + # expected should be a tuple containing a list and/or a dict or an exception, + # and optional failure message string + # an empty tuple will skip any result validation + fail_msg = next((exp for exp in expected if isinstance(exp, str)), None) + expected_exception = next( + ( + exp + for exp in expected + if isinstance(exp, type) and issubclass(exp, Exception) + ), + None, + ) + if expected_exception is not None: + 
with self.assertRaises( + expected_exception=expected_exception, msg=fail_msg or msg + ): + if isinstance(result, Exception): + raise result + else: + expected_list = next( + (exp for exp in expected if isinstance(exp, list)), None ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, + expected_dict = next( + (exp for exp in expected if isinstance(exp, dict)), None ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None + if (expected_list, expected_dict) != (None, None): + self.assertParseResultsEquals( + result, + expected_list=expected_list, + expected_dict=expected_dict, + msg=fail_msg or msg, ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? - print(f"no validation for {test_string!r}") + else: + # warning here maybe? + print(f"no validation for {test_string!r}") # do this last, in case some specific test results can be reported instead self.assertTrue( @@ -230,9 +235,18 @@ def assertRunTestResults( ) @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield + def assertRaisesParseException( + self, exc_type=ParseException, expected_msg=None, msg=None + ): + if expected_msg is not None: + if isinstance(expected_msg, str): + expected_msg = re.escape(expected_msg) + with self.assertRaisesRegex(exc_type, expected_msg, msg=msg) as ctx: + yield ctx + + else: + with self.assertRaises(exc_type, msg=msg) as ctx: + yield ctx @staticmethod def with_line_numbers( diff --git a/src/pip/_vendor/pyparsing/unicode.py b/src/pip/_vendor/pyparsing/unicode.py index ec0b3a4fe60..e0b7b4ccb9a 100644 --- a/src/pip/_vendor/pyparsing/unicode.py +++ b/src/pip/_vendor/pyparsing/unicode.py @@ -102,17 +102,10 @@ def identbodychars(cls): all characters in this range that are valid identifier body characters, plus the digits 0-9, and · (Unicode MIDDLE DOT) """ - return "".join( - sorted( - set( - cls.identchars - + "0123456789·" - + "".join( - [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()] - ) - ) - ) + identifier_chars = set( + c for c in cls._chars_for_ranges if ("_" + c).isidentifier() ) + return "".join(sorted(identifier_chars | set(cls.identchars + "0123456789·"))) @_lazyclassproperty def identifier(cls): diff --git a/src/pip/_vendor/pyparsing/util.py b/src/pip/_vendor/pyparsing/util.py index d8d3f414cca..4ae018a9636 100644 --- a/src/pip/_vendor/pyparsing/util.py +++ b/src/pip/_vendor/pyparsing/util.py @@ -237,7 +237,7 @@ def _flatten(ll: list) -> list: return ret -def _make_synonym_function(compat_name: str, fn: C) -> C: +def replaced_by_pep8(compat_name: str, fn: C) -> C: # In a future version, uncomment the code in the internal _inner() functions # to begin emitting DeprecationWarnings. 
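(The hunk above renames `_make_synonym_function` to the public `replaced_by_pep8(compat_name, fn)`: instead of the old decorator-factory form that this patch removes a few hunks below, it is now called directly with the legacy camelCase name plus the new snake_case callable and returns a wrapper that simply delegates. A minimal sketch of the new binding pattern, assuming pip's vendored pyparsing 3.1.2 is importable; `to_upper`/`toUpper` are illustrative names, not part of the patch::

    from pip._vendor.pyparsing.util import replaced_by_pep8

    def to_upper(text: str) -> str:
        """New-style (PEP 8) callable standing in for a renamed helper."""
        return text.upper()

    # One-line synonym binding, as used throughout this patch, e.g.
    #   tokenMap = replaced_by_pep8("tokenMap", token_map)
    toUpper = replaced_by_pep8("toUpper", to_upper)

    assert toUpper("abc") == "ABC"  # the synonym delegates to to_upper
)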
@@ -251,7 +251,7 @@ def _make_synonym_function(compat_name: str, fn: C) -> C: @wraps(fn) def _inner(self, *args, **kwargs): # warnings.warn( - # f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=3 + # f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2 # ) return fn(self, *args, **kwargs) @@ -260,7 +260,7 @@ def _inner(self, *args, **kwargs): @wraps(fn) def _inner(*args, **kwargs): # warnings.warn( - # f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=3 + # f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2 # ) return fn(*args, **kwargs) @@ -275,10 +275,3 @@ def _inner(*args, **kwargs): _inner.__kwdefaults__ = None _inner.__qualname__ = fn.__qualname__ return cast(C, _inner) - - -def replaced_by_pep8(fn: C) -> Callable[[Callable], C]: - """ - Decorator for pre-PEP8 compatibility synonyms, to link them to the new function. - """ - return lambda other: _make_synonym_function(other.__name__, fn) diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index fc60f5cae04..ab207435d0d 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -5,7 +5,7 @@ distro==1.9.0 msgpack==1.0.8 packaging==21.3 platformdirs==4.2.1 -pyparsing==3.1.0 +pyparsing==3.1.2 pyproject-hooks==1.0.0 requests==2.31.0 certifi==2024.2.2 From cba5b13f9b0791de05456d7a13d0c1a0a32e1f95 Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Fri, 3 May 2024 21:03:50 +0100 Subject: [PATCH 060/113] Upgrade idna to 3.7 --- news/idna.vendor.rst | 2 +- src/pip/_vendor/idna/LICENSE.md | 2 +- src/pip/_vendor/idna/core.py | 23 +- src/pip/_vendor/idna/idnadata.py | 2197 +++++++++++++++++++++++++- src/pip/_vendor/idna/package_data.py | 2 +- src/pip/_vendor/vendor.txt | 2 +- 6 files changed, 2159 insertions(+), 69 deletions(-) diff --git a/news/idna.vendor.rst b/news/idna.vendor.rst index 229b1f3568a..1b8f7430aa6 100644 --- a/news/idna.vendor.rst +++ b/news/idna.vendor.rst @@ -1 +1 @@ -Upgrade idna to 3.6 +Upgrade idna to 3.7 diff --git a/src/pip/_vendor/idna/LICENSE.md b/src/pip/_vendor/idna/LICENSE.md index ce3670186c6..19b6b45242c 100644 --- a/src/pip/_vendor/idna/LICENSE.md +++ b/src/pip/_vendor/idna/LICENSE.md @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2013-2023, Kim Davies and contributors. +Copyright (c) 2013-2024, Kim Davies and contributors. All rights reserved. 
Redistribution and use in source and binary forms, with or without diff --git a/src/pip/_vendor/idna/core.py b/src/pip/_vendor/idna/core.py index aaf7d658ba0..0dae61acdbc 100644 --- a/src/pip/_vendor/idna/core.py +++ b/src/pip/_vendor/idna/core.py @@ -150,9 +150,11 @@ def valid_contextj(label: str, pos: int) -> bool: joining_type = idnadata.joining_types.get(ord(label[i])) if joining_type == ord('T'): continue - if joining_type in [ord('L'), ord('D')]: + elif joining_type in [ord('L'), ord('D')]: ok = True break + else: + break if not ok: return False @@ -162,9 +164,11 @@ def valid_contextj(label: str, pos: int) -> bool: joining_type = idnadata.joining_types.get(ord(label[i])) if joining_type == ord('T'): continue - if joining_type in [ord('R'), ord('D')]: + elif joining_type in [ord('R'), ord('D')]: ok = True break + else: + break return ok if cp_value == 0x200d: @@ -236,12 +240,8 @@ def check_label(label: Union[str, bytes, bytearray]) -> None: if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']): continue elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']): - try: - if not valid_contextj(label, pos): - raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format( - _unot(cp_value), pos+1, repr(label))) - except ValueError: - raise IDNAError('Unknown codepoint adjacent to joiner {} at position {} in {}'.format( + if not valid_contextj(label, pos): + raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format( _unot(cp_value), pos+1, repr(label))) elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']): if not valid_contexto(label, pos): @@ -262,13 +262,8 @@ def alabel(label: str) -> bytes: except UnicodeEncodeError: pass - if not label: - raise IDNAError('No Input') - - label = str(label) check_label(label) - label_bytes = _punycode(label) - label_bytes = _alabel_prefix + label_bytes + label_bytes = _alabel_prefix + _punycode(label) if not valid_label_length(label_bytes): raise IDNAError('Label too long') diff --git a/src/pip/_vendor/idna/idnadata.py b/src/pip/_vendor/idna/idnadata.py index 5cd05d9056e..c61dcf977e5 100644 --- a/src/pip/_vendor/idna/idnadata.py +++ b/src/pip/_vendor/idna/idnadata.py @@ -101,16 +101,190 @@ ), } joining_types = { - 0x600: 85, - 0x601: 85, - 0x602: 85, - 0x603: 85, - 0x604: 85, - 0x605: 85, - 0x608: 85, - 0x60b: 85, + 0xad: 84, + 0x300: 84, + 0x301: 84, + 0x302: 84, + 0x303: 84, + 0x304: 84, + 0x305: 84, + 0x306: 84, + 0x307: 84, + 0x308: 84, + 0x309: 84, + 0x30a: 84, + 0x30b: 84, + 0x30c: 84, + 0x30d: 84, + 0x30e: 84, + 0x30f: 84, + 0x310: 84, + 0x311: 84, + 0x312: 84, + 0x313: 84, + 0x314: 84, + 0x315: 84, + 0x316: 84, + 0x317: 84, + 0x318: 84, + 0x319: 84, + 0x31a: 84, + 0x31b: 84, + 0x31c: 84, + 0x31d: 84, + 0x31e: 84, + 0x31f: 84, + 0x320: 84, + 0x321: 84, + 0x322: 84, + 0x323: 84, + 0x324: 84, + 0x325: 84, + 0x326: 84, + 0x327: 84, + 0x328: 84, + 0x329: 84, + 0x32a: 84, + 0x32b: 84, + 0x32c: 84, + 0x32d: 84, + 0x32e: 84, + 0x32f: 84, + 0x330: 84, + 0x331: 84, + 0x332: 84, + 0x333: 84, + 0x334: 84, + 0x335: 84, + 0x336: 84, + 0x337: 84, + 0x338: 84, + 0x339: 84, + 0x33a: 84, + 0x33b: 84, + 0x33c: 84, + 0x33d: 84, + 0x33e: 84, + 0x33f: 84, + 0x340: 84, + 0x341: 84, + 0x342: 84, + 0x343: 84, + 0x344: 84, + 0x345: 84, + 0x346: 84, + 0x347: 84, + 0x348: 84, + 0x349: 84, + 0x34a: 84, + 0x34b: 84, + 0x34c: 84, + 0x34d: 84, + 0x34e: 84, + 0x34f: 84, + 0x350: 84, + 0x351: 84, + 0x352: 84, + 0x353: 84, + 0x354: 84, + 0x355: 84, + 0x356: 84, + 0x357: 84, + 0x358: 84, + 
0x359: 84, + 0x35a: 84, + 0x35b: 84, + 0x35c: 84, + 0x35d: 84, + 0x35e: 84, + 0x35f: 84, + 0x360: 84, + 0x361: 84, + 0x362: 84, + 0x363: 84, + 0x364: 84, + 0x365: 84, + 0x366: 84, + 0x367: 84, + 0x368: 84, + 0x369: 84, + 0x36a: 84, + 0x36b: 84, + 0x36c: 84, + 0x36d: 84, + 0x36e: 84, + 0x36f: 84, + 0x483: 84, + 0x484: 84, + 0x485: 84, + 0x486: 84, + 0x487: 84, + 0x488: 84, + 0x489: 84, + 0x591: 84, + 0x592: 84, + 0x593: 84, + 0x594: 84, + 0x595: 84, + 0x596: 84, + 0x597: 84, + 0x598: 84, + 0x599: 84, + 0x59a: 84, + 0x59b: 84, + 0x59c: 84, + 0x59d: 84, + 0x59e: 84, + 0x59f: 84, + 0x5a0: 84, + 0x5a1: 84, + 0x5a2: 84, + 0x5a3: 84, + 0x5a4: 84, + 0x5a5: 84, + 0x5a6: 84, + 0x5a7: 84, + 0x5a8: 84, + 0x5a9: 84, + 0x5aa: 84, + 0x5ab: 84, + 0x5ac: 84, + 0x5ad: 84, + 0x5ae: 84, + 0x5af: 84, + 0x5b0: 84, + 0x5b1: 84, + 0x5b2: 84, + 0x5b3: 84, + 0x5b4: 84, + 0x5b5: 84, + 0x5b6: 84, + 0x5b7: 84, + 0x5b8: 84, + 0x5b9: 84, + 0x5ba: 84, + 0x5bb: 84, + 0x5bc: 84, + 0x5bd: 84, + 0x5bf: 84, + 0x5c1: 84, + 0x5c2: 84, + 0x5c4: 84, + 0x5c5: 84, + 0x5c7: 84, + 0x610: 84, + 0x611: 84, + 0x612: 84, + 0x613: 84, + 0x614: 84, + 0x615: 84, + 0x616: 84, + 0x617: 84, + 0x618: 84, + 0x619: 84, + 0x61a: 84, + 0x61c: 84, 0x620: 68, - 0x621: 85, 0x622: 82, 0x623: 82, 0x624: 82, @@ -152,12 +326,33 @@ 0x648: 82, 0x649: 68, 0x64a: 68, + 0x64b: 84, + 0x64c: 84, + 0x64d: 84, + 0x64e: 84, + 0x64f: 84, + 0x650: 84, + 0x651: 84, + 0x652: 84, + 0x653: 84, + 0x654: 84, + 0x655: 84, + 0x656: 84, + 0x657: 84, + 0x658: 84, + 0x659: 84, + 0x65a: 84, + 0x65b: 84, + 0x65c: 84, + 0x65d: 84, + 0x65e: 84, + 0x65f: 84, 0x66e: 68, 0x66f: 68, + 0x670: 84, 0x671: 82, 0x672: 82, 0x673: 82, - 0x674: 85, 0x675: 82, 0x676: 82, 0x677: 82, @@ -254,7 +449,25 @@ 0x6d2: 82, 0x6d3: 82, 0x6d5: 82, - 0x6dd: 85, + 0x6d6: 84, + 0x6d7: 84, + 0x6d8: 84, + 0x6d9: 84, + 0x6da: 84, + 0x6db: 84, + 0x6dc: 84, + 0x6df: 84, + 0x6e0: 84, + 0x6e1: 84, + 0x6e2: 84, + 0x6e3: 84, + 0x6e4: 84, + 0x6e7: 84, + 0x6e8: 84, + 0x6ea: 84, + 0x6eb: 84, + 0x6ec: 84, + 0x6ed: 84, 0x6ee: 82, 0x6ef: 82, 0x6fa: 68, @@ -263,6 +476,7 @@ 0x6ff: 68, 0x70f: 84, 0x710: 82, + 0x711: 84, 0x712: 68, 0x713: 68, 0x714: 68, @@ -293,6 +507,33 @@ 0x72d: 68, 0x72e: 68, 0x72f: 82, + 0x730: 84, + 0x731: 84, + 0x732: 84, + 0x733: 84, + 0x734: 84, + 0x735: 84, + 0x736: 84, + 0x737: 84, + 0x738: 84, + 0x739: 84, + 0x73a: 84, + 0x73b: 84, + 0x73c: 84, + 0x73d: 84, + 0x73e: 84, + 0x73f: 84, + 0x740: 84, + 0x741: 84, + 0x742: 84, + 0x743: 84, + 0x744: 84, + 0x745: 84, + 0x746: 84, + 0x747: 84, + 0x748: 84, + 0x749: 84, + 0x74a: 84, 0x74d: 82, 0x74e: 68, 0x74f: 68, @@ -344,6 +585,17 @@ 0x77d: 68, 0x77e: 68, 0x77f: 68, + 0x7a6: 84, + 0x7a7: 84, + 0x7a8: 84, + 0x7a9: 84, + 0x7aa: 84, + 0x7ab: 84, + 0x7ac: 84, + 0x7ad: 84, + 0x7ae: 84, + 0x7af: 84, + 0x7b0: 84, 0x7ca: 68, 0x7cb: 68, 0x7cc: 68, @@ -377,7 +629,38 @@ 0x7e8: 68, 0x7e9: 68, 0x7ea: 68, + 0x7eb: 84, + 0x7ec: 84, + 0x7ed: 84, + 0x7ee: 84, + 0x7ef: 84, + 0x7f0: 84, + 0x7f1: 84, + 0x7f2: 84, + 0x7f3: 84, 0x7fa: 67, + 0x7fd: 84, + 0x816: 84, + 0x817: 84, + 0x818: 84, + 0x819: 84, + 0x81b: 84, + 0x81c: 84, + 0x81d: 84, + 0x81e: 84, + 0x81f: 84, + 0x820: 84, + 0x821: 84, + 0x822: 84, + 0x823: 84, + 0x825: 84, + 0x826: 84, + 0x827: 84, + 0x829: 84, + 0x82a: 84, + 0x82b: 84, + 0x82c: 84, + 0x82d: 84, 0x840: 82, 0x841: 68, 0x842: 68, @@ -403,13 +686,14 @@ 0x856: 82, 0x857: 82, 0x858: 82, + 0x859: 84, + 0x85a: 84, + 0x85b: 84, 0x860: 68, - 0x861: 85, 0x862: 68, 0x863: 68, 0x864: 68, 0x865: 68, - 0x866: 85, 0x867: 82, 0x868: 68, 0x869: 82, @@ -437,16 +721,20 @@ 
0x884: 67, 0x885: 67, 0x886: 68, - 0x887: 85, - 0x888: 85, 0x889: 68, 0x88a: 68, 0x88b: 68, 0x88c: 68, 0x88d: 68, 0x88e: 82, - 0x890: 85, - 0x891: 85, + 0x898: 84, + 0x899: 84, + 0x89a: 84, + 0x89b: 84, + 0x89c: 84, + 0x89d: 84, + 0x89e: 84, + 0x89f: 84, 0x8a0: 68, 0x8a1: 68, 0x8a2: 68, @@ -460,7 +748,6 @@ 0x8aa: 82, 0x8ab: 82, 0x8ac: 82, - 0x8ad: 85, 0x8ae: 82, 0x8af: 68, 0x8b0: 68, @@ -488,11 +775,357 @@ 0x8c6: 68, 0x8c7: 68, 0x8c8: 68, - 0x8e2: 85, - 0x1806: 85, + 0x8ca: 84, + 0x8cb: 84, + 0x8cc: 84, + 0x8cd: 84, + 0x8ce: 84, + 0x8cf: 84, + 0x8d0: 84, + 0x8d1: 84, + 0x8d2: 84, + 0x8d3: 84, + 0x8d4: 84, + 0x8d5: 84, + 0x8d6: 84, + 0x8d7: 84, + 0x8d8: 84, + 0x8d9: 84, + 0x8da: 84, + 0x8db: 84, + 0x8dc: 84, + 0x8dd: 84, + 0x8de: 84, + 0x8df: 84, + 0x8e0: 84, + 0x8e1: 84, + 0x8e3: 84, + 0x8e4: 84, + 0x8e5: 84, + 0x8e6: 84, + 0x8e7: 84, + 0x8e8: 84, + 0x8e9: 84, + 0x8ea: 84, + 0x8eb: 84, + 0x8ec: 84, + 0x8ed: 84, + 0x8ee: 84, + 0x8ef: 84, + 0x8f0: 84, + 0x8f1: 84, + 0x8f2: 84, + 0x8f3: 84, + 0x8f4: 84, + 0x8f5: 84, + 0x8f6: 84, + 0x8f7: 84, + 0x8f8: 84, + 0x8f9: 84, + 0x8fa: 84, + 0x8fb: 84, + 0x8fc: 84, + 0x8fd: 84, + 0x8fe: 84, + 0x8ff: 84, + 0x900: 84, + 0x901: 84, + 0x902: 84, + 0x93a: 84, + 0x93c: 84, + 0x941: 84, + 0x942: 84, + 0x943: 84, + 0x944: 84, + 0x945: 84, + 0x946: 84, + 0x947: 84, + 0x948: 84, + 0x94d: 84, + 0x951: 84, + 0x952: 84, + 0x953: 84, + 0x954: 84, + 0x955: 84, + 0x956: 84, + 0x957: 84, + 0x962: 84, + 0x963: 84, + 0x981: 84, + 0x9bc: 84, + 0x9c1: 84, + 0x9c2: 84, + 0x9c3: 84, + 0x9c4: 84, + 0x9cd: 84, + 0x9e2: 84, + 0x9e3: 84, + 0x9fe: 84, + 0xa01: 84, + 0xa02: 84, + 0xa3c: 84, + 0xa41: 84, + 0xa42: 84, + 0xa47: 84, + 0xa48: 84, + 0xa4b: 84, + 0xa4c: 84, + 0xa4d: 84, + 0xa51: 84, + 0xa70: 84, + 0xa71: 84, + 0xa75: 84, + 0xa81: 84, + 0xa82: 84, + 0xabc: 84, + 0xac1: 84, + 0xac2: 84, + 0xac3: 84, + 0xac4: 84, + 0xac5: 84, + 0xac7: 84, + 0xac8: 84, + 0xacd: 84, + 0xae2: 84, + 0xae3: 84, + 0xafa: 84, + 0xafb: 84, + 0xafc: 84, + 0xafd: 84, + 0xafe: 84, + 0xaff: 84, + 0xb01: 84, + 0xb3c: 84, + 0xb3f: 84, + 0xb41: 84, + 0xb42: 84, + 0xb43: 84, + 0xb44: 84, + 0xb4d: 84, + 0xb55: 84, + 0xb56: 84, + 0xb62: 84, + 0xb63: 84, + 0xb82: 84, + 0xbc0: 84, + 0xbcd: 84, + 0xc00: 84, + 0xc04: 84, + 0xc3c: 84, + 0xc3e: 84, + 0xc3f: 84, + 0xc40: 84, + 0xc46: 84, + 0xc47: 84, + 0xc48: 84, + 0xc4a: 84, + 0xc4b: 84, + 0xc4c: 84, + 0xc4d: 84, + 0xc55: 84, + 0xc56: 84, + 0xc62: 84, + 0xc63: 84, + 0xc81: 84, + 0xcbc: 84, + 0xcbf: 84, + 0xcc6: 84, + 0xccc: 84, + 0xccd: 84, + 0xce2: 84, + 0xce3: 84, + 0xd00: 84, + 0xd01: 84, + 0xd3b: 84, + 0xd3c: 84, + 0xd41: 84, + 0xd42: 84, + 0xd43: 84, + 0xd44: 84, + 0xd4d: 84, + 0xd62: 84, + 0xd63: 84, + 0xd81: 84, + 0xdca: 84, + 0xdd2: 84, + 0xdd3: 84, + 0xdd4: 84, + 0xdd6: 84, + 0xe31: 84, + 0xe34: 84, + 0xe35: 84, + 0xe36: 84, + 0xe37: 84, + 0xe38: 84, + 0xe39: 84, + 0xe3a: 84, + 0xe47: 84, + 0xe48: 84, + 0xe49: 84, + 0xe4a: 84, + 0xe4b: 84, + 0xe4c: 84, + 0xe4d: 84, + 0xe4e: 84, + 0xeb1: 84, + 0xeb4: 84, + 0xeb5: 84, + 0xeb6: 84, + 0xeb7: 84, + 0xeb8: 84, + 0xeb9: 84, + 0xeba: 84, + 0xebb: 84, + 0xebc: 84, + 0xec8: 84, + 0xec9: 84, + 0xeca: 84, + 0xecb: 84, + 0xecc: 84, + 0xecd: 84, + 0xece: 84, + 0xf18: 84, + 0xf19: 84, + 0xf35: 84, + 0xf37: 84, + 0xf39: 84, + 0xf71: 84, + 0xf72: 84, + 0xf73: 84, + 0xf74: 84, + 0xf75: 84, + 0xf76: 84, + 0xf77: 84, + 0xf78: 84, + 0xf79: 84, + 0xf7a: 84, + 0xf7b: 84, + 0xf7c: 84, + 0xf7d: 84, + 0xf7e: 84, + 0xf80: 84, + 0xf81: 84, + 0xf82: 84, + 0xf83: 84, + 0xf84: 84, + 0xf86: 84, + 0xf87: 84, + 0xf8d: 84, + 0xf8e: 84, + 
0xf8f: 84, + 0xf90: 84, + 0xf91: 84, + 0xf92: 84, + 0xf93: 84, + 0xf94: 84, + 0xf95: 84, + 0xf96: 84, + 0xf97: 84, + 0xf99: 84, + 0xf9a: 84, + 0xf9b: 84, + 0xf9c: 84, + 0xf9d: 84, + 0xf9e: 84, + 0xf9f: 84, + 0xfa0: 84, + 0xfa1: 84, + 0xfa2: 84, + 0xfa3: 84, + 0xfa4: 84, + 0xfa5: 84, + 0xfa6: 84, + 0xfa7: 84, + 0xfa8: 84, + 0xfa9: 84, + 0xfaa: 84, + 0xfab: 84, + 0xfac: 84, + 0xfad: 84, + 0xfae: 84, + 0xfaf: 84, + 0xfb0: 84, + 0xfb1: 84, + 0xfb2: 84, + 0xfb3: 84, + 0xfb4: 84, + 0xfb5: 84, + 0xfb6: 84, + 0xfb7: 84, + 0xfb8: 84, + 0xfb9: 84, + 0xfba: 84, + 0xfbb: 84, + 0xfbc: 84, + 0xfc6: 84, + 0x102d: 84, + 0x102e: 84, + 0x102f: 84, + 0x1030: 84, + 0x1032: 84, + 0x1033: 84, + 0x1034: 84, + 0x1035: 84, + 0x1036: 84, + 0x1037: 84, + 0x1039: 84, + 0x103a: 84, + 0x103d: 84, + 0x103e: 84, + 0x1058: 84, + 0x1059: 84, + 0x105e: 84, + 0x105f: 84, + 0x1060: 84, + 0x1071: 84, + 0x1072: 84, + 0x1073: 84, + 0x1074: 84, + 0x1082: 84, + 0x1085: 84, + 0x1086: 84, + 0x108d: 84, + 0x109d: 84, + 0x135d: 84, + 0x135e: 84, + 0x135f: 84, + 0x1712: 84, + 0x1713: 84, + 0x1714: 84, + 0x1732: 84, + 0x1733: 84, + 0x1752: 84, + 0x1753: 84, + 0x1772: 84, + 0x1773: 84, + 0x17b4: 84, + 0x17b5: 84, + 0x17b7: 84, + 0x17b8: 84, + 0x17b9: 84, + 0x17ba: 84, + 0x17bb: 84, + 0x17bc: 84, + 0x17bd: 84, + 0x17c6: 84, + 0x17c9: 84, + 0x17ca: 84, + 0x17cb: 84, + 0x17cc: 84, + 0x17cd: 84, + 0x17ce: 84, + 0x17cf: 84, + 0x17d0: 84, + 0x17d1: 84, + 0x17d2: 84, + 0x17d3: 84, + 0x17dd: 84, 0x1807: 68, 0x180a: 67, - 0x180e: 85, + 0x180b: 84, + 0x180c: 84, + 0x180d: 84, + 0x180f: 84, 0x1820: 68, 0x1821: 68, 0x1822: 68, @@ -582,11 +1215,6 @@ 0x1876: 68, 0x1877: 68, 0x1878: 68, - 0x1880: 85, - 0x1881: 85, - 0x1882: 85, - 0x1883: 85, - 0x1884: 85, 0x1885: 84, 0x1886: 84, 0x1887: 68, @@ -623,14 +1251,339 @@ 0x18a6: 68, 0x18a7: 68, 0x18a8: 68, + 0x18a9: 84, 0x18aa: 68, - 0x200c: 85, + 0x1920: 84, + 0x1921: 84, + 0x1922: 84, + 0x1927: 84, + 0x1928: 84, + 0x1932: 84, + 0x1939: 84, + 0x193a: 84, + 0x193b: 84, + 0x1a17: 84, + 0x1a18: 84, + 0x1a1b: 84, + 0x1a56: 84, + 0x1a58: 84, + 0x1a59: 84, + 0x1a5a: 84, + 0x1a5b: 84, + 0x1a5c: 84, + 0x1a5d: 84, + 0x1a5e: 84, + 0x1a60: 84, + 0x1a62: 84, + 0x1a65: 84, + 0x1a66: 84, + 0x1a67: 84, + 0x1a68: 84, + 0x1a69: 84, + 0x1a6a: 84, + 0x1a6b: 84, + 0x1a6c: 84, + 0x1a73: 84, + 0x1a74: 84, + 0x1a75: 84, + 0x1a76: 84, + 0x1a77: 84, + 0x1a78: 84, + 0x1a79: 84, + 0x1a7a: 84, + 0x1a7b: 84, + 0x1a7c: 84, + 0x1a7f: 84, + 0x1ab0: 84, + 0x1ab1: 84, + 0x1ab2: 84, + 0x1ab3: 84, + 0x1ab4: 84, + 0x1ab5: 84, + 0x1ab6: 84, + 0x1ab7: 84, + 0x1ab8: 84, + 0x1ab9: 84, + 0x1aba: 84, + 0x1abb: 84, + 0x1abc: 84, + 0x1abd: 84, + 0x1abe: 84, + 0x1abf: 84, + 0x1ac0: 84, + 0x1ac1: 84, + 0x1ac2: 84, + 0x1ac3: 84, + 0x1ac4: 84, + 0x1ac5: 84, + 0x1ac6: 84, + 0x1ac7: 84, + 0x1ac8: 84, + 0x1ac9: 84, + 0x1aca: 84, + 0x1acb: 84, + 0x1acc: 84, + 0x1acd: 84, + 0x1ace: 84, + 0x1b00: 84, + 0x1b01: 84, + 0x1b02: 84, + 0x1b03: 84, + 0x1b34: 84, + 0x1b36: 84, + 0x1b37: 84, + 0x1b38: 84, + 0x1b39: 84, + 0x1b3a: 84, + 0x1b3c: 84, + 0x1b42: 84, + 0x1b6b: 84, + 0x1b6c: 84, + 0x1b6d: 84, + 0x1b6e: 84, + 0x1b6f: 84, + 0x1b70: 84, + 0x1b71: 84, + 0x1b72: 84, + 0x1b73: 84, + 0x1b80: 84, + 0x1b81: 84, + 0x1ba2: 84, + 0x1ba3: 84, + 0x1ba4: 84, + 0x1ba5: 84, + 0x1ba8: 84, + 0x1ba9: 84, + 0x1bab: 84, + 0x1bac: 84, + 0x1bad: 84, + 0x1be6: 84, + 0x1be8: 84, + 0x1be9: 84, + 0x1bed: 84, + 0x1bef: 84, + 0x1bf0: 84, + 0x1bf1: 84, + 0x1c2c: 84, + 0x1c2d: 84, + 0x1c2e: 84, + 0x1c2f: 84, + 0x1c30: 84, + 0x1c31: 84, + 0x1c32: 84, + 0x1c33: 84, + 0x1c36: 84, + 0x1c37: 84, + 
0x1cd0: 84, + 0x1cd1: 84, + 0x1cd2: 84, + 0x1cd4: 84, + 0x1cd5: 84, + 0x1cd6: 84, + 0x1cd7: 84, + 0x1cd8: 84, + 0x1cd9: 84, + 0x1cda: 84, + 0x1cdb: 84, + 0x1cdc: 84, + 0x1cdd: 84, + 0x1cde: 84, + 0x1cdf: 84, + 0x1ce0: 84, + 0x1ce2: 84, + 0x1ce3: 84, + 0x1ce4: 84, + 0x1ce5: 84, + 0x1ce6: 84, + 0x1ce7: 84, + 0x1ce8: 84, + 0x1ced: 84, + 0x1cf4: 84, + 0x1cf8: 84, + 0x1cf9: 84, + 0x1dc0: 84, + 0x1dc1: 84, + 0x1dc2: 84, + 0x1dc3: 84, + 0x1dc4: 84, + 0x1dc5: 84, + 0x1dc6: 84, + 0x1dc7: 84, + 0x1dc8: 84, + 0x1dc9: 84, + 0x1dca: 84, + 0x1dcb: 84, + 0x1dcc: 84, + 0x1dcd: 84, + 0x1dce: 84, + 0x1dcf: 84, + 0x1dd0: 84, + 0x1dd1: 84, + 0x1dd2: 84, + 0x1dd3: 84, + 0x1dd4: 84, + 0x1dd5: 84, + 0x1dd6: 84, + 0x1dd7: 84, + 0x1dd8: 84, + 0x1dd9: 84, + 0x1dda: 84, + 0x1ddb: 84, + 0x1ddc: 84, + 0x1ddd: 84, + 0x1dde: 84, + 0x1ddf: 84, + 0x1de0: 84, + 0x1de1: 84, + 0x1de2: 84, + 0x1de3: 84, + 0x1de4: 84, + 0x1de5: 84, + 0x1de6: 84, + 0x1de7: 84, + 0x1de8: 84, + 0x1de9: 84, + 0x1dea: 84, + 0x1deb: 84, + 0x1dec: 84, + 0x1ded: 84, + 0x1dee: 84, + 0x1def: 84, + 0x1df0: 84, + 0x1df1: 84, + 0x1df2: 84, + 0x1df3: 84, + 0x1df4: 84, + 0x1df5: 84, + 0x1df6: 84, + 0x1df7: 84, + 0x1df8: 84, + 0x1df9: 84, + 0x1dfa: 84, + 0x1dfb: 84, + 0x1dfc: 84, + 0x1dfd: 84, + 0x1dfe: 84, + 0x1dff: 84, + 0x200b: 84, 0x200d: 67, - 0x202f: 85, - 0x2066: 85, - 0x2067: 85, - 0x2068: 85, - 0x2069: 85, + 0x200e: 84, + 0x200f: 84, + 0x202a: 84, + 0x202b: 84, + 0x202c: 84, + 0x202d: 84, + 0x202e: 84, + 0x2060: 84, + 0x2061: 84, + 0x2062: 84, + 0x2063: 84, + 0x2064: 84, + 0x206a: 84, + 0x206b: 84, + 0x206c: 84, + 0x206d: 84, + 0x206e: 84, + 0x206f: 84, + 0x20d0: 84, + 0x20d1: 84, + 0x20d2: 84, + 0x20d3: 84, + 0x20d4: 84, + 0x20d5: 84, + 0x20d6: 84, + 0x20d7: 84, + 0x20d8: 84, + 0x20d9: 84, + 0x20da: 84, + 0x20db: 84, + 0x20dc: 84, + 0x20dd: 84, + 0x20de: 84, + 0x20df: 84, + 0x20e0: 84, + 0x20e1: 84, + 0x20e2: 84, + 0x20e3: 84, + 0x20e4: 84, + 0x20e5: 84, + 0x20e6: 84, + 0x20e7: 84, + 0x20e8: 84, + 0x20e9: 84, + 0x20ea: 84, + 0x20eb: 84, + 0x20ec: 84, + 0x20ed: 84, + 0x20ee: 84, + 0x20ef: 84, + 0x20f0: 84, + 0x2cef: 84, + 0x2cf0: 84, + 0x2cf1: 84, + 0x2d7f: 84, + 0x2de0: 84, + 0x2de1: 84, + 0x2de2: 84, + 0x2de3: 84, + 0x2de4: 84, + 0x2de5: 84, + 0x2de6: 84, + 0x2de7: 84, + 0x2de8: 84, + 0x2de9: 84, + 0x2dea: 84, + 0x2deb: 84, + 0x2dec: 84, + 0x2ded: 84, + 0x2dee: 84, + 0x2def: 84, + 0x2df0: 84, + 0x2df1: 84, + 0x2df2: 84, + 0x2df3: 84, + 0x2df4: 84, + 0x2df5: 84, + 0x2df6: 84, + 0x2df7: 84, + 0x2df8: 84, + 0x2df9: 84, + 0x2dfa: 84, + 0x2dfb: 84, + 0x2dfc: 84, + 0x2dfd: 84, + 0x2dfe: 84, + 0x2dff: 84, + 0x302a: 84, + 0x302b: 84, + 0x302c: 84, + 0x302d: 84, + 0x3099: 84, + 0x309a: 84, + 0xa66f: 84, + 0xa670: 84, + 0xa671: 84, + 0xa672: 84, + 0xa674: 84, + 0xa675: 84, + 0xa676: 84, + 0xa677: 84, + 0xa678: 84, + 0xa679: 84, + 0xa67a: 84, + 0xa67b: 84, + 0xa67c: 84, + 0xa67d: 84, + 0xa69e: 84, + 0xa69f: 84, + 0xa6f0: 84, + 0xa6f1: 84, + 0xa802: 84, + 0xa806: 84, + 0xa80b: 84, + 0xa825: 84, + 0xa826: 84, + 0xa82c: 84, 0xa840: 68, 0xa841: 68, 0xa842: 68, @@ -682,20 +1635,151 @@ 0xa870: 68, 0xa871: 68, 0xa872: 76, - 0xa873: 85, + 0xa8c4: 84, + 0xa8c5: 84, + 0xa8e0: 84, + 0xa8e1: 84, + 0xa8e2: 84, + 0xa8e3: 84, + 0xa8e4: 84, + 0xa8e5: 84, + 0xa8e6: 84, + 0xa8e7: 84, + 0xa8e8: 84, + 0xa8e9: 84, + 0xa8ea: 84, + 0xa8eb: 84, + 0xa8ec: 84, + 0xa8ed: 84, + 0xa8ee: 84, + 0xa8ef: 84, + 0xa8f0: 84, + 0xa8f1: 84, + 0xa8ff: 84, + 0xa926: 84, + 0xa927: 84, + 0xa928: 84, + 0xa929: 84, + 0xa92a: 84, + 0xa92b: 84, + 0xa92c: 84, + 0xa92d: 84, + 0xa947: 84, + 0xa948: 84, + 
0xa949: 84, + 0xa94a: 84, + 0xa94b: 84, + 0xa94c: 84, + 0xa94d: 84, + 0xa94e: 84, + 0xa94f: 84, + 0xa950: 84, + 0xa951: 84, + 0xa980: 84, + 0xa981: 84, + 0xa982: 84, + 0xa9b3: 84, + 0xa9b6: 84, + 0xa9b7: 84, + 0xa9b8: 84, + 0xa9b9: 84, + 0xa9bc: 84, + 0xa9bd: 84, + 0xa9e5: 84, + 0xaa29: 84, + 0xaa2a: 84, + 0xaa2b: 84, + 0xaa2c: 84, + 0xaa2d: 84, + 0xaa2e: 84, + 0xaa31: 84, + 0xaa32: 84, + 0xaa35: 84, + 0xaa36: 84, + 0xaa43: 84, + 0xaa4c: 84, + 0xaa7c: 84, + 0xaab0: 84, + 0xaab2: 84, + 0xaab3: 84, + 0xaab4: 84, + 0xaab7: 84, + 0xaab8: 84, + 0xaabe: 84, + 0xaabf: 84, + 0xaac1: 84, + 0xaaec: 84, + 0xaaed: 84, + 0xaaf6: 84, + 0xabe5: 84, + 0xabe8: 84, + 0xabed: 84, + 0xfb1e: 84, + 0xfe00: 84, + 0xfe01: 84, + 0xfe02: 84, + 0xfe03: 84, + 0xfe04: 84, + 0xfe05: 84, + 0xfe06: 84, + 0xfe07: 84, + 0xfe08: 84, + 0xfe09: 84, + 0xfe0a: 84, + 0xfe0b: 84, + 0xfe0c: 84, + 0xfe0d: 84, + 0xfe0e: 84, + 0xfe0f: 84, + 0xfe20: 84, + 0xfe21: 84, + 0xfe22: 84, + 0xfe23: 84, + 0xfe24: 84, + 0xfe25: 84, + 0xfe26: 84, + 0xfe27: 84, + 0xfe28: 84, + 0xfe29: 84, + 0xfe2a: 84, + 0xfe2b: 84, + 0xfe2c: 84, + 0xfe2d: 84, + 0xfe2e: 84, + 0xfe2f: 84, + 0xfeff: 84, + 0xfff9: 84, + 0xfffa: 84, + 0xfffb: 84, + 0x101fd: 84, + 0x102e0: 84, + 0x10376: 84, + 0x10377: 84, + 0x10378: 84, + 0x10379: 84, + 0x1037a: 84, + 0x10a01: 84, + 0x10a02: 84, + 0x10a03: 84, + 0x10a05: 84, + 0x10a06: 84, + 0x10a0c: 84, + 0x10a0d: 84, + 0x10a0e: 84, + 0x10a0f: 84, + 0x10a38: 84, + 0x10a39: 84, + 0x10a3a: 84, + 0x10a3f: 84, 0x10ac0: 68, 0x10ac1: 68, 0x10ac2: 68, 0x10ac3: 68, 0x10ac4: 68, 0x10ac5: 82, - 0x10ac6: 85, 0x10ac7: 82, - 0x10ac8: 85, 0x10ac9: 82, 0x10aca: 82, - 0x10acb: 85, - 0x10acc: 85, 0x10acd: 76, 0x10ace: 82, 0x10acf: 82, @@ -717,9 +1801,9 @@ 0x10adf: 68, 0x10ae0: 68, 0x10ae1: 82, - 0x10ae2: 85, - 0x10ae3: 85, 0x10ae4: 82, + 0x10ae5: 84, + 0x10ae6: 84, 0x10aeb: 68, 0x10aec: 68, 0x10aed: 68, @@ -749,7 +1833,6 @@ 0x10bac: 82, 0x10bad: 68, 0x10bae: 68, - 0x10baf: 85, 0x10d00: 76, 0x10d01: 68, 0x10d02: 68, @@ -786,6 +1869,15 @@ 0x10d21: 68, 0x10d22: 82, 0x10d23: 68, + 0x10d24: 84, + 0x10d25: 84, + 0x10d26: 84, + 0x10d27: 84, + 0x10eab: 84, + 0x10eac: 84, + 0x10efd: 84, + 0x10efe: 84, + 0x10eff: 84, 0x10f30: 68, 0x10f31: 68, 0x10f32: 68, @@ -807,7 +1899,17 @@ 0x10f42: 68, 0x10f43: 68, 0x10f44: 68, - 0x10f45: 85, + 0x10f46: 84, + 0x10f47: 84, + 0x10f48: 84, + 0x10f49: 84, + 0x10f4a: 84, + 0x10f4b: 84, + 0x10f4c: 84, + 0x10f4d: 84, + 0x10f4e: 84, + 0x10f4f: 84, + 0x10f50: 84, 0x10f51: 68, 0x10f52: 68, 0x10f53: 68, @@ -830,14 +1932,16 @@ 0x10f7f: 68, 0x10f80: 68, 0x10f81: 68, + 0x10f82: 84, + 0x10f83: 84, + 0x10f84: 84, + 0x10f85: 84, 0x10fb0: 68, - 0x10fb1: 85, 0x10fb2: 68, 0x10fb3: 68, 0x10fb4: 82, 0x10fb5: 82, 0x10fb6: 82, - 0x10fb7: 85, 0x10fb8: 68, 0x10fb9: 82, 0x10fba: 82, @@ -846,20 +1950,668 @@ 0x10fbd: 82, 0x10fbe: 68, 0x10fbf: 68, - 0x10fc0: 85, 0x10fc1: 68, 0x10fc2: 82, 0x10fc3: 82, 0x10fc4: 68, - 0x10fc5: 85, - 0x10fc6: 85, - 0x10fc7: 85, - 0x10fc8: 85, 0x10fc9: 82, 0x10fca: 68, 0x10fcb: 76, - 0x110bd: 85, - 0x110cd: 85, + 0x11001: 84, + 0x11038: 84, + 0x11039: 84, + 0x1103a: 84, + 0x1103b: 84, + 0x1103c: 84, + 0x1103d: 84, + 0x1103e: 84, + 0x1103f: 84, + 0x11040: 84, + 0x11041: 84, + 0x11042: 84, + 0x11043: 84, + 0x11044: 84, + 0x11045: 84, + 0x11046: 84, + 0x11070: 84, + 0x11073: 84, + 0x11074: 84, + 0x1107f: 84, + 0x11080: 84, + 0x11081: 84, + 0x110b3: 84, + 0x110b4: 84, + 0x110b5: 84, + 0x110b6: 84, + 0x110b9: 84, + 0x110ba: 84, + 0x110c2: 84, + 0x11100: 84, + 0x11101: 84, + 0x11102: 84, + 0x11127: 84, + 0x11128: 84, + 0x11129: 
84, + 0x1112a: 84, + 0x1112b: 84, + 0x1112d: 84, + 0x1112e: 84, + 0x1112f: 84, + 0x11130: 84, + 0x11131: 84, + 0x11132: 84, + 0x11133: 84, + 0x11134: 84, + 0x11173: 84, + 0x11180: 84, + 0x11181: 84, + 0x111b6: 84, + 0x111b7: 84, + 0x111b8: 84, + 0x111b9: 84, + 0x111ba: 84, + 0x111bb: 84, + 0x111bc: 84, + 0x111bd: 84, + 0x111be: 84, + 0x111c9: 84, + 0x111ca: 84, + 0x111cb: 84, + 0x111cc: 84, + 0x111cf: 84, + 0x1122f: 84, + 0x11230: 84, + 0x11231: 84, + 0x11234: 84, + 0x11236: 84, + 0x11237: 84, + 0x1123e: 84, + 0x11241: 84, + 0x112df: 84, + 0x112e3: 84, + 0x112e4: 84, + 0x112e5: 84, + 0x112e6: 84, + 0x112e7: 84, + 0x112e8: 84, + 0x112e9: 84, + 0x112ea: 84, + 0x11300: 84, + 0x11301: 84, + 0x1133b: 84, + 0x1133c: 84, + 0x11340: 84, + 0x11366: 84, + 0x11367: 84, + 0x11368: 84, + 0x11369: 84, + 0x1136a: 84, + 0x1136b: 84, + 0x1136c: 84, + 0x11370: 84, + 0x11371: 84, + 0x11372: 84, + 0x11373: 84, + 0x11374: 84, + 0x11438: 84, + 0x11439: 84, + 0x1143a: 84, + 0x1143b: 84, + 0x1143c: 84, + 0x1143d: 84, + 0x1143e: 84, + 0x1143f: 84, + 0x11442: 84, + 0x11443: 84, + 0x11444: 84, + 0x11446: 84, + 0x1145e: 84, + 0x114b3: 84, + 0x114b4: 84, + 0x114b5: 84, + 0x114b6: 84, + 0x114b7: 84, + 0x114b8: 84, + 0x114ba: 84, + 0x114bf: 84, + 0x114c0: 84, + 0x114c2: 84, + 0x114c3: 84, + 0x115b2: 84, + 0x115b3: 84, + 0x115b4: 84, + 0x115b5: 84, + 0x115bc: 84, + 0x115bd: 84, + 0x115bf: 84, + 0x115c0: 84, + 0x115dc: 84, + 0x115dd: 84, + 0x11633: 84, + 0x11634: 84, + 0x11635: 84, + 0x11636: 84, + 0x11637: 84, + 0x11638: 84, + 0x11639: 84, + 0x1163a: 84, + 0x1163d: 84, + 0x1163f: 84, + 0x11640: 84, + 0x116ab: 84, + 0x116ad: 84, + 0x116b0: 84, + 0x116b1: 84, + 0x116b2: 84, + 0x116b3: 84, + 0x116b4: 84, + 0x116b5: 84, + 0x116b7: 84, + 0x1171d: 84, + 0x1171e: 84, + 0x1171f: 84, + 0x11722: 84, + 0x11723: 84, + 0x11724: 84, + 0x11725: 84, + 0x11727: 84, + 0x11728: 84, + 0x11729: 84, + 0x1172a: 84, + 0x1172b: 84, + 0x1182f: 84, + 0x11830: 84, + 0x11831: 84, + 0x11832: 84, + 0x11833: 84, + 0x11834: 84, + 0x11835: 84, + 0x11836: 84, + 0x11837: 84, + 0x11839: 84, + 0x1183a: 84, + 0x1193b: 84, + 0x1193c: 84, + 0x1193e: 84, + 0x11943: 84, + 0x119d4: 84, + 0x119d5: 84, + 0x119d6: 84, + 0x119d7: 84, + 0x119da: 84, + 0x119db: 84, + 0x119e0: 84, + 0x11a01: 84, + 0x11a02: 84, + 0x11a03: 84, + 0x11a04: 84, + 0x11a05: 84, + 0x11a06: 84, + 0x11a07: 84, + 0x11a08: 84, + 0x11a09: 84, + 0x11a0a: 84, + 0x11a33: 84, + 0x11a34: 84, + 0x11a35: 84, + 0x11a36: 84, + 0x11a37: 84, + 0x11a38: 84, + 0x11a3b: 84, + 0x11a3c: 84, + 0x11a3d: 84, + 0x11a3e: 84, + 0x11a47: 84, + 0x11a51: 84, + 0x11a52: 84, + 0x11a53: 84, + 0x11a54: 84, + 0x11a55: 84, + 0x11a56: 84, + 0x11a59: 84, + 0x11a5a: 84, + 0x11a5b: 84, + 0x11a8a: 84, + 0x11a8b: 84, + 0x11a8c: 84, + 0x11a8d: 84, + 0x11a8e: 84, + 0x11a8f: 84, + 0x11a90: 84, + 0x11a91: 84, + 0x11a92: 84, + 0x11a93: 84, + 0x11a94: 84, + 0x11a95: 84, + 0x11a96: 84, + 0x11a98: 84, + 0x11a99: 84, + 0x11c30: 84, + 0x11c31: 84, + 0x11c32: 84, + 0x11c33: 84, + 0x11c34: 84, + 0x11c35: 84, + 0x11c36: 84, + 0x11c38: 84, + 0x11c39: 84, + 0x11c3a: 84, + 0x11c3b: 84, + 0x11c3c: 84, + 0x11c3d: 84, + 0x11c3f: 84, + 0x11c92: 84, + 0x11c93: 84, + 0x11c94: 84, + 0x11c95: 84, + 0x11c96: 84, + 0x11c97: 84, + 0x11c98: 84, + 0x11c99: 84, + 0x11c9a: 84, + 0x11c9b: 84, + 0x11c9c: 84, + 0x11c9d: 84, + 0x11c9e: 84, + 0x11c9f: 84, + 0x11ca0: 84, + 0x11ca1: 84, + 0x11ca2: 84, + 0x11ca3: 84, + 0x11ca4: 84, + 0x11ca5: 84, + 0x11ca6: 84, + 0x11ca7: 84, + 0x11caa: 84, + 0x11cab: 84, + 0x11cac: 84, + 0x11cad: 84, + 0x11cae: 84, + 0x11caf: 84, + 0x11cb0: 
84, + 0x11cb2: 84, + 0x11cb3: 84, + 0x11cb5: 84, + 0x11cb6: 84, + 0x11d31: 84, + 0x11d32: 84, + 0x11d33: 84, + 0x11d34: 84, + 0x11d35: 84, + 0x11d36: 84, + 0x11d3a: 84, + 0x11d3c: 84, + 0x11d3d: 84, + 0x11d3f: 84, + 0x11d40: 84, + 0x11d41: 84, + 0x11d42: 84, + 0x11d43: 84, + 0x11d44: 84, + 0x11d45: 84, + 0x11d47: 84, + 0x11d90: 84, + 0x11d91: 84, + 0x11d95: 84, + 0x11d97: 84, + 0x11ef3: 84, + 0x11ef4: 84, + 0x11f00: 84, + 0x11f01: 84, + 0x11f36: 84, + 0x11f37: 84, + 0x11f38: 84, + 0x11f39: 84, + 0x11f3a: 84, + 0x11f40: 84, + 0x11f42: 84, + 0x13430: 84, + 0x13431: 84, + 0x13432: 84, + 0x13433: 84, + 0x13434: 84, + 0x13435: 84, + 0x13436: 84, + 0x13437: 84, + 0x13438: 84, + 0x13439: 84, + 0x1343a: 84, + 0x1343b: 84, + 0x1343c: 84, + 0x1343d: 84, + 0x1343e: 84, + 0x1343f: 84, + 0x13440: 84, + 0x13447: 84, + 0x13448: 84, + 0x13449: 84, + 0x1344a: 84, + 0x1344b: 84, + 0x1344c: 84, + 0x1344d: 84, + 0x1344e: 84, + 0x1344f: 84, + 0x13450: 84, + 0x13451: 84, + 0x13452: 84, + 0x13453: 84, + 0x13454: 84, + 0x13455: 84, + 0x16af0: 84, + 0x16af1: 84, + 0x16af2: 84, + 0x16af3: 84, + 0x16af4: 84, + 0x16b30: 84, + 0x16b31: 84, + 0x16b32: 84, + 0x16b33: 84, + 0x16b34: 84, + 0x16b35: 84, + 0x16b36: 84, + 0x16f4f: 84, + 0x16f8f: 84, + 0x16f90: 84, + 0x16f91: 84, + 0x16f92: 84, + 0x16fe4: 84, + 0x1bc9d: 84, + 0x1bc9e: 84, + 0x1bca0: 84, + 0x1bca1: 84, + 0x1bca2: 84, + 0x1bca3: 84, + 0x1cf00: 84, + 0x1cf01: 84, + 0x1cf02: 84, + 0x1cf03: 84, + 0x1cf04: 84, + 0x1cf05: 84, + 0x1cf06: 84, + 0x1cf07: 84, + 0x1cf08: 84, + 0x1cf09: 84, + 0x1cf0a: 84, + 0x1cf0b: 84, + 0x1cf0c: 84, + 0x1cf0d: 84, + 0x1cf0e: 84, + 0x1cf0f: 84, + 0x1cf10: 84, + 0x1cf11: 84, + 0x1cf12: 84, + 0x1cf13: 84, + 0x1cf14: 84, + 0x1cf15: 84, + 0x1cf16: 84, + 0x1cf17: 84, + 0x1cf18: 84, + 0x1cf19: 84, + 0x1cf1a: 84, + 0x1cf1b: 84, + 0x1cf1c: 84, + 0x1cf1d: 84, + 0x1cf1e: 84, + 0x1cf1f: 84, + 0x1cf20: 84, + 0x1cf21: 84, + 0x1cf22: 84, + 0x1cf23: 84, + 0x1cf24: 84, + 0x1cf25: 84, + 0x1cf26: 84, + 0x1cf27: 84, + 0x1cf28: 84, + 0x1cf29: 84, + 0x1cf2a: 84, + 0x1cf2b: 84, + 0x1cf2c: 84, + 0x1cf2d: 84, + 0x1cf30: 84, + 0x1cf31: 84, + 0x1cf32: 84, + 0x1cf33: 84, + 0x1cf34: 84, + 0x1cf35: 84, + 0x1cf36: 84, + 0x1cf37: 84, + 0x1cf38: 84, + 0x1cf39: 84, + 0x1cf3a: 84, + 0x1cf3b: 84, + 0x1cf3c: 84, + 0x1cf3d: 84, + 0x1cf3e: 84, + 0x1cf3f: 84, + 0x1cf40: 84, + 0x1cf41: 84, + 0x1cf42: 84, + 0x1cf43: 84, + 0x1cf44: 84, + 0x1cf45: 84, + 0x1cf46: 84, + 0x1d167: 84, + 0x1d168: 84, + 0x1d169: 84, + 0x1d173: 84, + 0x1d174: 84, + 0x1d175: 84, + 0x1d176: 84, + 0x1d177: 84, + 0x1d178: 84, + 0x1d179: 84, + 0x1d17a: 84, + 0x1d17b: 84, + 0x1d17c: 84, + 0x1d17d: 84, + 0x1d17e: 84, + 0x1d17f: 84, + 0x1d180: 84, + 0x1d181: 84, + 0x1d182: 84, + 0x1d185: 84, + 0x1d186: 84, + 0x1d187: 84, + 0x1d188: 84, + 0x1d189: 84, + 0x1d18a: 84, + 0x1d18b: 84, + 0x1d1aa: 84, + 0x1d1ab: 84, + 0x1d1ac: 84, + 0x1d1ad: 84, + 0x1d242: 84, + 0x1d243: 84, + 0x1d244: 84, + 0x1da00: 84, + 0x1da01: 84, + 0x1da02: 84, + 0x1da03: 84, + 0x1da04: 84, + 0x1da05: 84, + 0x1da06: 84, + 0x1da07: 84, + 0x1da08: 84, + 0x1da09: 84, + 0x1da0a: 84, + 0x1da0b: 84, + 0x1da0c: 84, + 0x1da0d: 84, + 0x1da0e: 84, + 0x1da0f: 84, + 0x1da10: 84, + 0x1da11: 84, + 0x1da12: 84, + 0x1da13: 84, + 0x1da14: 84, + 0x1da15: 84, + 0x1da16: 84, + 0x1da17: 84, + 0x1da18: 84, + 0x1da19: 84, + 0x1da1a: 84, + 0x1da1b: 84, + 0x1da1c: 84, + 0x1da1d: 84, + 0x1da1e: 84, + 0x1da1f: 84, + 0x1da20: 84, + 0x1da21: 84, + 0x1da22: 84, + 0x1da23: 84, + 0x1da24: 84, + 0x1da25: 84, + 0x1da26: 84, + 0x1da27: 84, + 0x1da28: 84, + 0x1da29: 84, + 0x1da2a: 
84, + 0x1da2b: 84, + 0x1da2c: 84, + 0x1da2d: 84, + 0x1da2e: 84, + 0x1da2f: 84, + 0x1da30: 84, + 0x1da31: 84, + 0x1da32: 84, + 0x1da33: 84, + 0x1da34: 84, + 0x1da35: 84, + 0x1da36: 84, + 0x1da3b: 84, + 0x1da3c: 84, + 0x1da3d: 84, + 0x1da3e: 84, + 0x1da3f: 84, + 0x1da40: 84, + 0x1da41: 84, + 0x1da42: 84, + 0x1da43: 84, + 0x1da44: 84, + 0x1da45: 84, + 0x1da46: 84, + 0x1da47: 84, + 0x1da48: 84, + 0x1da49: 84, + 0x1da4a: 84, + 0x1da4b: 84, + 0x1da4c: 84, + 0x1da4d: 84, + 0x1da4e: 84, + 0x1da4f: 84, + 0x1da50: 84, + 0x1da51: 84, + 0x1da52: 84, + 0x1da53: 84, + 0x1da54: 84, + 0x1da55: 84, + 0x1da56: 84, + 0x1da57: 84, + 0x1da58: 84, + 0x1da59: 84, + 0x1da5a: 84, + 0x1da5b: 84, + 0x1da5c: 84, + 0x1da5d: 84, + 0x1da5e: 84, + 0x1da5f: 84, + 0x1da60: 84, + 0x1da61: 84, + 0x1da62: 84, + 0x1da63: 84, + 0x1da64: 84, + 0x1da65: 84, + 0x1da66: 84, + 0x1da67: 84, + 0x1da68: 84, + 0x1da69: 84, + 0x1da6a: 84, + 0x1da6b: 84, + 0x1da6c: 84, + 0x1da75: 84, + 0x1da84: 84, + 0x1da9b: 84, + 0x1da9c: 84, + 0x1da9d: 84, + 0x1da9e: 84, + 0x1da9f: 84, + 0x1daa1: 84, + 0x1daa2: 84, + 0x1daa3: 84, + 0x1daa4: 84, + 0x1daa5: 84, + 0x1daa6: 84, + 0x1daa7: 84, + 0x1daa8: 84, + 0x1daa9: 84, + 0x1daaa: 84, + 0x1daab: 84, + 0x1daac: 84, + 0x1daad: 84, + 0x1daae: 84, + 0x1daaf: 84, + 0x1e000: 84, + 0x1e001: 84, + 0x1e002: 84, + 0x1e003: 84, + 0x1e004: 84, + 0x1e005: 84, + 0x1e006: 84, + 0x1e008: 84, + 0x1e009: 84, + 0x1e00a: 84, + 0x1e00b: 84, + 0x1e00c: 84, + 0x1e00d: 84, + 0x1e00e: 84, + 0x1e00f: 84, + 0x1e010: 84, + 0x1e011: 84, + 0x1e012: 84, + 0x1e013: 84, + 0x1e014: 84, + 0x1e015: 84, + 0x1e016: 84, + 0x1e017: 84, + 0x1e018: 84, + 0x1e01b: 84, + 0x1e01c: 84, + 0x1e01d: 84, + 0x1e01e: 84, + 0x1e01f: 84, + 0x1e020: 84, + 0x1e021: 84, + 0x1e023: 84, + 0x1e024: 84, + 0x1e026: 84, + 0x1e027: 84, + 0x1e028: 84, + 0x1e029: 84, + 0x1e02a: 84, + 0x1e08f: 84, + 0x1e130: 84, + 0x1e131: 84, + 0x1e132: 84, + 0x1e133: 84, + 0x1e134: 84, + 0x1e135: 84, + 0x1e136: 84, + 0x1e2ae: 84, + 0x1e2ec: 84, + 0x1e2ed: 84, + 0x1e2ee: 84, + 0x1e2ef: 84, + 0x1e4ec: 84, + 0x1e4ed: 84, + 0x1e4ee: 84, + 0x1e4ef: 84, + 0x1e8d0: 84, + 0x1e8d1: 84, + 0x1e8d2: 84, + 0x1e8d3: 84, + 0x1e8d4: 84, + 0x1e8d5: 84, + 0x1e8d6: 84, 0x1e900: 68, 0x1e901: 68, 0x1e902: 68, @@ -928,7 +2680,351 @@ 0x1e941: 68, 0x1e942: 68, 0x1e943: 68, + 0x1e944: 84, + 0x1e945: 84, + 0x1e946: 84, + 0x1e947: 84, + 0x1e948: 84, + 0x1e949: 84, + 0x1e94a: 84, 0x1e94b: 84, + 0xe0001: 84, + 0xe0020: 84, + 0xe0021: 84, + 0xe0022: 84, + 0xe0023: 84, + 0xe0024: 84, + 0xe0025: 84, + 0xe0026: 84, + 0xe0027: 84, + 0xe0028: 84, + 0xe0029: 84, + 0xe002a: 84, + 0xe002b: 84, + 0xe002c: 84, + 0xe002d: 84, + 0xe002e: 84, + 0xe002f: 84, + 0xe0030: 84, + 0xe0031: 84, + 0xe0032: 84, + 0xe0033: 84, + 0xe0034: 84, + 0xe0035: 84, + 0xe0036: 84, + 0xe0037: 84, + 0xe0038: 84, + 0xe0039: 84, + 0xe003a: 84, + 0xe003b: 84, + 0xe003c: 84, + 0xe003d: 84, + 0xe003e: 84, + 0xe003f: 84, + 0xe0040: 84, + 0xe0041: 84, + 0xe0042: 84, + 0xe0043: 84, + 0xe0044: 84, + 0xe0045: 84, + 0xe0046: 84, + 0xe0047: 84, + 0xe0048: 84, + 0xe0049: 84, + 0xe004a: 84, + 0xe004b: 84, + 0xe004c: 84, + 0xe004d: 84, + 0xe004e: 84, + 0xe004f: 84, + 0xe0050: 84, + 0xe0051: 84, + 0xe0052: 84, + 0xe0053: 84, + 0xe0054: 84, + 0xe0055: 84, + 0xe0056: 84, + 0xe0057: 84, + 0xe0058: 84, + 0xe0059: 84, + 0xe005a: 84, + 0xe005b: 84, + 0xe005c: 84, + 0xe005d: 84, + 0xe005e: 84, + 0xe005f: 84, + 0xe0060: 84, + 0xe0061: 84, + 0xe0062: 84, + 0xe0063: 84, + 0xe0064: 84, + 0xe0065: 84, + 0xe0066: 84, + 0xe0067: 84, + 0xe0068: 84, + 0xe0069: 84, + 0xe006a: 84, + 
0xe006b: 84, + 0xe006c: 84, + 0xe006d: 84, + 0xe006e: 84, + 0xe006f: 84, + 0xe0070: 84, + 0xe0071: 84, + 0xe0072: 84, + 0xe0073: 84, + 0xe0074: 84, + 0xe0075: 84, + 0xe0076: 84, + 0xe0077: 84, + 0xe0078: 84, + 0xe0079: 84, + 0xe007a: 84, + 0xe007b: 84, + 0xe007c: 84, + 0xe007d: 84, + 0xe007e: 84, + 0xe007f: 84, + 0xe0100: 84, + 0xe0101: 84, + 0xe0102: 84, + 0xe0103: 84, + 0xe0104: 84, + 0xe0105: 84, + 0xe0106: 84, + 0xe0107: 84, + 0xe0108: 84, + 0xe0109: 84, + 0xe010a: 84, + 0xe010b: 84, + 0xe010c: 84, + 0xe010d: 84, + 0xe010e: 84, + 0xe010f: 84, + 0xe0110: 84, + 0xe0111: 84, + 0xe0112: 84, + 0xe0113: 84, + 0xe0114: 84, + 0xe0115: 84, + 0xe0116: 84, + 0xe0117: 84, + 0xe0118: 84, + 0xe0119: 84, + 0xe011a: 84, + 0xe011b: 84, + 0xe011c: 84, + 0xe011d: 84, + 0xe011e: 84, + 0xe011f: 84, + 0xe0120: 84, + 0xe0121: 84, + 0xe0122: 84, + 0xe0123: 84, + 0xe0124: 84, + 0xe0125: 84, + 0xe0126: 84, + 0xe0127: 84, + 0xe0128: 84, + 0xe0129: 84, + 0xe012a: 84, + 0xe012b: 84, + 0xe012c: 84, + 0xe012d: 84, + 0xe012e: 84, + 0xe012f: 84, + 0xe0130: 84, + 0xe0131: 84, + 0xe0132: 84, + 0xe0133: 84, + 0xe0134: 84, + 0xe0135: 84, + 0xe0136: 84, + 0xe0137: 84, + 0xe0138: 84, + 0xe0139: 84, + 0xe013a: 84, + 0xe013b: 84, + 0xe013c: 84, + 0xe013d: 84, + 0xe013e: 84, + 0xe013f: 84, + 0xe0140: 84, + 0xe0141: 84, + 0xe0142: 84, + 0xe0143: 84, + 0xe0144: 84, + 0xe0145: 84, + 0xe0146: 84, + 0xe0147: 84, + 0xe0148: 84, + 0xe0149: 84, + 0xe014a: 84, + 0xe014b: 84, + 0xe014c: 84, + 0xe014d: 84, + 0xe014e: 84, + 0xe014f: 84, + 0xe0150: 84, + 0xe0151: 84, + 0xe0152: 84, + 0xe0153: 84, + 0xe0154: 84, + 0xe0155: 84, + 0xe0156: 84, + 0xe0157: 84, + 0xe0158: 84, + 0xe0159: 84, + 0xe015a: 84, + 0xe015b: 84, + 0xe015c: 84, + 0xe015d: 84, + 0xe015e: 84, + 0xe015f: 84, + 0xe0160: 84, + 0xe0161: 84, + 0xe0162: 84, + 0xe0163: 84, + 0xe0164: 84, + 0xe0165: 84, + 0xe0166: 84, + 0xe0167: 84, + 0xe0168: 84, + 0xe0169: 84, + 0xe016a: 84, + 0xe016b: 84, + 0xe016c: 84, + 0xe016d: 84, + 0xe016e: 84, + 0xe016f: 84, + 0xe0170: 84, + 0xe0171: 84, + 0xe0172: 84, + 0xe0173: 84, + 0xe0174: 84, + 0xe0175: 84, + 0xe0176: 84, + 0xe0177: 84, + 0xe0178: 84, + 0xe0179: 84, + 0xe017a: 84, + 0xe017b: 84, + 0xe017c: 84, + 0xe017d: 84, + 0xe017e: 84, + 0xe017f: 84, + 0xe0180: 84, + 0xe0181: 84, + 0xe0182: 84, + 0xe0183: 84, + 0xe0184: 84, + 0xe0185: 84, + 0xe0186: 84, + 0xe0187: 84, + 0xe0188: 84, + 0xe0189: 84, + 0xe018a: 84, + 0xe018b: 84, + 0xe018c: 84, + 0xe018d: 84, + 0xe018e: 84, + 0xe018f: 84, + 0xe0190: 84, + 0xe0191: 84, + 0xe0192: 84, + 0xe0193: 84, + 0xe0194: 84, + 0xe0195: 84, + 0xe0196: 84, + 0xe0197: 84, + 0xe0198: 84, + 0xe0199: 84, + 0xe019a: 84, + 0xe019b: 84, + 0xe019c: 84, + 0xe019d: 84, + 0xe019e: 84, + 0xe019f: 84, + 0xe01a0: 84, + 0xe01a1: 84, + 0xe01a2: 84, + 0xe01a3: 84, + 0xe01a4: 84, + 0xe01a5: 84, + 0xe01a6: 84, + 0xe01a7: 84, + 0xe01a8: 84, + 0xe01a9: 84, + 0xe01aa: 84, + 0xe01ab: 84, + 0xe01ac: 84, + 0xe01ad: 84, + 0xe01ae: 84, + 0xe01af: 84, + 0xe01b0: 84, + 0xe01b1: 84, + 0xe01b2: 84, + 0xe01b3: 84, + 0xe01b4: 84, + 0xe01b5: 84, + 0xe01b6: 84, + 0xe01b7: 84, + 0xe01b8: 84, + 0xe01b9: 84, + 0xe01ba: 84, + 0xe01bb: 84, + 0xe01bc: 84, + 0xe01bd: 84, + 0xe01be: 84, + 0xe01bf: 84, + 0xe01c0: 84, + 0xe01c1: 84, + 0xe01c2: 84, + 0xe01c3: 84, + 0xe01c4: 84, + 0xe01c5: 84, + 0xe01c6: 84, + 0xe01c7: 84, + 0xe01c8: 84, + 0xe01c9: 84, + 0xe01ca: 84, + 0xe01cb: 84, + 0xe01cc: 84, + 0xe01cd: 84, + 0xe01ce: 84, + 0xe01cf: 84, + 0xe01d0: 84, + 0xe01d1: 84, + 0xe01d2: 84, + 0xe01d3: 84, + 0xe01d4: 84, + 0xe01d5: 84, + 0xe01d6: 84, + 0xe01d7: 84, + 
0xe01d8: 84, + 0xe01d9: 84, + 0xe01da: 84, + 0xe01db: 84, + 0xe01dc: 84, + 0xe01dd: 84, + 0xe01de: 84, + 0xe01df: 84, + 0xe01e0: 84, + 0xe01e1: 84, + 0xe01e2: 84, + 0xe01e3: 84, + 0xe01e4: 84, + 0xe01e5: 84, + 0xe01e6: 84, + 0xe01e7: 84, + 0xe01e8: 84, + 0xe01e9: 84, + 0xe01ea: 84, + 0xe01eb: 84, + 0xe01ec: 84, + 0xe01ed: 84, + 0xe01ee: 84, + 0xe01ef: 84, } codepoint_classes = { 'PVALID': ( @@ -2110,7 +4206,6 @@ 0x1e01b0001e022, 0x1e0230001e025, 0x1e0260001e02b, - 0x1e0300001e06e, 0x1e08f0001e090, 0x1e1000001e12d, 0x1e1300001e13e, diff --git a/src/pip/_vendor/idna/package_data.py b/src/pip/_vendor/idna/package_data.py index c5b7220c970..ed811133633 100644 --- a/src/pip/_vendor/idna/package_data.py +++ b/src/pip/_vendor/idna/package_data.py @@ -1,2 +1,2 @@ -__version__ = '3.6' +__version__ = '3.7' diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index ab207435d0d..15406823f3e 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -10,7 +10,7 @@ pyproject-hooks==1.0.0 requests==2.31.0 certifi==2024.2.2 chardet==5.2.0 - idna==3.6 + idna==3.7 urllib3==1.26.18 rich==13.7.0 pygments==2.17.2 From 26ab64a3534bf2f575ccee2af61bacccc3d8c3ee Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Fri, 3 May 2024 21:04:52 +0100 Subject: [PATCH 061/113] Upgrade rich to 13.7.1 --- news/rich.vendor.rst | 2 +- src/pip/_vendor/rich/_cell_widths.py | 367 ++++++++++++++------------- src/pip/_vendor/rich/text.py | 2 +- src/pip/_vendor/vendor.txt | 2 +- 4 files changed, 188 insertions(+), 185 deletions(-) diff --git a/news/rich.vendor.rst b/news/rich.vendor.rst index 586a4617228..b3c8f257742 100644 --- a/news/rich.vendor.rst +++ b/news/rich.vendor.rst @@ -1 +1 @@ -Upgrade rich to 13.7.0 +Upgrade rich to 13.7.1 diff --git a/src/pip/_vendor/rich/_cell_widths.py b/src/pip/_vendor/rich/_cell_widths.py index 36286df379e..608ae3a75d1 100644 --- a/src/pip/_vendor/rich/_cell_widths.py +++ b/src/pip/_vendor/rich/_cell_widths.py @@ -4,6 +4,7 @@ (0, 0, 0), (1, 31, -1), (127, 159, -1), + (173, 173, 0), (768, 879, 0), (1155, 1161, 0), (1425, 1469, 0), @@ -11,13 +12,16 @@ (1473, 1474, 0), (1476, 1477, 0), (1479, 1479, 0), + (1536, 1541, 0), (1552, 1562, 0), + (1564, 1564, 0), (1611, 1631, 0), (1648, 1648, 0), - (1750, 1756, 0), + (1750, 1757, 0), (1759, 1764, 0), (1767, 1768, 0), (1770, 1773, 0), + (1807, 1807, 0), (1809, 1809, 0), (1840, 1866, 0), (1958, 1968, 0), @@ -28,149 +32,137 @@ (2085, 2087, 0), (2089, 2093, 0), (2137, 2139, 0), - (2259, 2273, 0), - (2275, 2306, 0), - (2362, 2362, 0), - (2364, 2364, 0), - (2369, 2376, 0), - (2381, 2381, 0), + (2192, 2193, 0), + (2200, 2207, 0), + (2250, 2307, 0), + (2362, 2364, 0), + (2366, 2383, 0), (2385, 2391, 0), (2402, 2403, 0), - (2433, 2433, 0), + (2433, 2435, 0), (2492, 2492, 0), - (2497, 2500, 0), - (2509, 2509, 0), + (2494, 2500, 0), + (2503, 2504, 0), + (2507, 2509, 0), + (2519, 2519, 0), (2530, 2531, 0), (2558, 2558, 0), - (2561, 2562, 0), + (2561, 2563, 0), (2620, 2620, 0), - (2625, 2626, 0), + (2622, 2626, 0), (2631, 2632, 0), (2635, 2637, 0), (2641, 2641, 0), (2672, 2673, 0), (2677, 2677, 0), - (2689, 2690, 0), + (2689, 2691, 0), (2748, 2748, 0), - (2753, 2757, 0), - (2759, 2760, 0), - (2765, 2765, 0), + (2750, 2757, 0), + (2759, 2761, 0), + (2763, 2765, 0), (2786, 2787, 0), (2810, 2815, 0), - (2817, 2817, 0), + (2817, 2819, 0), (2876, 2876, 0), - (2879, 2879, 0), - (2881, 2884, 0), - (2893, 2893, 0), - (2901, 2902, 0), + (2878, 2884, 0), + (2887, 2888, 0), + (2891, 2893, 0), + (2901, 2903, 0), (2914, 2915, 0), (2946, 2946, 0), - 
(3008, 3008, 0), - (3021, 3021, 0), - (3072, 3072, 0), - (3076, 3076, 0), - (3134, 3136, 0), + (3006, 3010, 0), + (3014, 3016, 0), + (3018, 3021, 0), + (3031, 3031, 0), + (3072, 3076, 0), + (3132, 3132, 0), + (3134, 3140, 0), (3142, 3144, 0), (3146, 3149, 0), (3157, 3158, 0), (3170, 3171, 0), - (3201, 3201, 0), + (3201, 3203, 0), (3260, 3260, 0), - (3263, 3263, 0), - (3270, 3270, 0), - (3276, 3277, 0), + (3262, 3268, 0), + (3270, 3272, 0), + (3274, 3277, 0), + (3285, 3286, 0), (3298, 3299, 0), - (3328, 3329, 0), + (3315, 3315, 0), + (3328, 3331, 0), (3387, 3388, 0), - (3393, 3396, 0), - (3405, 3405, 0), + (3390, 3396, 0), + (3398, 3400, 0), + (3402, 3405, 0), + (3415, 3415, 0), (3426, 3427, 0), - (3457, 3457, 0), + (3457, 3459, 0), (3530, 3530, 0), - (3538, 3540, 0), + (3535, 3540, 0), (3542, 3542, 0), + (3544, 3551, 0), + (3570, 3571, 0), (3633, 3633, 0), (3636, 3642, 0), (3655, 3662, 0), (3761, 3761, 0), (3764, 3772, 0), - (3784, 3789, 0), + (3784, 3790, 0), (3864, 3865, 0), (3893, 3893, 0), (3895, 3895, 0), (3897, 3897, 0), - (3953, 3966, 0), - (3968, 3972, 0), + (3902, 3903, 0), + (3953, 3972, 0), (3974, 3975, 0), (3981, 3991, 0), (3993, 4028, 0), (4038, 4038, 0), - (4141, 4144, 0), - (4146, 4151, 0), - (4153, 4154, 0), - (4157, 4158, 0), - (4184, 4185, 0), + (4139, 4158, 0), + (4182, 4185, 0), (4190, 4192, 0), + (4194, 4196, 0), + (4199, 4205, 0), (4209, 4212, 0), - (4226, 4226, 0), - (4229, 4230, 0), - (4237, 4237, 0), - (4253, 4253, 0), + (4226, 4237, 0), + (4239, 4239, 0), + (4250, 4253, 0), (4352, 4447, 2), + (4448, 4607, 0), (4957, 4959, 0), - (5906, 5908, 0), + (5906, 5909, 0), (5938, 5940, 0), (5970, 5971, 0), (6002, 6003, 0), - (6068, 6069, 0), - (6071, 6077, 0), - (6086, 6086, 0), - (6089, 6099, 0), + (6068, 6099, 0), (6109, 6109, 0), - (6155, 6157, 0), + (6155, 6159, 0), (6277, 6278, 0), (6313, 6313, 0), - (6432, 6434, 0), - (6439, 6440, 0), - (6450, 6450, 0), - (6457, 6459, 0), - (6679, 6680, 0), - (6683, 6683, 0), - (6742, 6742, 0), - (6744, 6750, 0), - (6752, 6752, 0), - (6754, 6754, 0), - (6757, 6764, 0), - (6771, 6780, 0), + (6432, 6443, 0), + (6448, 6459, 0), + (6679, 6683, 0), + (6741, 6750, 0), + (6752, 6780, 0), (6783, 6783, 0), - (6832, 6848, 0), - (6912, 6915, 0), - (6964, 6964, 0), - (6966, 6970, 0), - (6972, 6972, 0), - (6978, 6978, 0), + (6832, 6862, 0), + (6912, 6916, 0), + (6964, 6980, 0), (7019, 7027, 0), - (7040, 7041, 0), - (7074, 7077, 0), - (7080, 7081, 0), - (7083, 7085, 0), - (7142, 7142, 0), - (7144, 7145, 0), - (7149, 7149, 0), - (7151, 7153, 0), - (7212, 7219, 0), - (7222, 7223, 0), + (7040, 7042, 0), + (7073, 7085, 0), + (7142, 7155, 0), + (7204, 7223, 0), (7376, 7378, 0), - (7380, 7392, 0), - (7394, 7400, 0), + (7380, 7400, 0), (7405, 7405, 0), (7412, 7412, 0), - (7416, 7417, 0), - (7616, 7673, 0), - (7675, 7679, 0), + (7415, 7417, 0), + (7616, 7679, 0), (8203, 8207, 0), (8232, 8238, 0), - (8288, 8291, 0), + (8288, 8292, 0), + (8294, 8303, 0), (8400, 8432, 0), (8986, 8987, 2), (9001, 9002, 2), @@ -212,17 +204,16 @@ (11904, 11929, 2), (11931, 12019, 2), (12032, 12245, 2), - (12272, 12283, 2), - (12288, 12329, 2), - (12330, 12333, 0), - (12334, 12350, 2), + (12272, 12329, 2), + (12330, 12335, 0), + (12336, 12350, 2), (12353, 12438, 2), (12441, 12442, 0), (12443, 12543, 2), (12549, 12591, 2), (12593, 12686, 2), (12688, 12771, 2), - (12784, 12830, 2), + (12783, 12830, 2), (12832, 12871, 2), (12880, 19903, 2), (19968, 42124, 2), @@ -234,36 +225,33 @@ (43010, 43010, 0), (43014, 43014, 0), (43019, 43019, 0), - (43045, 43046, 0), + (43043, 43047, 0), 
(43052, 43052, 0), - (43204, 43205, 0), + (43136, 43137, 0), + (43188, 43205, 0), (43232, 43249, 0), (43263, 43263, 0), (43302, 43309, 0), - (43335, 43345, 0), + (43335, 43347, 0), (43360, 43388, 2), - (43392, 43394, 0), - (43443, 43443, 0), - (43446, 43449, 0), - (43452, 43453, 0), + (43392, 43395, 0), + (43443, 43456, 0), (43493, 43493, 0), - (43561, 43566, 0), - (43569, 43570, 0), - (43573, 43574, 0), + (43561, 43574, 0), (43587, 43587, 0), - (43596, 43596, 0), - (43644, 43644, 0), + (43596, 43597, 0), + (43643, 43645, 0), (43696, 43696, 0), (43698, 43700, 0), (43703, 43704, 0), (43710, 43711, 0), (43713, 43713, 0), - (43756, 43757, 0), - (43766, 43766, 0), - (44005, 44005, 0), - (44008, 44008, 0), - (44013, 44013, 0), + (43755, 43759, 0), + (43765, 43766, 0), + (44003, 44010, 0), + (44012, 44013, 0), (44032, 55203, 2), + (55216, 55295, 0), (63744, 64255, 2), (64286, 64286, 0), (65024, 65039, 0), @@ -272,8 +260,10 @@ (65072, 65106, 2), (65108, 65126, 2), (65128, 65131, 2), + (65279, 65279, 0), (65281, 65376, 2), (65504, 65510, 2), + (65529, 65531, 0), (66045, 66045, 0), (66272, 66272, 0), (66422, 66426, 0), @@ -285,102 +275,108 @@ (68325, 68326, 0), (68900, 68903, 0), (69291, 69292, 0), + (69373, 69375, 0), (69446, 69456, 0), - (69633, 69633, 0), + (69506, 69509, 0), + (69632, 69634, 0), (69688, 69702, 0), - (69759, 69761, 0), - (69811, 69814, 0), - (69817, 69818, 0), + (69744, 69744, 0), + (69747, 69748, 0), + (69759, 69762, 0), + (69808, 69818, 0), + (69821, 69821, 0), + (69826, 69826, 0), + (69837, 69837, 0), (69888, 69890, 0), - (69927, 69931, 0), - (69933, 69940, 0), + (69927, 69940, 0), + (69957, 69958, 0), (70003, 70003, 0), - (70016, 70017, 0), - (70070, 70078, 0), + (70016, 70018, 0), + (70067, 70080, 0), (70089, 70092, 0), - (70095, 70095, 0), - (70191, 70193, 0), - (70196, 70196, 0), - (70198, 70199, 0), + (70094, 70095, 0), + (70188, 70199, 0), (70206, 70206, 0), - (70367, 70367, 0), - (70371, 70378, 0), - (70400, 70401, 0), + (70209, 70209, 0), + (70367, 70378, 0), + (70400, 70403, 0), (70459, 70460, 0), - (70464, 70464, 0), + (70462, 70468, 0), + (70471, 70472, 0), + (70475, 70477, 0), + (70487, 70487, 0), + (70498, 70499, 0), (70502, 70508, 0), (70512, 70516, 0), - (70712, 70719, 0), - (70722, 70724, 0), - (70726, 70726, 0), + (70709, 70726, 0), (70750, 70750, 0), - (70835, 70840, 0), - (70842, 70842, 0), - (70847, 70848, 0), - (70850, 70851, 0), - (71090, 71093, 0), - (71100, 71101, 0), - (71103, 71104, 0), + (70832, 70851, 0), + (71087, 71093, 0), + (71096, 71104, 0), (71132, 71133, 0), - (71219, 71226, 0), - (71229, 71229, 0), - (71231, 71232, 0), - (71339, 71339, 0), - (71341, 71341, 0), - (71344, 71349, 0), - (71351, 71351, 0), - (71453, 71455, 0), - (71458, 71461, 0), - (71463, 71467, 0), - (71727, 71735, 0), - (71737, 71738, 0), - (71995, 71996, 0), - (71998, 71998, 0), - (72003, 72003, 0), - (72148, 72151, 0), - (72154, 72155, 0), - (72160, 72160, 0), + (71216, 71232, 0), + (71339, 71351, 0), + (71453, 71467, 0), + (71724, 71738, 0), + (71984, 71989, 0), + (71991, 71992, 0), + (71995, 71998, 0), + (72000, 72000, 0), + (72002, 72003, 0), + (72145, 72151, 0), + (72154, 72160, 0), + (72164, 72164, 0), (72193, 72202, 0), - (72243, 72248, 0), + (72243, 72249, 0), (72251, 72254, 0), (72263, 72263, 0), - (72273, 72278, 0), - (72281, 72283, 0), - (72330, 72342, 0), - (72344, 72345, 0), - (72752, 72758, 0), - (72760, 72765, 0), - (72767, 72767, 0), + (72273, 72283, 0), + (72330, 72345, 0), + (72751, 72758, 0), + (72760, 72767, 0), (72850, 72871, 0), - (72874, 72880, 0), - 
(72882, 72883, 0), - (72885, 72886, 0), + (72873, 72886, 0), (73009, 73014, 0), (73018, 73018, 0), (73020, 73021, 0), (73023, 73029, 0), (73031, 73031, 0), + (73098, 73102, 0), (73104, 73105, 0), - (73109, 73109, 0), - (73111, 73111, 0), - (73459, 73460, 0), + (73107, 73111, 0), + (73459, 73462, 0), + (73472, 73473, 0), + (73475, 73475, 0), + (73524, 73530, 0), + (73534, 73538, 0), + (78896, 78912, 0), + (78919, 78933, 0), (92912, 92916, 0), (92976, 92982, 0), (94031, 94031, 0), + (94033, 94087, 0), (94095, 94098, 0), (94176, 94179, 2), (94180, 94180, 0), - (94192, 94193, 2), + (94192, 94193, 0), (94208, 100343, 2), (100352, 101589, 2), (101632, 101640, 2), - (110592, 110878, 2), + (110576, 110579, 2), + (110581, 110587, 2), + (110589, 110590, 2), + (110592, 110882, 2), + (110898, 110898, 2), (110928, 110930, 2), + (110933, 110933, 2), (110948, 110951, 2), (110960, 111355, 2), (113821, 113822, 0), - (119143, 119145, 0), - (119163, 119170, 0), + (113824, 113827, 0), + (118528, 118573, 0), + (118576, 118598, 0), + (119141, 119145, 0), + (119149, 119170, 0), (119173, 119179, 0), (119210, 119213, 0), (119362, 119364, 0), @@ -395,8 +391,11 @@ (122907, 122913, 0), (122915, 122916, 0), (122918, 122922, 0), + (123023, 123023, 0), (123184, 123190, 0), + (123566, 123566, 0), (123628, 123631, 0), + (124140, 124143, 0), (125136, 125142, 0), (125252, 125258, 0), (126980, 126980, 2), @@ -416,7 +415,9 @@ (127951, 127955, 2), (127968, 127984, 2), (127988, 127988, 2), - (127992, 128062, 2), + (127992, 127994, 2), + (127995, 127999, 0), + (128000, 128062, 2), (128064, 128064, 2), (128066, 128252, 2), (128255, 128317, 2), @@ -430,22 +431,24 @@ (128716, 128716, 2), (128720, 128722, 2), (128725, 128727, 2), + (128732, 128735, 2), (128747, 128748, 2), (128756, 128764, 2), (128992, 129003, 2), + (129008, 129008, 2), (129292, 129338, 2), (129340, 129349, 2), - (129351, 129400, 2), - (129402, 129483, 2), - (129485, 129535, 2), - (129648, 129652, 2), - (129656, 129658, 2), - (129664, 129670, 2), - (129680, 129704, 2), - (129712, 129718, 2), - (129728, 129730, 2), - (129744, 129750, 2), + (129351, 129535, 2), + (129648, 129660, 2), + (129664, 129672, 2), + (129680, 129725, 2), + (129727, 129733, 2), + (129742, 129755, 2), + (129760, 129768, 2), + (129776, 129784, 2), (131072, 196605, 2), (196608, 262141, 2), + (917505, 917505, 0), + (917536, 917631, 0), (917760, 917999, 0), ] diff --git a/src/pip/_vendor/rich/text.py b/src/pip/_vendor/rich/text.py index 09f881e7296..209aa943483 100644 --- a/src/pip/_vendor/rich/text.py +++ b/src/pip/_vendor/rich/text.py @@ -38,7 +38,7 @@ _re_whitespace = re.compile(r"\s+$") TextType = Union[str, "Text"] -"""A plain string or a [Text][rich.text.Text] instance.""" +"""A plain string or a :class:`Text` instance.""" GetStyleCallable = Callable[[str], Optional[StyleType]] diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index 15406823f3e..bba94517734 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -12,7 +12,7 @@ requests==2.31.0 chardet==5.2.0 idna==3.7 urllib3==1.26.18 -rich==13.7.0 +rich==13.7.1 pygments==2.17.2 typing_extensions==4.9.0 resolvelib==1.0.1 From f9d5e7d9f702472ec06f46aee3cd426439e2c7a9 Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Fri, 3 May 2024 21:05:34 +0100 Subject: [PATCH 062/113] Upgrade typing_extensions to 4.11.0 --- news/typing_extensions.vendor.rst | 2 +- src/pip/_vendor/typing_extensions.py | 433 ++++++++++++++++++++++----- src/pip/_vendor/vendor.txt | 2 +- 3 files changed, 358 insertions(+), 79 deletions(-) 
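
Note: this bump vendors the new ``TypeIs`` special form (PEP 742), added
upstream in typing_extensions 4.10 and refined in 4.11. A minimal, runnable
sketch of the narrowing it enables, adapted from the vendored docstring's own
``is_awaitable`` example (the in-branch comments are illustrative, not part of
the vendored code; within pip the module is importable as
``pip._vendor.typing_extensions``)::

    from typing import Any, Awaitable, Union

    from typing_extensions import TypeIs

    def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]:
        # Returning True narrows ``val`` to Awaitable[Any] in the caller;
        # returning False narrows it to exclude Awaitable[Any].
        return hasattr(val, "__await__")

    def f(val: Union[int, Awaitable[int]]) -> None:
        if is_awaitable(val):
            ...  # type checkers see ``val: Awaitable[int]`` here
        else:
            ...  # and ``val: int`` here
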
diff --git a/news/typing_extensions.vendor.rst b/news/typing_extensions.vendor.rst index d23d4bbced0..c605c366678 100644 --- a/news/typing_extensions.vendor.rst +++ b/news/typing_extensions.vendor.rst @@ -1 +1 @@ -Upgrade typing_extensions to 4.9.0 +Upgrade typing_extensions to 4.11.0 diff --git a/src/pip/_vendor/typing_extensions.py b/src/pip/_vendor/typing_extensions.py index 351036faf7f..d60315a6adc 100644 --- a/src/pip/_vendor/typing_extensions.py +++ b/src/pip/_vendor/typing_extensions.py @@ -83,6 +83,7 @@ 'TypeAlias', 'TypeAliasType', 'TypeGuard', + 'TypeIs', 'TYPE_CHECKING', 'Never', 'NoReturn', @@ -146,27 +147,6 @@ def __repr__(self): _marker = _Sentinel() -def _check_generic(cls, parameters, elen=_marker): - """Check correct count for parameters of a generic cls (internal helper). - This gives a nice error message in case of count mismatch. - """ - if not elen: - raise TypeError(f"{cls} is not a generic class") - if elen is _marker: - if not hasattr(cls, "__parameters__") or not cls.__parameters__: - raise TypeError(f"{cls} is not a generic class") - elen = len(cls.__parameters__) - alen = len(parameters) - if alen != elen: - if hasattr(cls, "__parameters__"): - parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] - num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) - if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): - return - raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};" - f" actual {alen}, expected {elen}") - - if sys.version_info >= (3, 10): def _should_collect_from_parameters(t): return isinstance( @@ -180,27 +160,6 @@ def _should_collect_from_parameters(t): return isinstance(t, typing._GenericAlias) and not t._special -def _collect_type_vars(types, typevar_types=None): - """Collect all type variable contained in types in order of - first appearance (lexicographic order). For example:: - - _collect_type_vars((T, List[S, T])) == (T, S) - """ - if typevar_types is None: - typevar_types = typing.TypeVar - tvars = [] - for t in types: - if ( - isinstance(t, typevar_types) and - t not in tvars and - not _is_unpack(t) - ): - tvars.append(t) - if _should_collect_from_parameters(t): - tvars.extend([t for t in t.__parameters__ if t not in tvars]) - return tuple(tvars) - - NoReturn = typing.NoReturn # Some unconstrained type variables. These are used by the container types. @@ -473,7 +432,7 @@ def clear_overloads(): "_is_runtime_protocol", "__dict__", "__slots__", "__parameters__", "__orig_bases__", "__module__", "_MutableMapping__marker", "__doc__", "__subclasshook__", "__orig_class__", "__init__", "__new__", - "__protocol_attrs__", "__callable_proto_members_only__", + "__protocol_attrs__", "__non_callable_proto_members__", "__match_args__", } @@ -521,6 +480,22 @@ def _no_init(self, *args, **kwargs): if type(self)._is_protocol: raise TypeError('Protocols cannot be instantiated') + def _type_check_issubclass_arg_1(arg): + """Raise TypeError if `arg` is not an instance of `type` + in `issubclass(arg, )`. + + In most cases, this is verified by type.__subclasscheck__. + Checking it again unnecessarily would slow down issubclass() checks, + so, we don't perform this check unless we absolutely have to. + + For various error paths, however, + we want to ensure that *this* error message is shown to the user + where relevant, rather than a typing.py-specific error message. + """ + if not isinstance(arg, type): + # Same error message as for issubclass(1, int). 
+ raise TypeError('issubclass() arg 1 must be a class') + # Inheriting from typing._ProtocolMeta isn't actually desirable, # but is necessary to allow typing.Protocol and typing_extensions.Protocol # to mix without getting TypeErrors about "metaclass conflict" @@ -551,11 +526,6 @@ def __init__(cls, *args, **kwargs): abc.ABCMeta.__init__(cls, *args, **kwargs) if getattr(cls, "_is_protocol", False): cls.__protocol_attrs__ = _get_protocol_attrs(cls) - # PEP 544 prohibits using issubclass() - # with protocols that have non-method members. - cls.__callable_proto_members_only__ = all( - callable(getattr(cls, attr, None)) for attr in cls.__protocol_attrs__ - ) def __subclasscheck__(cls, other): if cls is Protocol: @@ -564,26 +534,23 @@ def __subclasscheck__(cls, other): getattr(cls, '_is_protocol', False) and not _allow_reckless_class_checks() ): - if not isinstance(other, type): - # Same error message as for issubclass(1, int). - raise TypeError('issubclass() arg 1 must be a class') + if not getattr(cls, '_is_runtime_protocol', False): + _type_check_issubclass_arg_1(other) + raise TypeError( + "Instance and class checks can only be used with " + "@runtime_checkable protocols" + ) if ( - not cls.__callable_proto_members_only__ + # this attribute is set by @runtime_checkable: + cls.__non_callable_proto_members__ and cls.__dict__.get("__subclasshook__") is _proto_hook ): - non_method_attrs = sorted( - attr for attr in cls.__protocol_attrs__ - if not callable(getattr(cls, attr, None)) - ) + _type_check_issubclass_arg_1(other) + non_method_attrs = sorted(cls.__non_callable_proto_members__) raise TypeError( "Protocols with non-method members don't support issubclass()." f" Non-method members: {str(non_method_attrs)[1:-1]}." ) - if not getattr(cls, '_is_runtime_protocol', False): - raise TypeError( - "Instance and class checks can only be used with " - "@runtime_checkable protocols" - ) return abc.ABCMeta.__subclasscheck__(cls, other) def __instancecheck__(cls, instance): @@ -610,7 +577,8 @@ def __instancecheck__(cls, instance): val = inspect.getattr_static(instance, attr) except AttributeError: break - if val is None and callable(getattr(cls, attr, None)): + # this attribute is set by @runtime_checkable: + if val is None and attr not in cls.__non_callable_proto_members__: break else: return True @@ -678,8 +646,58 @@ def __init_subclass__(cls, *args, **kwargs): cls.__init__ = _no_init +if sys.version_info >= (3, 13): + runtime_checkable = typing.runtime_checkable +else: + def runtime_checkable(cls): + """Mark a protocol class as a runtime protocol. + + Such protocol can be used with isinstance() and issubclass(). + Raise TypeError if applied to a non-protocol class. + This allows a simple-minded structural check very similar to + one trick ponies in collections.abc such as Iterable. + + For example:: + + @runtime_checkable + class Closable(Protocol): + def close(self): ... + + assert isinstance(open('/some/file'), Closable) + + Warning: this will check only the presence of the required methods, + not their type signatures! + """ + if not issubclass(cls, typing.Generic) or not getattr(cls, '_is_protocol', False): + raise TypeError('@runtime_checkable can be only applied to protocol classes,' + ' got %r' % cls) + cls._is_runtime_protocol = True + + # Only execute the following block if it's a typing_extensions.Protocol class. + # typing.Protocol classes don't need it. + if isinstance(cls, _ProtocolMeta): + # PEP 544 prohibits using issubclass() + # with protocols that have non-method members. 
+ # See gh-113320 for why we compute this attribute here, + # rather than in `_ProtocolMeta.__init__` + cls.__non_callable_proto_members__ = set() + for attr in cls.__protocol_attrs__: + try: + is_callable = callable(getattr(cls, attr, None)) + except Exception as e: + raise TypeError( + f"Failed to determine whether protocol member {attr!r} " + "is a method member" + ) from e + else: + if not is_callable: + cls.__non_callable_proto_members__.add(attr) + + return cls + + # The "runtime" alias exists for backwards compatibility. -runtime = runtime_checkable = typing.runtime_checkable +runtime = runtime_checkable # Our version of runtime-checkable protocols is faster on Python 3.8-3.11 @@ -774,7 +792,11 @@ def inner(func): return inner -if hasattr(typing, "ReadOnly"): +# Update this to something like >=3.13.0b1 if and when +# PEP 728 is implemented in CPython +_PEP_728_IMPLEMENTED = False + +if _PEP_728_IMPLEMENTED: # The standard library TypedDict in Python 3.8 does not store runtime information # about which (if any) keys are optional. See https://bugs.python.org/issue38834 # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" @@ -785,7 +807,8 @@ def inner(func): # Aaaand on 3.12 we add __orig_bases__ to TypedDict # to enable better runtime introspection. # On 3.13 we deprecate some odd ways of creating TypedDicts. - # PEP 705 proposes adding the ReadOnly[] qualifier. + # Also on 3.13, PEP 705 adds the ReadOnly[] qualifier. + # PEP 728 (still pending) makes more changes. TypedDict = typing.TypedDict _TypedDictMeta = typing._TypedDictMeta is_typeddict = typing.is_typeddict @@ -815,7 +838,7 @@ def _get_typeddict_qualifiers(annotation_type): break class _TypedDictMeta(type): - def __new__(cls, name, bases, ns, *, total=True): + def __new__(cls, name, bases, ns, *, total=True, closed=False): """Create new typed dict class object. 
This method is called when TypedDict is subclassed, @@ -860,6 +883,7 @@ def __new__(cls, name, bases, ns, *, total=True): optional_keys = set() readonly_keys = set() mutable_keys = set() + extra_items_type = None for base in bases: base_dict = base.__dict__ @@ -869,6 +893,26 @@ def __new__(cls, name, bases, ns, *, total=True): optional_keys.update(base_dict.get('__optional_keys__', ())) readonly_keys.update(base_dict.get('__readonly_keys__', ())) mutable_keys.update(base_dict.get('__mutable_keys__', ())) + base_extra_items_type = base_dict.get('__extra_items__', None) + if base_extra_items_type is not None: + extra_items_type = base_extra_items_type + + if closed and extra_items_type is None: + extra_items_type = Never + if closed and "__extra_items__" in own_annotations: + annotation_type = own_annotations.pop("__extra_items__") + qualifiers = set(_get_typeddict_qualifiers(annotation_type)) + if Required in qualifiers: + raise TypeError( + "Special key __extra_items__ does not support " + "Required" + ) + if NotRequired in qualifiers: + raise TypeError( + "Special key __extra_items__ does not support " + "NotRequired" + ) + extra_items_type = annotation_type annotations.update(own_annotations) for annotation_key, annotation_type in own_annotations.items(): @@ -883,11 +927,7 @@ def __new__(cls, name, bases, ns, *, total=True): else: optional_keys.add(annotation_key) if ReadOnly in qualifiers: - if annotation_key in mutable_keys: - raise TypeError( - f"Cannot override mutable key {annotation_key!r}" - " with read-only key" - ) + mutable_keys.discard(annotation_key) readonly_keys.add(annotation_key) else: mutable_keys.add(annotation_key) @@ -900,6 +940,8 @@ def __new__(cls, name, bases, ns, *, total=True): tp_dict.__mutable_keys__ = frozenset(mutable_keys) if not hasattr(tp_dict, '__total__'): tp_dict.__total__ = total + tp_dict.__closed__ = closed + tp_dict.__extra_items__ = extra_items_type return tp_dict __call__ = dict # static method @@ -913,7 +955,7 @@ def __subclasscheck__(cls, other): _TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {}) @_ensure_subclassable(lambda bases: (_TypedDict,)) - def TypedDict(typename, fields=_marker, /, *, total=True, **kwargs): + def TypedDict(typename, fields=_marker, /, *, total=True, closed=False, **kwargs): """A simple typed namespace. At runtime it is equivalent to a plain dict. TypedDict creates a dictionary type such that a type checker will expect all @@ -973,6 +1015,9 @@ class Point2D(TypedDict): "using the functional syntax, pass an empty dictionary, e.g. " ) + example + "." warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2) + if closed is not False and closed is not True: + kwargs["closed"] = closed + closed = False fields = kwargs elif kwargs: raise TypeError("TypedDict takes either a dict or keyword arguments," @@ -994,7 +1039,7 @@ class Point2D(TypedDict): # Setting correct module is necessary to make typed dict classes pickleable. 
ns['__module__'] = module - td = _TypedDictMeta(typename, (), ns, total=total) + td = _TypedDictMeta(typename, (), ns, total=total, closed=closed) td.__orig_bases__ = (TypedDict,) return td @@ -1040,15 +1085,15 @@ def greet(name: str) -> None: return val -if hasattr(typing, "Required"): # 3.11+ +if hasattr(typing, "ReadOnly"): # 3.13+ get_type_hints = typing.get_type_hints -else: # <=3.10 +else: # <=3.13 # replaces _strip_annotations() def _strip_extras(t): """Strips Annotated, Required and NotRequired from a given type.""" if isinstance(t, _AnnotatedAlias): return _strip_extras(t.__origin__) - if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired): + if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly): return _strip_extras(t.__args__[0]) if isinstance(t, typing._GenericAlias): stripped_args = tuple(_strip_extras(a) for a in t.__args__) @@ -1768,6 +1813,98 @@ def is_str(val: Union[str, float]): PEP 647 (User-Defined Type Guards). """) +# 3.13+ +if hasattr(typing, 'TypeIs'): + TypeIs = typing.TypeIs +# 3.9 +elif sys.version_info[:2] >= (3, 9): + @_ExtensionsSpecialForm + def TypeIs(self, parameters): + """Special typing form used to annotate the return type of a user-defined + type narrower function. ``TypeIs`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeIs[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeIs`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the intersection of the type inside ``TypeGuard`` and the argument's + previously known type. + + For example:: + + def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]: + return hasattr(val, '__await__') + + def f(val: Union[int, Awaitable[int]]) -> int: + if is_awaitable(val): + assert_type(val, Awaitable[int]) + else: + assert_type(val, int) + + ``TypeIs`` also works with type variables. For more information, see + PEP 742 (Narrowing types with TypeIs). + """ + item = typing._type_check(parameters, f'{self} accepts only a single type.') + return typing._GenericAlias(self, (item,)) +# 3.8 +else: + class _TypeIsForm(_ExtensionsSpecialForm, _root=True): + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type') + return typing._GenericAlias(self, (item,)) + + TypeIs = _TypeIsForm( + 'TypeIs', + doc="""Special typing form used to annotate the return type of a user-defined + type narrower function. ``TypeIs`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. 
The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeIs[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeIs`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the intersection of the type inside ``TypeGuard`` and the argument's + previously known type. + + For example:: + + def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]: + return hasattr(val, '__await__') + + def f(val: Union[int, Awaitable[int]]) -> int: + if is_awaitable(val): + assert_type(val, Awaitable[int]) + else: + assert_type(val, int) + + ``TypeIs`` also works with type variables. For more information, see + PEP 742 (Narrowing types with TypeIs). + """) + # Vendored from cpython typing._SpecialFrom class _SpecialForm(typing._Final, _root=True): @@ -2515,9 +2652,151 @@ def wrapper(*args, **kwargs): # counting generic parameters, so that when we subscript a generic, # the runtime doesn't try to substitute the Unpack with the subscripted type. if not hasattr(typing, "TypeVarTuple"): + def _check_generic(cls, parameters, elen=_marker): + """Check correct count for parameters of a generic cls (internal helper). + + This gives a nice error message in case of count mismatch. + """ + if not elen: + raise TypeError(f"{cls} is not a generic class") + if elen is _marker: + if not hasattr(cls, "__parameters__") or not cls.__parameters__: + raise TypeError(f"{cls} is not a generic class") + elen = len(cls.__parameters__) + alen = len(parameters) + if alen != elen: + expect_val = elen + if hasattr(cls, "__parameters__"): + parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] + num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) + if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): + return + + # deal with TypeVarLike defaults + # required TypeVarLikes cannot appear after a defaulted one. + if alen < elen: + # since we validate TypeVarLike default in _collect_type_vars + # or _collect_parameters we can safely check parameters[alen] + if getattr(parameters[alen], '__default__', None) is not None: + return + + num_default_tv = sum(getattr(p, '__default__', None) + is not None for p in parameters) + + elen -= num_default_tv + + expect_val = f"at least {elen}" + + things = "arguments" if sys.version_info >= (3, 10) else "parameters" + raise TypeError(f"Too {'many' if alen > elen else 'few'} {things}" + f" for {cls}; actual {alen}, expected {expect_val}") +else: + # Python 3.11+ + + def _check_generic(cls, parameters, elen): + """Check correct count for parameters of a generic cls (internal helper). + + This gives a nice error message in case of count mismatch. + """ + if not elen: + raise TypeError(f"{cls} is not a generic class") + alen = len(parameters) + if alen != elen: + expect_val = elen + if hasattr(cls, "__parameters__"): + parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] + + # deal with TypeVarLike defaults + # required TypeVarLikes cannot appear after a defaulted one. 
+ if alen < elen: + # since we validate TypeVarLike default in _collect_type_vars + # or _collect_parameters we can safely check parameters[alen] + if getattr(parameters[alen], '__default__', None) is not None: + return + + num_default_tv = sum(getattr(p, '__default__', None) + is not None for p in parameters) + + elen -= num_default_tv + + expect_val = f"at least {elen}" + + raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments" + f" for {cls}; actual {alen}, expected {expect_val}") + +typing._check_generic = _check_generic + +# Python 3.11+ _collect_type_vars was renamed to _collect_parameters +if hasattr(typing, '_collect_type_vars'): + def _collect_type_vars(types, typevar_types=None): + """Collect all type variable contained in types in order of + first appearance (lexicographic order). For example:: + + _collect_type_vars((T, List[S, T])) == (T, S) + """ + if typevar_types is None: + typevar_types = typing.TypeVar + tvars = [] + # required TypeVarLike cannot appear after TypeVarLike with default + default_encountered = False + for t in types: + if ( + isinstance(t, typevar_types) and + t not in tvars and + not _is_unpack(t) + ): + if getattr(t, '__default__', None) is not None: + default_encountered = True + elif default_encountered: + raise TypeError(f'Type parameter {t!r} without a default' + ' follows type parameter with a default') + + tvars.append(t) + if _should_collect_from_parameters(t): + tvars.extend([t for t in t.__parameters__ if t not in tvars]) + return tuple(tvars) + typing._collect_type_vars = _collect_type_vars - typing._check_generic = _check_generic +else: + def _collect_parameters(args): + """Collect all type variables and parameter specifications in args + in order of first appearance (lexicographic order). + + For example:: + + assert _collect_parameters((T, Callable[P, T])) == (T, P) + """ + parameters = [] + # required TypeVarLike cannot appear after TypeVarLike with default + default_encountered = False + for t in args: + if isinstance(t, type): + # We don't want __parameters__ descriptor of a bare Python class. + pass + elif isinstance(t, tuple): + # `t` might be a tuple, when `ParamSpec` is substituted with + # `[T, int]`, or `[int, *Ts]`, etc. + for x in t: + for collected in _collect_parameters([x]): + if collected not in parameters: + parameters.append(collected) + elif hasattr(t, '__typing_subst__'): + if t not in parameters: + if getattr(t, '__default__', None) is not None: + default_encountered = True + elif default_encountered: + raise TypeError(f'Type parameter {t!r} without a default' + ' follows type parameter with a default') + + parameters.append(t) + else: + for x in getattr(t, '__parameters__', ()): + if x not in parameters: + parameters.append(x) + + return tuple(parameters) + typing._collect_parameters = _collect_parameters # Backport typing.NamedTuple as it exists in Python 3.13. # In 3.11, the ability to define generic `NamedTuple`s was supported. 
diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index bba94517734..04ccb0c5aa0 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -14,7 +14,7 @@ requests==2.31.0 urllib3==1.26.18 rich==13.7.1 pygments==2.17.2 - typing_extensions==4.9.0 + typing_extensions==4.11.0 resolvelib==1.0.1 setuptools==69.1.1 six==1.16.0 From d24ddc3267fded605ec8bf248f1d5923941d6bc8 Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Fri, 3 May 2024 21:06:03 +0100 Subject: [PATCH 063/113] Upgrade setuptools to 69.5.1 --- news/setuptools.vendor.rst | 2 +- src/pip/_vendor/pkg_resources/__init__.py | 68 ++++++++++------------- src/pip/_vendor/vendor.txt | 2 +- 3 files changed, 30 insertions(+), 42 deletions(-) diff --git a/news/setuptools.vendor.rst b/news/setuptools.vendor.rst index 135850e0a97..70703ddc15e 100644 --- a/news/setuptools.vendor.rst +++ b/news/setuptools.vendor.rst @@ -1 +1 @@ -Upgrade setuptools to 69.1.1 +Upgrade setuptools to 69.5.1 diff --git a/src/pip/_vendor/pkg_resources/__init__.py b/src/pip/_vendor/pkg_resources/__init__.py index 89f49570f4a..417a537d6f6 100644 --- a/src/pip/_vendor/pkg_resources/__init__.py +++ b/src/pip/_vendor/pkg_resources/__init__.py @@ -27,7 +27,7 @@ import time import re import types -from typing import Protocol +from typing import List, Protocol import zipfile import zipimport import warnings @@ -85,9 +85,7 @@ require = None working_set = None add_activation_listener = None -resources_stream = None cleanup_resources = None -resource_dir = None resource_stream = None set_extraction_path = None resource_isdir = None @@ -491,19 +489,6 @@ def compatible_platforms(provided, required): return False -def run_script(dist_spec, script_name): - """Locate distribution `dist_spec` and run its `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - require(dist_spec)[0].run_script(script_name, ns) - - -# backward compatibility -run_main = run_script - - def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, str): @@ -531,7 +516,7 @@ def get_entry_info(dist, group, name): class IMetadataProvider(Protocol): - def has_metadata(self, name): + def has_metadata(self, name) -> bool: """Does the package's distribution contain the named metadata?""" def get_metadata(self, name): @@ -543,7 +528,7 @@ def get_metadata_lines(self, name): Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" - def metadata_isdir(self, name): + def metadata_isdir(self, name) -> bool: """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" def metadata_listdir(self, name): @@ -566,8 +551,8 @@ def get_resource_stream(self, manager, resource_name): `manager` must be an ``IResourceManager``""" - def get_resource_string(self, manager, resource_name): - """Return a string containing the contents of `resource_name` + def get_resource_string(self, manager, resource_name) -> bytes: + """Return the contents of `resource_name` as :obj:`bytes` `manager` must be an ``IResourceManager``""" @@ -1203,8 +1188,8 @@ def resource_stream(self, package_or_requirement, resource_name): self, resource_name ) - def resource_string(self, package_or_requirement, resource_name): - """Return specified resource as a string""" + def resource_string(self, package_or_requirement, resource_name) -> bytes: + """Return specified resource as :obj:`bytes`""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) @@ -1339,7 +1324,7 @@ def set_extraction_path(self, path): self.extraction_path = path - def cleanup_resources(self, force=False): + def cleanup_resources(self, force=False) -> List[str]: """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. @@ -1351,6 +1336,7 @@ def cleanup_resources(self, force=False): directory used for extractions. """ # XXX + return [] def get_default_cache(): @@ -1479,7 +1465,7 @@ def get_resource_filename(self, manager, resource_name): def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name)) - def get_resource_string(self, manager, resource_name): + def get_resource_string(self, manager, resource_name) -> bytes: return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): @@ -1488,9 +1474,9 @@ def has_resource(self, resource_name): def _get_metadata_path(self, name): return self._fn(self.egg_info, name) - def has_metadata(self, name): + def has_metadata(self, name) -> bool: if not self.egg_info: - return self.egg_info + return False path = self._get_metadata_path(name) return self._has(path) @@ -1514,8 +1500,8 @@ def get_metadata_lines(self, name): def resource_isdir(self, resource_name): return self._isdir(self._fn(self.module_path, resource_name)) - def metadata_isdir(self, name): - return self.egg_info and self._isdir(self._fn(self.egg_info, name)) + def metadata_isdir(self, name) -> bool: + return bool(self.egg_info and self._isdir(self._fn(self.egg_info, name))) def resource_listdir(self, resource_name): return self._listdir(self._fn(self.module_path, resource_name)) @@ -1554,12 +1540,12 @@ def run_script(self, script_name, namespace): script_code = compile(script_text, script_filename, 'exec') exec(script_code, namespace, namespace) - def _has(self, path): + def _has(self, path) -> bool: raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) - def _isdir(self, path): + def _isdir(self, path) -> bool: raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) @@ -1649,7 +1635,7 @@ def _validate_resource_path(path): DeprecationWarning, ) - def _get(self, path): + def _get(self, path) -> bytes: if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( @@ -1694,10 +1680,10 @@ def _set_egg(self, path): class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" - def _has(self, path): + def _has(self, path) -> bool: return 
os.path.exists(path) - def _isdir(self, path): + def _isdir(self, path) -> bool: return os.path.isdir(path) def _listdir(self, path): @@ -1706,7 +1692,7 @@ def _listdir(self, path): def get_resource_stream(self, manager, resource_name): return open(self._fn(self.module_path, resource_name), 'rb') - def _get(self, path): + def _get(self, path) -> bytes: with open(path, 'rb') as stream: return stream.read() @@ -1731,8 +1717,8 @@ class EmptyProvider(NullProvider): _isdir = _has = lambda self, path: False - def _get(self, path): - return '' + def _get(self, path) -> bytes: + return b'' def _listdir(self, path): return [] @@ -1939,11 +1925,11 @@ def _index(self): self._dirindex = ind return ind - def _has(self, fspath): + def _has(self, fspath) -> bool: zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() - def _isdir(self, fspath): + def _isdir(self, fspath) -> bool: return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): @@ -1977,7 +1963,7 @@ def __init__(self, path): def _get_metadata_path(self, name): return self.path - def has_metadata(self, name): + def has_metadata(self, name) -> bool: return name == 'PKG-INFO' and os.path.isfile(self.path) def get_metadata(self, name): @@ -3207,7 +3193,9 @@ def _find_adapter(registry, ob): for t in types: if t in registry: return registry[t] - return None + # _find_adapter would previously return None, and immediately be called. + # So we're raising a TypeError to keep backward compatibility if anyone depended on that behaviour. + raise TypeError(f"Could not find adapter for {registry} and {ob}") def ensure_directory(path): diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index 04ccb0c5aa0..e9694adc037 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -16,7 +16,7 @@ rich==13.7.1 pygments==2.17.2 typing_extensions==4.11.0 resolvelib==1.0.1 -setuptools==69.1.1 +setuptools==69.5.1 six==1.16.0 tenacity==8.2.3 tomli==2.0.1 From 8547b52e26061f6f385400626aed3b1a74bfd441 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Fri, 3 May 2024 15:27:57 -0500 Subject: [PATCH 064/113] Upgrade vendored truststore to 0.9.0 (#12662) --- news/truststore.vendor.rst | 1 + src/pip/_vendor/truststore/__init__.py | 2 +- src/pip/_vendor/truststore/_api.py | 39 +++++++++++++++----------- src/pip/_vendor/truststore/_macos.py | 14 ++++----- src/pip/_vendor/truststore/_windows.py | 30 +++++++++++++------- src/pip/_vendor/vendor.txt | 2 +- 6 files changed, 52 insertions(+), 36 deletions(-) create mode 100644 news/truststore.vendor.rst diff --git a/news/truststore.vendor.rst b/news/truststore.vendor.rst new file mode 100644 index 00000000000..a18235085f9 --- /dev/null +++ b/news/truststore.vendor.rst @@ -0,0 +1 @@ +Upgrade truststore to 0.9.0 diff --git a/src/pip/_vendor/truststore/__init__.py b/src/pip/_vendor/truststore/__init__.py index 59930f455b0..9e7f26795fc 100644 --- a/src/pip/_vendor/truststore/__init__.py +++ b/src/pip/_vendor/truststore/__init__.py @@ -10,4 +10,4 @@ del _api, _sys # type: ignore[name-defined] # noqa: F821 __all__ = ["SSLContext", "inject_into_ssl", "extract_from_ssl"] -__version__ = "0.8.0" +__version__ = "0.9.0" diff --git a/src/pip/_vendor/truststore/_api.py b/src/pip/_vendor/truststore/_api.py index 829aff72672..f1ac6676813 100644 --- a/src/pip/_vendor/truststore/_api.py +++ b/src/pip/_vendor/truststore/_api.py @@ -2,10 +2,9 @@ import platform import socket import ssl +import sys import typing -import _ssl # type: ignore[import] - from 
._ssl_constants import ( _original_SSLContext, _original_super_SSLContext, @@ -49,7 +48,7 @@ def extract_from_ssl() -> None: try: import pip._vendor.urllib3.util.ssl_ as urllib3_ssl - urllib3_ssl.SSLContext = _original_SSLContext + urllib3_ssl.SSLContext = _original_SSLContext # type: ignore[assignment] except ImportError: pass @@ -171,16 +170,13 @@ def cert_store_stats(self) -> dict[str, int]: @typing.overload def get_ca_certs( self, binary_form: typing.Literal[False] = ... - ) -> list[typing.Any]: - ... + ) -> list[typing.Any]: ... @typing.overload - def get_ca_certs(self, binary_form: typing.Literal[True] = ...) -> list[bytes]: - ... + def get_ca_certs(self, binary_form: typing.Literal[True] = ...) -> list[bytes]: ... @typing.overload - def get_ca_certs(self, binary_form: bool = ...) -> typing.Any: - ... + def get_ca_certs(self, binary_form: bool = ...) -> typing.Any: ... def get_ca_certs(self, binary_form: bool = False) -> list[typing.Any] | list[bytes]: raise NotImplementedError() @@ -276,6 +272,23 @@ def verify_mode(self, value: ssl.VerifyMode) -> None: ) +# Python 3.13+ makes get_unverified_chain() a public API that only returns DER +# encoded certificates. We detect whether we need to call public_bytes() for 3.10->3.12 +# Pre-3.13 returned None instead of an empty list from get_unverified_chain() +if sys.version_info >= (3, 13): + + def _get_unverified_chain_bytes(sslobj: ssl.SSLObject) -> list[bytes]: + unverified_chain = sslobj.get_unverified_chain() or () # type: ignore[attr-defined] + return [cert for cert in unverified_chain] + +else: + import _ssl # type: ignore[import-not-found] + + def _get_unverified_chain_bytes(sslobj: ssl.SSLObject) -> list[bytes]: + unverified_chain = sslobj.get_unverified_chain() or () # type: ignore[attr-defined] + return [cert.public_bytes(_ssl.ENCODING_DER) for cert in unverified_chain] + + def _verify_peercerts( sock_or_sslobj: ssl.SSLSocket | ssl.SSLObject, server_hostname: str | None ) -> None: @@ -290,13 +303,7 @@ def _verify_peercerts( except AttributeError: pass - # SSLObject.get_unverified_chain() returns 'None' - # if the peer sends no certificates. This is common - # for the server-side scenario. 
- unverified_chain: typing.Sequence[_ssl.Certificate] = ( - sslobj.get_unverified_chain() or () # type: ignore[attr-defined] - ) - cert_bytes = [cert.public_bytes(_ssl.ENCODING_DER) for cert in unverified_chain] + cert_bytes = _get_unverified_chain_bytes(sslobj) _verify_peercerts_impl( sock_or_sslobj.context, cert_bytes, server_hostname=server_hostname ) diff --git a/src/pip/_vendor/truststore/_macos.py b/src/pip/_vendor/truststore/_macos.py index 7dc440bf362..b234ffec723 100644 --- a/src/pip/_vendor/truststore/_macos.py +++ b/src/pip/_vendor/truststore/_macos.py @@ -96,9 +96,6 @@ def _load_cdll(name: str, macos10_16_path: str) -> CDLL: Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean] Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus - Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)] - Security.SecTrustEvaluate.restype = OSStatus - Security.SecPolicyCreateRevocation.argtypes = [CFOptionFlags] Security.SecPolicyCreateRevocation.restype = SecPolicyRef @@ -259,6 +256,7 @@ def _handle_osstatus(result: OSStatus, _: typing.Any, args: typing.Any) -> typin Security.SecTrustCreateWithCertificates.errcheck = _handle_osstatus # type: ignore[assignment] Security.SecTrustSetAnchorCertificates.errcheck = _handle_osstatus # type: ignore[assignment] +Security.SecTrustSetAnchorCertificatesOnly.errcheck = _handle_osstatus # type: ignore[assignment] Security.SecTrustGetTrustResult.errcheck = _handle_osstatus # type: ignore[assignment] @@ -417,21 +415,21 @@ def _verify_peercerts_impl( CoreFoundation.CFRelease(certs) # If there are additional trust anchors to load we need to transform - # the list of DER-encoded certificates into a CFArray. Otherwise - # pass 'None' to signal that we only want system / fetched certificates. + # the list of DER-encoded certificates into a CFArray. ctx_ca_certs_der: list[bytes] | None = ssl_context.get_ca_certs( binary_form=True ) if ctx_ca_certs_der: ctx_ca_certs = None try: - ctx_ca_certs = _der_certs_to_cf_cert_array(cert_chain) + ctx_ca_certs = _der_certs_to_cf_cert_array(ctx_ca_certs_der) Security.SecTrustSetAnchorCertificates(trust, ctx_ca_certs) finally: if ctx_ca_certs: CoreFoundation.CFRelease(ctx_ca_certs) - else: - Security.SecTrustSetAnchorCertificates(trust, None) + + # We always want system certificates. + Security.SecTrustSetAnchorCertificatesOnly(trust, False) cf_error = CoreFoundation.CFErrorRef() sec_trust_eval_result = Security.SecTrustEvaluateWithError( diff --git a/src/pip/_vendor/truststore/_windows.py b/src/pip/_vendor/truststore/_windows.py index 3de4960a1b0..3d00d467f99 100644 --- a/src/pip/_vendor/truststore/_windows.py +++ b/src/pip/_vendor/truststore/_windows.py @@ -325,6 +325,12 @@ def _verify_peercerts_impl( server_hostname: str | None = None, ) -> None: """Verify the cert_chain from the server using Windows APIs.""" + + # If the peer didn't send any certificates then + # we can't do verification. Raise an error. 
+ if not cert_chain: + raise ssl.SSLCertVerificationError("Peer sent no certificates to verify") + pCertContext = None hIntermediateCertStore = CertOpenStore(CERT_STORE_PROV_MEMORY, 0, None, 0, None) try: @@ -375,7 +381,7 @@ def _verify_peercerts_impl( server_hostname, chain_flags=chain_flags, ) - except ssl.SSLCertVerificationError: + except ssl.SSLCertVerificationError as e: # If that fails but custom CA certs have been added # to the SSLContext using load_verify_locations, # try verifying using a custom chain engine @@ -384,15 +390,19 @@ def _verify_peercerts_impl( binary_form=True ) if custom_ca_certs: - _verify_using_custom_ca_certs( - ssl_context, - custom_ca_certs, - hIntermediateCertStore, - pCertContext, - pChainPara, - server_hostname, - chain_flags=chain_flags, - ) + try: + _verify_using_custom_ca_certs( + ssl_context, + custom_ca_certs, + hIntermediateCertStore, + pCertContext, + pChainPara, + server_hostname, + chain_flags=chain_flags, + ) + # Raise the original error, not the new error. + except ssl.SSLCertVerificationError: + raise e from None else: raise finally: diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index f00a8c02db4..14166a8192d 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -20,5 +20,5 @@ setuptools==69.1.1 six==1.16.0 tenacity==8.2.3 tomli==2.0.1 -truststore==0.8.0 +truststore==0.9.0 webencodings==0.5.1 From bc844911fc59efd40b8aa330f6802690388ec7c8 Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Sat, 4 May 2024 01:58:33 +0100 Subject: [PATCH 065/113] Revert "Upgrade vendored truststore to 0.9.0" (#12671) This reverts commit 8547b52e26061f6f385400626aed3b1a74bfd441. --- news/truststore.vendor.rst | 1 - src/pip/_vendor/truststore/__init__.py | 2 +- src/pip/_vendor/truststore/_api.py | 39 +++++++++++--------------- src/pip/_vendor/truststore/_macos.py | 14 +++++---- src/pip/_vendor/truststore/_windows.py | 30 +++++++------------- src/pip/_vendor/vendor.txt | 2 +- 6 files changed, 36 insertions(+), 52 deletions(-) delete mode 100644 news/truststore.vendor.rst diff --git a/news/truststore.vendor.rst b/news/truststore.vendor.rst deleted file mode 100644 index a18235085f9..00000000000 --- a/news/truststore.vendor.rst +++ /dev/null @@ -1 +0,0 @@ -Upgrade truststore to 0.9.0 diff --git a/src/pip/_vendor/truststore/__init__.py b/src/pip/_vendor/truststore/__init__.py index 9e7f26795fc..59930f455b0 100644 --- a/src/pip/_vendor/truststore/__init__.py +++ b/src/pip/_vendor/truststore/__init__.py @@ -10,4 +10,4 @@ del _api, _sys # type: ignore[name-defined] # noqa: F821 __all__ = ["SSLContext", "inject_into_ssl", "extract_from_ssl"] -__version__ = "0.9.0" +__version__ = "0.8.0" diff --git a/src/pip/_vendor/truststore/_api.py b/src/pip/_vendor/truststore/_api.py index f1ac6676813..829aff72672 100644 --- a/src/pip/_vendor/truststore/_api.py +++ b/src/pip/_vendor/truststore/_api.py @@ -2,9 +2,10 @@ import platform import socket import ssl -import sys import typing +import _ssl # type: ignore[import] + from ._ssl_constants import ( _original_SSLContext, _original_super_SSLContext, @@ -48,7 +49,7 @@ def extract_from_ssl() -> None: try: import pip._vendor.urllib3.util.ssl_ as urllib3_ssl - urllib3_ssl.SSLContext = _original_SSLContext # type: ignore[assignment] + urllib3_ssl.SSLContext = _original_SSLContext except ImportError: pass @@ -170,13 +171,16 @@ def cert_store_stats(self) -> dict[str, int]: @typing.overload def get_ca_certs( self, binary_form: typing.Literal[False] = ... - ) -> list[typing.Any]: ... 
+ ) -> list[typing.Any]: + ... @typing.overload - def get_ca_certs(self, binary_form: typing.Literal[True] = ...) -> list[bytes]: ... + def get_ca_certs(self, binary_form: typing.Literal[True] = ...) -> list[bytes]: + ... @typing.overload - def get_ca_certs(self, binary_form: bool = ...) -> typing.Any: ... + def get_ca_certs(self, binary_form: bool = ...) -> typing.Any: + ... def get_ca_certs(self, binary_form: bool = False) -> list[typing.Any] | list[bytes]: raise NotImplementedError() @@ -272,23 +276,6 @@ def verify_mode(self, value: ssl.VerifyMode) -> None: ) -# Python 3.13+ makes get_unverified_chain() a public API that only returns DER -# encoded certificates. We detect whether we need to call public_bytes() for 3.10->3.12 -# Pre-3.13 returned None instead of an empty list from get_unverified_chain() -if sys.version_info >= (3, 13): - - def _get_unverified_chain_bytes(sslobj: ssl.SSLObject) -> list[bytes]: - unverified_chain = sslobj.get_unverified_chain() or () # type: ignore[attr-defined] - return [cert for cert in unverified_chain] - -else: - import _ssl # type: ignore[import-not-found] - - def _get_unverified_chain_bytes(sslobj: ssl.SSLObject) -> list[bytes]: - unverified_chain = sslobj.get_unverified_chain() or () # type: ignore[attr-defined] - return [cert.public_bytes(_ssl.ENCODING_DER) for cert in unverified_chain] - - def _verify_peercerts( sock_or_sslobj: ssl.SSLSocket | ssl.SSLObject, server_hostname: str | None ) -> None: @@ -303,7 +290,13 @@ def _verify_peercerts( except AttributeError: pass - cert_bytes = _get_unverified_chain_bytes(sslobj) + # SSLObject.get_unverified_chain() returns 'None' + # if the peer sends no certificates. This is common + # for the server-side scenario. + unverified_chain: typing.Sequence[_ssl.Certificate] = ( + sslobj.get_unverified_chain() or () # type: ignore[attr-defined] + ) + cert_bytes = [cert.public_bytes(_ssl.ENCODING_DER) for cert in unverified_chain] _verify_peercerts_impl( sock_or_sslobj.context, cert_bytes, server_hostname=server_hostname ) diff --git a/src/pip/_vendor/truststore/_macos.py b/src/pip/_vendor/truststore/_macos.py index b234ffec723..7dc440bf362 100644 --- a/src/pip/_vendor/truststore/_macos.py +++ b/src/pip/_vendor/truststore/_macos.py @@ -96,6 +96,9 @@ def _load_cdll(name: str, macos10_16_path: str) -> CDLL: Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean] Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus + Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)] + Security.SecTrustEvaluate.restype = OSStatus + Security.SecPolicyCreateRevocation.argtypes = [CFOptionFlags] Security.SecPolicyCreateRevocation.restype = SecPolicyRef @@ -256,7 +259,6 @@ def _handle_osstatus(result: OSStatus, _: typing.Any, args: typing.Any) -> typin Security.SecTrustCreateWithCertificates.errcheck = _handle_osstatus # type: ignore[assignment] Security.SecTrustSetAnchorCertificates.errcheck = _handle_osstatus # type: ignore[assignment] -Security.SecTrustSetAnchorCertificatesOnly.errcheck = _handle_osstatus # type: ignore[assignment] Security.SecTrustGetTrustResult.errcheck = _handle_osstatus # type: ignore[assignment] @@ -415,21 +417,21 @@ def _verify_peercerts_impl( CoreFoundation.CFRelease(certs) # If there are additional trust anchors to load we need to transform - # the list of DER-encoded certificates into a CFArray. + # the list of DER-encoded certificates into a CFArray. Otherwise + # pass 'None' to signal that we only want system / fetched certificates. 
ctx_ca_certs_der: list[bytes] | None = ssl_context.get_ca_certs( binary_form=True ) if ctx_ca_certs_der: ctx_ca_certs = None try: - ctx_ca_certs = _der_certs_to_cf_cert_array(ctx_ca_certs_der) + ctx_ca_certs = _der_certs_to_cf_cert_array(cert_chain) Security.SecTrustSetAnchorCertificates(trust, ctx_ca_certs) finally: if ctx_ca_certs: CoreFoundation.CFRelease(ctx_ca_certs) - - # We always want system certificates. - Security.SecTrustSetAnchorCertificatesOnly(trust, False) + else: + Security.SecTrustSetAnchorCertificates(trust, None) cf_error = CoreFoundation.CFErrorRef() sec_trust_eval_result = Security.SecTrustEvaluateWithError( diff --git a/src/pip/_vendor/truststore/_windows.py b/src/pip/_vendor/truststore/_windows.py index 3d00d467f99..3de4960a1b0 100644 --- a/src/pip/_vendor/truststore/_windows.py +++ b/src/pip/_vendor/truststore/_windows.py @@ -325,12 +325,6 @@ def _verify_peercerts_impl( server_hostname: str | None = None, ) -> None: """Verify the cert_chain from the server using Windows APIs.""" - - # If the peer didn't send any certificates then - # we can't do verification. Raise an error. - if not cert_chain: - raise ssl.SSLCertVerificationError("Peer sent no certificates to verify") - pCertContext = None hIntermediateCertStore = CertOpenStore(CERT_STORE_PROV_MEMORY, 0, None, 0, None) try: @@ -381,7 +375,7 @@ def _verify_peercerts_impl( server_hostname, chain_flags=chain_flags, ) - except ssl.SSLCertVerificationError as e: + except ssl.SSLCertVerificationError: # If that fails but custom CA certs have been added # to the SSLContext using load_verify_locations, # try verifying using a custom chain engine @@ -390,19 +384,15 @@ def _verify_peercerts_impl( binary_form=True ) if custom_ca_certs: - try: - _verify_using_custom_ca_certs( - ssl_context, - custom_ca_certs, - hIntermediateCertStore, - pCertContext, - pChainPara, - server_hostname, - chain_flags=chain_flags, - ) - # Raise the original error, not the new error. 
- except ssl.SSLCertVerificationError: - raise e from None + _verify_using_custom_ca_certs( + ssl_context, + custom_ca_certs, + hIntermediateCertStore, + pCertContext, + pChainPara, + server_hostname, + chain_flags=chain_flags, + ) else: raise finally: diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index 14166a8192d..f00a8c02db4 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -20,5 +20,5 @@ setuptools==69.1.1 six==1.16.0 tenacity==8.2.3 tomli==2.0.1 -truststore==0.9.0 +truststore==0.8.0 webencodings==0.5.1 From 47a8480065d4eac252edb3673cfaecccb7dbf3c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Fri, 29 Sep 2023 22:11:12 +0200 Subject: [PATCH 066/113] Upgrade vendored packaging lib --- news/packaging.vendor.rst | 1 + src/pip/_internal/commands/check.py | 2 - src/pip/_internal/commands/download.py | 1 - src/pip/_internal/commands/index.py | 6 +- src/pip/_internal/commands/install.py | 3 - src/pip/_internal/commands/list.py | 4 +- src/pip/_internal/commands/wheel.py | 1 - src/pip/_internal/metadata/base.py | 6 +- .../_internal/metadata/importlib/_dists.py | 4 +- src/pip/_internal/metadata/pkg_resources.py | 4 +- src/pip/_internal/operations/check.py | 44 +- src/pip/_internal/req/req_set.py | 37 - .../_internal/resolution/resolvelib/base.py | 7 +- .../resolution/resolvelib/candidates.py | 16 +- .../resolution/resolvelib/factory.py | 11 +- src/pip/_internal/self_outdated_check.py | 4 +- src/pip/_vendor/packaging/__about__.py | 26 - src/pip/_vendor/packaging/__init__.py | 30 +- src/pip/_vendor/packaging/_elffile.py | 108 +++ src/pip/_vendor/packaging/_manylinux.py | 211 ++-- src/pip/_vendor/packaging/_musllinux.py | 89 +- src/pip/_vendor/packaging/_parser.py | 359 +++++++ src/pip/_vendor/packaging/_tokenizer.py | 192 ++++ src/pip/_vendor/packaging/markers.py | 204 ++-- src/pip/_vendor/packaging/metadata.py | 822 ++++++++++++++++ src/pip/_vendor/packaging/requirements.py | 154 +-- src/pip/_vendor/packaging/specifiers.py | 918 +++++++++++------- src/pip/_vendor/packaging/tags.py | 94 +- src/pip/_vendor/packaging/utils.py | 50 +- src/pip/_vendor/packaging/version.py | 395 ++++---- src/pip/_vendor/vendor.txt | 2 +- 31 files changed, 2662 insertions(+), 1143 deletions(-) create mode 100644 news/packaging.vendor.rst delete mode 100644 src/pip/_vendor/packaging/__about__.py create mode 100644 src/pip/_vendor/packaging/_elffile.py create mode 100644 src/pip/_vendor/packaging/_parser.py create mode 100644 src/pip/_vendor/packaging/_tokenizer.py create mode 100644 src/pip/_vendor/packaging/metadata.py diff --git a/news/packaging.vendor.rst b/news/packaging.vendor.rst new file mode 100644 index 00000000000..1834f6df449 --- /dev/null +++ b/news/packaging.vendor.rst @@ -0,0 +1 @@ +Upgrade packaging to 23.2 diff --git a/src/pip/_internal/commands/check.py b/src/pip/_internal/commands/check.py index 5efd0a34160..584df9f55c5 100644 --- a/src/pip/_internal/commands/check.py +++ b/src/pip/_internal/commands/check.py @@ -7,7 +7,6 @@ from pip._internal.operations.check import ( check_package_set, create_package_set_from_installed, - warn_legacy_versions_and_specifiers, ) from pip._internal.utils.misc import write_output @@ -22,7 +21,6 @@ class CheckCommand(Command): def run(self, options: Values, args: List[str]) -> int: package_set, parsing_probs = create_package_set_from_installed() - warn_legacy_versions_and_specifiers(package_set) missing, conflicting = check_package_set(package_set) for project_name in missing: diff --git 
a/src/pip/_internal/commands/download.py b/src/pip/_internal/commands/download.py index 54247a78a65..917bbb91d83 100644 --- a/src/pip/_internal/commands/download.py +++ b/src/pip/_internal/commands/download.py @@ -139,7 +139,6 @@ def run(self, options: Values, args: List[str]) -> int: downloaded.append(req.name) preparer.prepare_linked_requirements_more(requirement_set.requirements.values()) - requirement_set.warn_legacy_versions_and_specifiers() if downloaded: write_output("Successfully downloaded %s", " ".join(downloaded)) diff --git a/src/pip/_internal/commands/index.py b/src/pip/_internal/commands/index.py index f55e9e49974..2e2661bba71 100644 --- a/src/pip/_internal/commands/index.py +++ b/src/pip/_internal/commands/index.py @@ -1,8 +1,8 @@ import logging from optparse import Values -from typing import Any, Iterable, List, Optional, Union +from typing import Any, Iterable, List, Optional -from pip._vendor.packaging.version import LegacyVersion, Version +from pip._vendor.packaging.version import Version from pip._internal.cli import cmdoptions from pip._internal.cli.req_command import IndexGroupCommand @@ -115,7 +115,7 @@ def get_available_package_versions(self, options: Values, args: List[Any]) -> No ignore_requires_python=options.ignore_requires_python, ) - versions: Iterable[Union[LegacyVersion, Version]] = ( + versions: Iterable[Version] = ( candidate.version for candidate in finder.find_all_candidates(query) ) diff --git a/src/pip/_internal/commands/install.py b/src/pip/_internal/commands/install.py index 6cf7571e4a6..a9e83f9d8cd 100644 --- a/src/pip/_internal/commands/install.py +++ b/src/pip/_internal/commands/install.py @@ -387,9 +387,6 @@ def run(self, options: Values, args: List[str]) -> int: json.dump(report.to_dict(), f, indent=2, ensure_ascii=False) if options.dry_run: - # In non dry-run mode, the legacy versions and specifiers check - # will be done as part of conflict detection. - requirement_set.warn_legacy_versions_and_specifiers() would_install_items = sorted( (r.metadata["name"], r.metadata["version"]) for r in requirement_set.requirements_to_install diff --git a/src/pip/_internal/commands/list.py b/src/pip/_internal/commands/list.py index f510919910e..171461aebaf 100644 --- a/src/pip/_internal/commands/list.py +++ b/src/pip/_internal/commands/list.py @@ -4,6 +4,7 @@ from typing import TYPE_CHECKING, Generator, List, Optional, Sequence, Tuple, cast from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.packaging.version import Version from pip._internal.cli import cmdoptions from pip._internal.cli.req_command import IndexGroupCommand @@ -18,7 +19,6 @@ from pip._internal.utils.misc import tabulate, write_output if TYPE_CHECKING: - from pip._internal.metadata.base import DistributionVersion class _DistWithLatestInfo(BaseDistribution): """Give the distribution object a couple of extra fields. @@ -27,7 +27,7 @@ class _DistWithLatestInfo(BaseDistribution): makes the rest of the code much cleaner. 
""" - latest_version: DistributionVersion + latest_version: Version latest_filetype: str _ProcessedDists = Sequence[_DistWithLatestInfo] diff --git a/src/pip/_internal/commands/wheel.py b/src/pip/_internal/commands/wheel.py index ed578aa2500..278719f4e0c 100644 --- a/src/pip/_internal/commands/wheel.py +++ b/src/pip/_internal/commands/wheel.py @@ -154,7 +154,6 @@ def run(self, options: Values, args: List[str]) -> int: reqs_to_build.append(req) preparer.prepare_linked_requirements_more(requirement_set.requirements.values()) - requirement_set.warn_legacy_versions_and_specifiers() # build wheels build_successes, build_failures = build( diff --git a/src/pip/_internal/metadata/base.py b/src/pip/_internal/metadata/base.py index af0412c819c..843a4e60aeb 100644 --- a/src/pip/_internal/metadata/base.py +++ b/src/pip/_internal/metadata/base.py @@ -25,7 +25,7 @@ from pip._vendor.packaging.requirements import Requirement from pip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet from pip._vendor.packaging.utils import NormalizedName, canonicalize_name -from pip._vendor.packaging.version import LegacyVersion, Version +from pip._vendor.packaging.version import Version from pip._internal.exceptions import NoneMetadataError from pip._internal.locations import site_packages, user_site @@ -41,8 +41,6 @@ from ._json import msg_to_json -DistributionVersion = Union[LegacyVersion, Version] - InfoPath = Union[str, pathlib.PurePath] logger = logging.getLogger(__name__) @@ -274,7 +272,7 @@ def canonical_name(self) -> NormalizedName: raise NotImplementedError() @property - def version(self) -> DistributionVersion: + def version(self) -> Version: raise NotImplementedError() @property diff --git a/src/pip/_internal/metadata/importlib/_dists.py b/src/pip/_internal/metadata/importlib/_dists.py index 8591029f16e..9eee474ea5b 100644 --- a/src/pip/_internal/metadata/importlib/_dists.py +++ b/src/pip/_internal/metadata/importlib/_dists.py @@ -16,13 +16,13 @@ from pip._vendor.packaging.requirements import Requirement from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import Version from pip._vendor.packaging.version import parse as parse_version from pip._internal.exceptions import InvalidWheel, UnsupportedWheel from pip._internal.metadata.base import ( BaseDistribution, BaseEntryPoint, - DistributionVersion, InfoPath, Wheel, ) @@ -171,7 +171,7 @@ def canonical_name(self) -> NormalizedName: return canonicalize_name(name) @property - def version(self) -> DistributionVersion: + def version(self) -> Version: return parse_version(self._dist.version) def is_file(self, path: InfoPath) -> bool: diff --git a/src/pip/_internal/metadata/pkg_resources.py b/src/pip/_internal/metadata/pkg_resources.py index bb11e5bd8a5..57a9fdba1bc 100644 --- a/src/pip/_internal/metadata/pkg_resources.py +++ b/src/pip/_internal/metadata/pkg_resources.py @@ -8,6 +8,7 @@ from pip._vendor import pkg_resources from pip._vendor.packaging.requirements import Requirement from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import Version from pip._vendor.packaging.version import parse as parse_version from pip._internal.exceptions import InvalidWheel, NoneMetadataError, UnsupportedWheel @@ -19,7 +20,6 @@ BaseDistribution, BaseEntryPoint, BaseEnvironment, - DistributionVersion, InfoPath, Wheel, ) @@ -168,7 +168,7 @@ def canonical_name(self) -> NormalizedName: return canonicalize_name(self._dist.project_name) @property - def 
version(self) -> DistributionVersion: + def version(self) -> Version: return parse_version(self._dist.version) def is_file(self, path: InfoPath) -> bool: diff --git a/src/pip/_internal/operations/check.py b/src/pip/_internal/operations/check.py index 90c6a58a55e..ee1a5bcfe06 100644 --- a/src/pip/_internal/operations/check.py +++ b/src/pip/_internal/operations/check.py @@ -5,28 +5,25 @@ from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.specifiers import LegacySpecifier from pip._vendor.packaging.utils import NormalizedName, canonicalize_name -from pip._vendor.packaging.version import LegacyVersion +from pip._vendor.packaging.version import Version from pip._internal.distributions import make_distribution_for_install_requirement from pip._internal.metadata import get_default_environment -from pip._internal.metadata.base import DistributionVersion from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.deprecation import deprecated logger = logging.getLogger(__name__) class PackageDetails(NamedTuple): - version: DistributionVersion + version: Version dependencies: List[Requirement] # Shorthands PackageSet = Dict[NormalizedName, PackageDetails] Missing = Tuple[NormalizedName, Requirement] -Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement] +Conflicting = Tuple[NormalizedName, Version, Requirement] MissingDict = Dict[NormalizedName, List[Missing]] ConflictingDict = Dict[NormalizedName, List[Conflicting]] @@ -60,8 +57,6 @@ def check_package_set( package name and returns a boolean. """ - warn_legacy_versions_and_specifiers(package_set) - missing = {} conflicting = {} @@ -152,36 +147,3 @@ def _create_whitelist( break return packages_affected - - -def warn_legacy_versions_and_specifiers(package_set: PackageSet) -> None: - for project_name, package_details in package_set.items(): - if isinstance(package_details.version, LegacyVersion): - deprecated( - reason=( - f"{project_name} {package_details.version} " - f"has a non-standard version number." - ), - replacement=( - f"to upgrade to a newer version of {project_name} " - f"or contact the author to suggest that they " - f"release a version with a conforming version number" - ), - issue=12063, - gone_in="24.1", - ) - for dep in package_details.dependencies: - if any(isinstance(spec, LegacySpecifier) for spec in dep.specifier): - deprecated( - reason=( - f"{project_name} {package_details.version} " - f"has a non-standard dependency specifier {dep}." 
- ), - replacement=( - f"to upgrade to a newer version of {project_name} " - f"or contact the author to suggest that they " - f"release a version with a conforming dependency specifiers" - ), - issue=12063, - gone_in="24.1", - ) diff --git a/src/pip/_internal/req/req_set.py b/src/pip/_internal/req/req_set.py index bf36114e802..ec7a6e07a25 100644 --- a/src/pip/_internal/req/req_set.py +++ b/src/pip/_internal/req/req_set.py @@ -2,12 +2,9 @@ from collections import OrderedDict from typing import Dict, List -from pip._vendor.packaging.specifiers import LegacySpecifier from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.packaging.version import LegacyVersion from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.deprecation import deprecated logger = logging.getLogger(__name__) @@ -83,37 +80,3 @@ def requirements_to_install(self) -> List[InstallRequirement]: for install_req in self.all_requirements if not install_req.constraint and not install_req.satisfied_by ] - - def warn_legacy_versions_and_specifiers(self) -> None: - for req in self.requirements_to_install: - version = req.get_dist().version - if isinstance(version, LegacyVersion): - deprecated( - reason=( - f"pip has selected the non standard version {version} " - f"of {req}. In the future this version will be " - f"ignored as it isn't standard compliant." - ), - replacement=( - "set or update constraints to select another version " - "or contact the package author to fix the version number" - ), - issue=12063, - gone_in="24.1", - ) - for dep in req.get_dist().iter_dependencies(): - if any(isinstance(spec, LegacySpecifier) for spec in dep.specifier): - deprecated( - reason=( - f"pip has selected {req} {version} which has non " - f"standard dependency specifier {dep}. " - f"In the future this version of {req} will be " - f"ignored as it isn't standard compliant." 
- ), - replacement=( - "set or update constraints to select another version " - "or contact the package author to fix the version number" - ), - issue=12063, - gone_in="24.1", - ) diff --git a/src/pip/_internal/resolution/resolvelib/base.py b/src/pip/_internal/resolution/resolvelib/base.py index 4269c7fbecc..0f31dc9b307 100644 --- a/src/pip/_internal/resolution/resolvelib/base.py +++ b/src/pip/_internal/resolution/resolvelib/base.py @@ -1,16 +1,15 @@ from dataclasses import dataclass -from typing import FrozenSet, Iterable, Optional, Tuple, Union +from typing import FrozenSet, Iterable, Optional, Tuple from pip._vendor.packaging.specifiers import SpecifierSet from pip._vendor.packaging.utils import NormalizedName -from pip._vendor.packaging.version import LegacyVersion, Version +from pip._vendor.packaging.version import Version from pip._internal.models.link import Link, links_equivalent from pip._internal.req.req_install import InstallRequirement from pip._internal.utils.hashes import Hashes CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]] -CandidateVersion = Union[LegacyVersion, Version] def format_name(project: NormalizedName, extras: FrozenSet[NormalizedName]) -> str: @@ -115,7 +114,7 @@ def name(self) -> str: raise NotImplementedError("Override in subclass") @property - def version(self) -> CandidateVersion: + def version(self) -> Version: raise NotImplementedError("Override in subclass") @property diff --git a/src/pip/_internal/resolution/resolvelib/candidates.py b/src/pip/_internal/resolution/resolvelib/candidates.py index 019ff60a0a5..029b26a3368 100644 --- a/src/pip/_internal/resolution/resolvelib/candidates.py +++ b/src/pip/_internal/resolution/resolvelib/candidates.py @@ -21,7 +21,7 @@ from pip._internal.utils.direct_url_helpers import direct_url_from_link from pip._internal.utils.misc import normalize_version_info -from .base import Candidate, CandidateVersion, Requirement, format_name +from .base import Candidate, Requirement, format_name if TYPE_CHECKING: from .factory import Factory @@ -145,7 +145,7 @@ def __init__( ireq: InstallRequirement, factory: "Factory", name: Optional[NormalizedName] = None, - version: Optional[CandidateVersion] = None, + version: Optional[Version] = None, ) -> None: self._link = link self._source_link = source_link @@ -190,7 +190,7 @@ def name(self) -> str: return self.project_name @property - def version(self) -> CandidateVersion: + def version(self) -> Version: if self._version is None: self._version = self.dist.version return self._version @@ -257,7 +257,7 @@ def __init__( template: InstallRequirement, factory: "Factory", name: Optional[NormalizedName] = None, - version: Optional[CandidateVersion] = None, + version: Optional[Version] = None, ) -> None: source_link = link cache_entry = factory.get_wheel_cache_entry(source_link, name) @@ -314,7 +314,7 @@ def __init__( template: InstallRequirement, factory: "Factory", name: Optional[NormalizedName] = None, - version: Optional[CandidateVersion] = None, + version: Optional[Version] = None, ) -> None: super().__init__( link=link, @@ -374,7 +374,7 @@ def name(self) -> str: return self.project_name @property - def version(self) -> CandidateVersion: + def version(self) -> Version: if self._version is None: self._version = self.dist.version return self._version @@ -473,7 +473,7 @@ def name(self) -> str: return format_name(self.base.project_name, self.extras) @property - def version(self) -> CandidateVersion: + def version(self) -> Version: return self.base.version def 
format_for_error(self) -> str: @@ -588,7 +588,7 @@ def name(self) -> str: return REQUIRES_PYTHON_IDENTIFIER @property - def version(self) -> CandidateVersion: + def version(self) -> Version: return self._version def format_for_error(self) -> str: diff --git a/src/pip/_internal/resolution/resolvelib/factory.py b/src/pip/_internal/resolution/resolvelib/factory.py index e36df15d459..6de5d698e16 100644 --- a/src/pip/_internal/resolution/resolvelib/factory.py +++ b/src/pip/_internal/resolution/resolvelib/factory.py @@ -23,6 +23,7 @@ from pip._vendor.packaging.requirements import InvalidRequirement from pip._vendor.packaging.specifiers import SpecifierSet from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import Version from pip._vendor.resolvelib import ResolutionImpossible from pip._internal.cache import CacheEntry, WheelCache @@ -52,7 +53,7 @@ from pip._internal.utils.packaging import get_requirement from pip._internal.utils.virtualenv import running_under_virtualenv -from .base import Candidate, CandidateVersion, Constraint, Requirement +from .base import Candidate, Constraint, Requirement from .candidates import ( AlreadyInstalledCandidate, BaseCandidate, @@ -178,7 +179,7 @@ def _make_candidate_from_link( extras: FrozenSet[str], template: InstallRequirement, name: Optional[NormalizedName], - version: Optional[CandidateVersion], + version: Optional[Version], ) -> Optional[Candidate]: base: Optional[BaseCandidate] = self._make_base_candidate_from_link( link, template, name, version @@ -192,7 +193,7 @@ def _make_base_candidate_from_link( link: Link, template: InstallRequirement, name: Optional[NormalizedName], - version: Optional[CandidateVersion], + version: Optional[Version], ) -> Optional[BaseCandidate]: # TODO: Check already installed candidate, and use it if the link and # editable flag match.
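These hunks (together with the `base.py` and `candidates.py` changes above) replace pip's internal `CandidateVersion` alias, previously `Union[LegacyVersion, Version]`, with plain `packaging.version.Version`. The reason is upstream: packaging 22 removed `LegacyVersion`, so `parse()` now either returns a PEP 440 `Version` or raises. A minimal sketch of the new contract, using the standalone `packaging` distribution (the same code pip vendors here); the version strings are arbitrary examples:

```python
# parse() returns a PEP 440 Version or raises InvalidVersion -- there is
# no LegacyVersion fallback anymore (packaging >= 22).
from packaging.version import InvalidVersion, Version, parse

v = parse("1.0.post1")
assert isinstance(v, Version)  # the only possible success type

try:
    parse("2004d")  # arbitrary non-PEP 440 string; formerly a LegacyVersion
except InvalidVersion as exc:
    print(f"rejected: {exc}")
```

This is also why the `warn_legacy_versions_and_specifiers` helpers are deleted earlier in this patch: non-standard versions can no longer be represented, so there is nothing left to warn about.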
@@ -670,8 +671,8 @@ def _report_single_requirement_conflict( cands = self._finder.find_all_candidates(req.project_name) skipped_by_requires_python = self._finder.requires_python_skipped_reasons() - versions_set: Set[CandidateVersion] = set() - yanked_versions_set: Set[CandidateVersion] = set() + versions_set: Set[Version] = set() + yanked_versions_set: Set[Version] = set() for c in cands: is_yanked = c.link.is_yanked if c.link else False if is_yanked: diff --git a/src/pip/_internal/self_outdated_check.py b/src/pip/_internal/self_outdated_check.py index 0f64ae0e614..2185f2fb108 100644 --- a/src/pip/_internal/self_outdated_check.py +++ b/src/pip/_internal/self_outdated_check.py @@ -9,6 +9,7 @@ from dataclasses import dataclass from typing import Any, Callable, Dict, Optional +from pip._vendor.packaging.version import Version from pip._vendor.packaging.version import parse as parse_version from pip._vendor.rich.console import Group from pip._vendor.rich.markup import escape @@ -17,7 +18,6 @@ from pip._internal.index.collector import LinkCollector from pip._internal.index.package_finder import PackageFinder from pip._internal.metadata import get_default_environment -from pip._internal.metadata.base import DistributionVersion from pip._internal.models.selection_prefs import SelectionPreferences from pip._internal.network.session import PipSession from pip._internal.utils.compat import WINDOWS @@ -191,7 +191,7 @@ def _self_version_check_logic( *, state: SelfCheckState, current_time: datetime.datetime, - local_version: DistributionVersion, + local_version: Version, get_remote_version: Callable[[], Optional[str]], ) -> Optional[UpgradePrompt]: remote_version_str = state.get(current_time) diff --git a/src/pip/_vendor/packaging/__about__.py b/src/pip/_vendor/packaging/__about__.py deleted file mode 100644 index 3551bc2d298..00000000000 --- a/src/pip/_vendor/packaging/__about__.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "21.3" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD-2-Clause or Apache-2.0" -__copyright__ = "2014-2019 %s" % __author__ diff --git a/src/pip/_vendor/packaging/__init__.py b/src/pip/_vendor/packaging/__init__.py index 3c50c5dcfee..22809cfd5dc 100644 --- a/src/pip/_vendor/packaging/__init__.py +++ b/src/pip/_vendor/packaging/__init__.py @@ -2,24 +2,14 @@ # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
-from .__about__ import (
-    __author__,
-    __copyright__,
-    __email__,
-    __license__,
-    __summary__,
-    __title__,
-    __uri__,
-    __version__,
-)
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
 
-__all__ = [
-    "__title__",
-    "__summary__",
-    "__uri__",
-    "__version__",
-    "__author__",
-    "__email__",
-    "__license__",
-    "__copyright__",
-]
+__version__ = "23.2"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "2014 %s" % __author__
diff --git a/src/pip/_vendor/packaging/_elffile.py b/src/pip/_vendor/packaging/_elffile.py
new file mode 100644
index 00000000000..6fb19b30bb5
--- /dev/null
+++ b/src/pip/_vendor/packaging/_elffile.py
@@ -0,0 +1,108 @@
+"""
+ELF file parser.
+
+This provides a class ``ELFFile`` that parses an ELF executable in a similar
+interface to ``ZipFile``. Only the read interface is implemented.
+
+Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
+ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
+"""
+
+import enum
+import os
+import struct
+from typing import IO, Optional, Tuple
+
+
+class ELFInvalid(ValueError):
+    pass
+
+
+class EIClass(enum.IntEnum):
+    C32 = 1
+    C64 = 2
+
+
+class EIData(enum.IntEnum):
+    Lsb = 1
+    Msb = 2
+
+
+class EMachine(enum.IntEnum):
+    I386 = 3
+    S390 = 22
+    Arm = 40
+    X8664 = 62
+    AArc64 = 183
+
+
+class ELFFile:
+    """
+    Representation of an ELF executable.
+    """
+
+    def __init__(self, f: IO[bytes]) -> None:
+        self._f = f
+
+        try:
+            ident = self._read("16B")
+        except struct.error:
+            raise ELFInvalid("unable to parse identification")
+        magic = bytes(ident[:4])
+        if magic != b"\x7fELF":
+            raise ELFInvalid(f"invalid magic: {magic!r}")
+
+        self.capacity = ident[4]  # Format for program header (bitness).
+        self.encoding = ident[5]  # Data structure encoding (endianness).
+
+        try:
+            # e_fmt: Format for program header.
+            # p_fmt: Format for section header.
+            # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+            e_fmt, self._p_fmt, self._p_idx = {
+                (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)),  # 32-bit LSB.
+                (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)),  # 32-bit MSB.
+                (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)),  # 64-bit LSB.
+                (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)),  # 64-bit MSB.
+            }[(self.capacity, self.encoding)]
+        except KeyError:
+            raise ELFInvalid(
+                f"unrecognized capacity ({self.capacity}) or "
+                f"encoding ({self.encoding})"
+            )
+
+        try:
+            (
+                _,
+                self.machine,  # Architecture type.
+                _,
+                _,
+                self._e_phoff,  # Offset of program header.
+                _,
+                self.flags,  # Processor-specific flags.
+                _,
+                self._e_phentsize,  # Size of section.
+                self._e_phnum,  # Number of sections.
+            ) = self._read(e_fmt)
+        except struct.error as e:
+            raise ELFInvalid("unable to parse machine and section information") from e
+
+    def _read(self, fmt: str) -> Tuple[int, ...]:
+        return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
+
+    @property
+    def interpreter(self) -> Optional[str]:
+        """
+        The path recorded in the ``PT_INTERP`` section header.
+        """
+        for index in range(self._e_phnum):
+            self._f.seek(self._e_phoff + self._e_phentsize * index)
+            try:
+                data = self._read(self._p_fmt)
+            except struct.error:
+                continue
+            if data[self._p_idx[0]] != 3:  # Not PT_INTERP.
+ continue + self._f.seek(data[self._p_idx[1]]) + return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0") + return None diff --git a/src/pip/_vendor/packaging/_manylinux.py b/src/pip/_vendor/packaging/_manylinux.py index 4c379aa6f69..3705d50db91 100644 --- a/src/pip/_vendor/packaging/_manylinux.py +++ b/src/pip/_vendor/packaging/_manylinux.py @@ -1,122 +1,62 @@ import collections +import contextlib import functools import os import re -import struct import sys import warnings -from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple - - -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. This only applies on Linux, which uses the ELF format. -class _ELFFileHeader: - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. - """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file: IO[bytes]) -> None: - def unpack(fmt: str) -> int: - try: - data = file.read(struct.calcsize(fmt)) - result: Tuple[int, ...] = struct.unpack(fmt, data) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result[0] - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "H" - format_i = "I" - format_q = "Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header() -> Optional[_ELFFileHeader]: +from typing import Dict, Generator, Iterator, NamedTuple, Optional, Sequence, Tuple + +from ._elffile import EIClass, EIData, ELFFile, EMachine + +EF_ARM_ABIMASK = 0xFF000000 +EF_ARM_ABI_VER5 = 0x05000000 +EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + +# `os.PathLike` not a generic type until Python 3.9, so sticking with `str` +# as the type for `path` until then. 
+@contextlib.contextmanager +def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]: try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header + with open(path, "rb") as f: + yield ELFFile(f) + except (OSError, TypeError, ValueError): + yield None -def _is_linux_armhf() -> bool: +def _is_linux_armhf(executable: str) -> bool: # hard-float ABI can be detected from the ELF header of the running # process # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result - - -def _is_linux_i686() -> bool: - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.Arm + and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 + and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD + ) + + +def _is_linux_i686(executable: str) -> bool: + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.I386 + ) -def _have_compatible_abi(arch: str) -> bool: - if arch == "armv7l": - return _is_linux_armhf() - if arch == "i686": - return _is_linux_i686() - return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} +def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool: + if "armv7l" in archs: + return _is_linux_armhf(executable) + if "i686" in archs: + return _is_linux_i686(executable) + allowed_archs = {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x", "loongarch64"} + return any(arch in allowed_archs for arch in archs) # If glibc ever changes its major version, we need to know what the last @@ -141,10 +81,10 @@ def _glibc_version_string_confstr() -> Optional[str]: # platform module. # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 try: - # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". - version_string = os.confstr("CS_GNU_LIBC_VERSION") + # Should be a string like "glibc 2.17". + version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION") assert version_string is not None - _, version = version_string.split() + _, version = version_string.rsplit() except (AssertionError, AttributeError, OSError, ValueError): # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... 
         return None
@@ -211,8 +151,8 @@ def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
     m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
     if not m:
         warnings.warn(
-            "Expected glibc version with 2 components major.minor,"
-            " got: %s" % version_str,
+            f"Expected glibc version with 2 components major.minor,"
+            f" got: {version_str}",
             RuntimeWarning,
         )
         return -1, -1
@@ -228,7 +168,7 @@ def _get_glibc_version() -> Tuple[int, int]:
 
 
 # From PEP 513, PEP 600
-def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
+def _is_compatible(arch: str, version: _GLibCVersion) -> bool:
     sys_glibc = _get_glibc_version()
     if sys_glibc < version:
         return False
@@ -264,12 +204,22 @@ def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
 }
 
 
-def platform_tags(linux: str, arch: str) -> Iterator[str]:
-    if not _have_compatible_abi(arch):
+def platform_tags(archs: Sequence[str]) -> Iterator[str]:
+    """Generate manylinux tags compatible to the current platform.
+
+    :param archs: Sequence of compatible architectures.
+        The first one shall be the closest to the actual architecture and be the part of
+        platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
+        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
+        be manylinux-compatible.
+
+    :returns: An iterator of compatible manylinux tags.
+    """
+    if not _have_compatible_abi(sys.executable, archs):
         return
     # Oldest glibc to be supported regardless of architecture is (2, 17).
     too_old_glibc2 = _GLibCVersion(2, 16)
-    if arch in {"x86_64", "i686"}:
+    if set(archs) & {"x86_64", "i686"}:
         # On x86/i686 also oldest glibc to be supported is (2, 5).
         too_old_glibc2 = _GLibCVersion(2, 4)
     current_glibc = _GLibCVersion(*_get_glibc_version())
@@ -283,19 +233,20 @@ def platform_tags(linux: str, arch: str) -> Iterator[str]:
     for glibc_major in range(current_glibc.major - 1, 1, -1):
         glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
         glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
-    for glibc_max in glibc_max_list:
-        if glibc_max.major == too_old_glibc2.major:
-            min_minor = too_old_glibc2.minor
-        else:
-            # For other glibc major versions oldest supported is (x, 0).
-            min_minor = -1
-        for glibc_minor in range(glibc_max.minor, min_minor, -1):
-            glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
-            tag = "manylinux_{}_{}".format(*glibc_version)
-            if _is_compatible(tag, arch, glibc_version):
-                yield linux.replace("linux", tag)
-            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
-            if glibc_version in _LEGACY_MANYLINUX_MAP:
-                legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
-                if _is_compatible(legacy_tag, arch, glibc_version):
-                    yield linux.replace("linux", legacy_tag)
+    for arch in archs:
+        for glibc_max in glibc_max_list:
+            if glibc_max.major == too_old_glibc2.major:
+                min_minor = too_old_glibc2.minor
+            else:
+                # For other glibc major versions oldest supported is (x, 0).
+                min_minor = -1
+            for glibc_minor in range(glibc_max.minor, min_minor, -1):
+                glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
+                tag = "manylinux_{}_{}".format(*glibc_version)
+                if _is_compatible(arch, glibc_version):
+                    yield f"{tag}_{arch}"
+                # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
+ if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(arch, glibc_version): + yield f"{legacy_tag}_{arch}" diff --git a/src/pip/_vendor/packaging/_musllinux.py b/src/pip/_vendor/packaging/_musllinux.py index 8ac3059ba3c..86419df9d70 100644 --- a/src/pip/_vendor/packaging/_musllinux.py +++ b/src/pip/_vendor/packaging/_musllinux.py @@ -4,68 +4,13 @@ linked against musl, and what musl version is used. """ -import contextlib import functools -import operator -import os import re -import struct import subprocess import sys -from typing import IO, Iterator, NamedTuple, Optional, Tuple +from typing import Iterator, NamedTuple, Optional, Sequence - -def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: - return struct.unpack(fmt, f.read(struct.calcsize(fmt))) - - -def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: - """Detect musl libc location by parsing the Python executable. - - Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca - ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html - """ - f.seek(0) - try: - ident = _read_unpacked(f, "16B") - except struct.error: - return None - if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. - return None - f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. - - try: - # e_fmt: Format for program header. - # p_fmt: Format for section header. - # p_idx: Indexes to find p_type, p_offset, and p_filesz. - e_fmt, p_fmt, p_idx = { - 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. - 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. - }[ident[4]] - except KeyError: - return None - else: - p_get = operator.itemgetter(*p_idx) - - # Find the interpreter section and return its content. - try: - _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) - except struct.error: - return None - for i in range(e_phnum + 1): - f.seek(e_phoff + e_phentsize * i) - try: - p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) - except struct.error: - return None - if p_type != 3: # Not PT_INTERP. - continue - f.seek(p_offset) - interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") - if "musl" not in interpreter: - return None - return interpreter - return None +from ._elffile import ELFFile class _MuslVersion(NamedTuple): @@ -95,32 +40,34 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]: Version 1.2.2 Dynamic Program Loader """ - with contextlib.ExitStack() as stack: - try: - f = stack.enter_context(open(executable, "rb")) - except OSError: - return None - ld = _parse_ld_musl_from_elf(f) - if not ld: + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: return None - proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) + proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True) return _parse_musl_version(proc.stderr) -def platform_tags(arch: str) -> Iterator[str]: +def platform_tags(archs: Sequence[str]) -> Iterator[str]: """Generate musllinux tags compatible to the current platform. - :param arch: Should be the part of platform tag after the ``linux_`` - prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a - prerequisite for the current platform to be musllinux-compatible. + :param archs: Sequence of compatible architectures. 
+        The first one shall be the closest to the actual architecture and be the part of
+        platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
+        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
+        be musllinux-compatible.
 
     :returns: An iterator of compatible musllinux tags.
     """
     sys_musl = _get_musl_version(sys.executable)
     if sys_musl is None:  # Python not dynamically linked against musl.
         return
-    for minor in range(sys_musl.minor, -1, -1):
-        yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
+    for arch in archs:
+        for minor in range(sys_musl.minor, -1, -1):
+            yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
 
 
 if __name__ == "__main__":  # pragma: no cover
diff --git a/src/pip/_vendor/packaging/_parser.py b/src/pip/_vendor/packaging/_parser.py
new file mode 100644
index 00000000000..4576981c2dd
--- /dev/null
+++ b/src/pip/_vendor/packaging/_parser.py
@@ -0,0 +1,359 @@
+"""Handwritten parser of dependency specifiers.
+
+The docstring for each _parse_* function contains EBNF-inspired grammar representing
+the implementation.
+"""
+
+import ast
+from typing import Any, List, NamedTuple, Optional, Tuple, Union
+
+from ._tokenizer import DEFAULT_RULES, Tokenizer
+
+
+class Node:
+    def __init__(self, value: str) -> None:
+        self.value = value
+
+    def __str__(self) -> str:
+        return self.value
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}('{self}')>"
+
+    def serialize(self) -> str:
+        raise NotImplementedError
+
+
+class Variable(Node):
+    def serialize(self) -> str:
+        return str(self)
+
+
+class Value(Node):
+    def serialize(self) -> str:
+        return f'"{self}"'
+
+
+class Op(Node):
+    def serialize(self) -> str:
+        return str(self)
+
+
+MarkerVar = Union[Variable, Value]
+MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
+# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]]
+# MarkerList = List[Union["MarkerList", MarkerAtom, str]]
+# mypy does not support recursive type definition
+# https://github.com/python/mypy/issues/731
+MarkerAtom = Any
+MarkerList = List[Any]
+
+
+class ParsedRequirement(NamedTuple):
+    name: str
+    url: str
+    extras: List[str]
+    specifier: str
+    marker: Optional[MarkerList]
+
+
+# --------------------------------------------------------------------------------------
+# Recursive descent parser for dependency specifier
+# --------------------------------------------------------------------------------------
+def parse_requirement(source: str) -> ParsedRequirement:
+    return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
+
+
+def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
+    """
+    requirement = WS? IDENTIFIER WS? extras WS? requirement_details
+    """
+    tokenizer.consume("WS")
+
+    name_token = tokenizer.expect(
+        "IDENTIFIER", expected="package name at the start of dependency specifier"
+    )
+    name = name_token.text
+    tokenizer.consume("WS")
+
+    extras = _parse_extras(tokenizer)
+    tokenizer.consume("WS")
+
+    url, specifier, marker = _parse_requirement_details(tokenizer)
+    tokenizer.expect("END", expected="end of dependency specifier")
+
+    return ParsedRequirement(name, url, extras, specifier, marker)
+
+
+def _parse_requirement_details(
+    tokenizer: Tokenizer,
+) -> Tuple[str, str, Optional[MarkerList]]:
+    """
+    requirement_details = AT URL (WS requirement_marker?)?
+                        | specifier WS? (requirement_marker)?
+ """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? + """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> List[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? + """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens( + "LEFT_BRACKET", + "RIGHT_BRACKET", + around="extras", + ): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> List[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: List[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? + """ + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="version specifier", + ): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? 
+ """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + span_start = tokenizer.position + parsed_specifiers += tokenizer.read().text + if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): + tokenizer.raise_syntax_error( + ".* suffix can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position + 1, + ) + if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): + tokenizer.raise_syntax_error( + "Local version label can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position, + ) + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList: + retval = _parse_marker(tokenizer) + tokenizer.expect("END", expected="end of marker expression") + return retval + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? + """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="marker expression", + ): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? 
+ """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if ( + env_var == "platform_python_implementation" + or env_var == "python_implementation" + ): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of " + "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/src/pip/_vendor/packaging/_tokenizer.py b/src/pip/_vendor/packaging/_tokenizer.py new file mode 100644 index 00000000000..dd0d648d49a --- /dev/null +++ b/src/pip/_vendor/packaging/_tokenizer.py @@ -0,0 +1,192 @@ +import contextlib +import re +from dataclasses import dataclass +from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union + +from .specifiers import Specifier + + +@dataclass +class Token: + name: str + text: str + position: int + + +class ParserSyntaxError(Exception): + """The provided source text could not be parsed correctly.""" + + def __init__( + self, + message: str, + *, + source: str, + span: Tuple[int, int], + ) -> None: + self.span = span + self.message = message + self.source = source + + super().__init__() + + def __str__(self) -> str: + marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" + return "\n ".join([self.message, self.source, marker]) + + +DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = { + "LEFT_PARENTHESIS": r"\(", + "RIGHT_PARENTHESIS": r"\)", + "LEFT_BRACKET": r"\[", + "RIGHT_BRACKET": r"\]", + "SEMICOLON": r";", + "COMMA": r",", + "QUOTED_STRING": re.compile( + r""" + ( + ('[^']*') + | + ("[^"]*") + ) + """, + re.VERBOSE, + ), + "OP": r"(===|==|~=|!=|<=|>=|<|>)", + "BOOLOP": r"\b(or|and)\b", + "IN": r"\bin\b", + "NOT": r"\bnot\b", + "VARIABLE": re.compile( + r""" + \b( + python_version + |python_full_version + |os[._]name + |sys[._]platform + |platform_(release|system) + |platform[._](version|machine|python_implementation) + |python_implementation + |implementation_(name|version) + |extra + )\b + """, + re.VERBOSE, + ), + "SPECIFIER": re.compile( + Specifier._operator_regex_str + Specifier._version_regex_str, + re.VERBOSE | re.IGNORECASE, + ), + "AT": r"\@", + "URL": r"[^ \t]+", + "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", + "VERSION_PREFIX_TRAIL": r"\.\*", + "VERSION_LOCAL_LABEL_TRAIL": 
r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", + "WS": r"[ \t]+", + "END": r"$", +} + + +class Tokenizer: + """Context-sensitive token parsing. + + Provides methods to examine the input stream to check whether the next token + matches. + """ + + def __init__( + self, + source: str, + *, + rules: "Dict[str, Union[str, re.Pattern[str]]]", + ) -> None: + self.source = source + self.rules: Dict[str, re.Pattern[str]] = { + name: re.compile(pattern) for name, pattern in rules.items() + } + self.next_token: Optional[Token] = None + self.position = 0 + + def consume(self, name: str) -> None: + """Move beyond provided token name, if at current position.""" + if self.check(name): + self.read() + + def check(self, name: str, *, peek: bool = False) -> bool: + """Check whether the next token has the provided name. + + By default, if the check succeeds, the token *must* be read before + another check. If `peek` is set to `True`, the token is not loaded and + would need to be checked again. + """ + assert ( + self.next_token is None + ), f"Cannot check for {name!r}, already have {self.next_token!r}" + assert name in self.rules, f"Unknown token name: {name!r}" + + expression = self.rules[name] + + match = expression.match(self.source, self.position) + if match is None: + return False + if not peek: + self.next_token = Token(name, match[0], self.position) + return True + + def expect(self, name: str, *, expected: str) -> Token: + """Expect a certain token name next, failing with a syntax error otherwise. + + The token is *not* read. + """ + if not self.check(name): + raise self.raise_syntax_error(f"Expected {expected}") + return self.read() + + def read(self) -> Token: + """Consume the next token and return it.""" + token = self.next_token + assert token is not None + + self.position += len(token.text) + self.next_token = None + + return token + + def raise_syntax_error( + self, + message: str, + *, + span_start: Optional[int] = None, + span_end: Optional[int] = None, + ) -> NoReturn: + """Raise ParserSyntaxError at the given position.""" + span = ( + self.position if span_start is None else span_start, + self.position if span_end is None else span_end, + ) + raise ParserSyntaxError( + message, + source=self.source, + span=span, + ) + + @contextlib.contextmanager + def enclosing_tokens( + self, open_token: str, close_token: str, *, around: str + ) -> Iterator[None]: + if self.check(open_token): + open_position = self.position + self.read() + else: + open_position = None + + yield + + if open_position is None: + return + + if not self.check(close_token): + self.raise_syntax_error( + f"Expected matching {close_token} for {open_token}, after {around}", + span_start=open_position, + ) + + self.read() diff --git a/src/pip/_vendor/packaging/markers.py b/src/pip/_vendor/packaging/markers.py index 540e7a4dc79..8b98fca7233 100644 --- a/src/pip/_vendor/packaging/markers.py +++ b/src/pip/_vendor/packaging/markers.py @@ -8,19 +8,17 @@ import sys from typing import Any, Callable, Dict, List, Optional, Tuple, Union -from pip._vendor.pyparsing import ( # noqa: N817 - Forward, - Group, - Literal as L, - ParseException, - ParseResults, - QuotedString, - ZeroOrMore, - stringEnd, - stringStart, +from ._parser import ( + MarkerAtom, + MarkerList, + Op, + Value, + Variable, + parse_marker as _parse_marker, ) - +from ._tokenizer import ParserSyntaxError from .specifiers import InvalidSpecifier, Specifier +from .utils import canonicalize_name __all__ = [ "InvalidMarker", @@ -52,101 +50,24 @@ class UndefinedEnvironmentName(ValueError): """ 
-class Node: - def __init__(self, value: Any) -> None: - self.value = value - - def __str__(self) -> str: - return str(self.value) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__}('{self}')>" - - def serialize(self) -> str: - raise NotImplementedError - - -class Variable(Node): - def serialize(self) -> str: - return str(self) - - -class Value(Node): - def serialize(self) -> str: - return f'"{self}"' - - -class Op(Node): - def serialize(self) -> str: - return str(self) - - -VARIABLE = ( - L("implementation_version") - | L("platform_python_implementation") - | L("implementation_name") - | L("python_full_version") - | L("platform_release") - | L("platform_version") - | L("platform_machine") - | L("platform_system") - | L("python_version") - | L("sys_platform") - | L("os_name") - | L("os.name") # PEP-345 - | L("sys.platform") # PEP-345 - | L("platform.version") # PEP-345 - | L("platform.machine") # PEP-345 - | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # undocumented setuptools legacy - | L("extra") # PEP-508 -) -ALIASES = { - "os.name": "os_name", - "sys.platform": "sys_platform", - "platform.version": "platform_version", - "platform.machine": "platform_machine", - "platform.python_implementation": "platform_python_implementation", - "python_implementation": "platform_python_implementation", -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results +def _normalize_extra_values(results: Any) -> Any: + """ + Normalize extra values. 
+ """ + if isinstance(results[0], tuple): + lhs, op, rhs = results[0] + if isinstance(lhs, Variable) and lhs.value == "extra": + normalized_extra = canonicalize_name(rhs.value) + rhs = Value(normalized_extra) + elif isinstance(rhs, Variable) and rhs.value == "extra": + normalized_extra = canonicalize_name(lhs.value) + lhs = Value(normalized_extra) + results[0] = lhs, op, rhs + return results def _format_marker( - marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True + marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True ) -> str: assert isinstance(marker, (list, tuple, str)) @@ -192,7 +113,7 @@ def _eval_op(lhs: str, op: Op, rhs: str) -> bool: except InvalidSpecifier: pass else: - return spec.contains(lhs) + return spec.contains(lhs, prereleases=True) oper: Optional[Operator] = _operators.get(op.serialize()) if oper is None: @@ -201,25 +122,19 @@ def _eval_op(lhs: str, op: Op, rhs: str) -> bool: return oper(lhs, rhs) -class Undefined: - pass - +def _normalize(*values: str, key: str) -> Tuple[str, ...]: + # PEP 685 – Comparison of extra names for optional distribution dependencies + # https://peps.python.org/pep-0685/ + # > When comparing extra names, tools MUST normalize the names being + # > compared using the semantics outlined in PEP 503 for names + if key == "extra": + return tuple(canonicalize_name(v) for v in values) -_undefined = Undefined() + # other environment markers don't have such standards + return values -def _get_env(environment: Dict[str, str], name: str) -> str: - value: Union[str, Undefined] = environment.get(name, _undefined) - - if isinstance(value, Undefined): - raise UndefinedEnvironmentName( - f"{name!r} does not exist in evaluation environment." - ) - - return value - - -def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: +def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool: groups: List[List[bool]] = [[]] for marker in markers: @@ -231,12 +146,15 @@ def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: lhs, op, rhs = marker if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) + environment_key = lhs.value + lhs_value = environment[environment_key] rhs_value = rhs.value else: lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) + environment_key = rhs.value + rhs_value = environment[environment_key] + lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) groups[-1].append(_eval_op(lhs_value, op, rhs_value)) else: assert marker in ["and", "or"] @@ -274,13 +192,29 @@ def default_environment() -> Dict[str, str]: class Marker: def __init__(self, marker: str) -> None: + # Note: We create a Marker object without calling this constructor in + # packaging.requirements.Requirement. If any additional logic is + # added here, make sure to mirror/adapt Requirement. 
         try:
-            self._markers = _coerce_parse_result(MARKER.parseString(marker))
-        except ParseException as e:
-            raise InvalidMarker(
-                f"Invalid marker: {marker!r}, parse error at "
-                f"{marker[e.loc : e.loc + 8]!r}"
-            )
+            self._markers = _normalize_extra_values(_parse_marker(marker))
+            # The attribute `_markers` can be described in terms of a recursive type:
+            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
+            #
+            # For example, the following expression:
+            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
+            #
+            # is parsed into:
+            # [
+            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
+            #     'or',
+            #     [
+            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
+            #         'and',
+            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
+            #     ]
+            # ]
+        except ParserSyntaxError as e:
+            raise InvalidMarker(str(e)) from e
 
     def __str__(self) -> str:
         return _format_marker(self._markers)
@@ -288,6 +222,15 @@ def __str__(self) -> str:
     def __repr__(self) -> str:
         return f"<Marker('{self}')>"
 
+    def __hash__(self) -> int:
+        return hash((self.__class__.__name__, str(self)))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Marker):
+            return NotImplemented
+
+        return str(self) == str(other)
+
     def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
         """Evaluate a marker.
 
@@ -298,7 +241,12 @@ def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
         The environment is determined from the current Python process.
         """
         current_environment = default_environment()
+        current_environment["extra"] = ""
         if environment is not None:
             current_environment.update(environment)
+            # The API used to allow setting extra to None. We need to handle this
+            # case for backwards compatibility.
+            if current_environment["extra"] is None:
+                current_environment["extra"] = ""
 
         return _evaluate_markers(self._markers, current_environment)
diff --git a/src/pip/_vendor/packaging/metadata.py b/src/pip/_vendor/packaging/metadata.py
new file mode 100644
index 00000000000..87763455ab1
--- /dev/null
+++ b/src/pip/_vendor/packaging/metadata.py
@@ -0,0 +1,822 @@
+import email.feedparser
+import email.header
+import email.message
+import email.parser
+import email.policy
+import sys
+import typing
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Generic,
+    List,
+    Optional,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from . import requirements, specifiers, utils, version as version_module
+
+T = typing.TypeVar("T")
+if sys.version_info[:2] >= (3, 8):  # pragma: no cover
+    from typing import Literal, TypedDict
+else:  # pragma: no cover
+    if typing.TYPE_CHECKING:
+        from pip._vendor.typing_extensions import Literal, TypedDict
+    else:
+        try:
+            from pip._vendor.typing_extensions import Literal, TypedDict
+        except ImportError:
+
+            class Literal:
+                def __init_subclass__(*_args, **_kwargs):
+                    pass
+
+            class TypedDict:
+                def __init_subclass__(*_args, **_kwargs):
+                    pass
+
+
+try:
+    ExceptionGroup = __builtins__.ExceptionGroup  # type: ignore[attr-defined]
+except AttributeError:
+
+    class ExceptionGroup(Exception):  # type: ignore[no-redef] # noqa: N818
+        """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11.
+
+        If :external:exc:`ExceptionGroup` is already defined by Python itself,
+        that version is used instead.
+        """
+
+        message: str
+        exceptions: List[Exception]
+
+        def __init__(self, message: str, exceptions: List[Exception]) -> None:
+            self.message = message
+            self.exceptions = exceptions
+
+        def __repr__(self) -> str:
+            return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})"
+
+
+class InvalidMetadata(ValueError):
+    """A metadata field contains invalid data."""
+
+    field: str
+    """The name of the field that contains invalid data."""
+
+    def __init__(self, field: str, message: str) -> None:
+        self.field = field
+        super().__init__(message)
+
+
+# The RawMetadata class attempts to make as few assumptions about the underlying
+# serialization formats as possible. The idea is that as long as a serialization
+# format offers some very basic primitives in *some* way then we can support
+# serializing to and from that format.
+class RawMetadata(TypedDict, total=False):
+    """A dictionary of raw core metadata.
+
+    Each field in core metadata maps to a key of this dictionary (when data is
+    provided). The key is lower-case and underscores are used instead of dashes
+    compared to the equivalent core metadata field. Any core metadata field that
+    can be specified multiple times or can hold multiple values in a single
+    field has a key with a plural name. See :class:`Metadata` whose attributes
+    match the keys of this dictionary.
+
+    Core metadata fields that can be specified multiple times are stored as a
+    list or dict depending on which is appropriate for the field. Any fields
+    which hold multiple values in a single field are stored as a list.
+
+    """
+
+    # Metadata 1.0 - PEP 241
+    metadata_version: str
+    name: str
+    version: str
+    platforms: List[str]
+    summary: str
+    description: str
+    keywords: List[str]
+    home_page: str
+    author: str
+    author_email: str
+    license: str
+
+    # Metadata 1.1 - PEP 314
+    supported_platforms: List[str]
+    download_url: str
+    classifiers: List[str]
+    requires: List[str]
+    provides: List[str]
+    obsoletes: List[str]
+
+    # Metadata 1.2 - PEP 345
+    maintainer: str
+    maintainer_email: str
+    requires_dist: List[str]
+    provides_dist: List[str]
+    obsoletes_dist: List[str]
+    requires_python: str
+    requires_external: List[str]
+    project_urls: Dict[str, str]
+
+    # Metadata 2.0
+    # PEP 426 attempted to completely revamp the metadata format
+    # but got stuck without ever being able to build consensus on
+    # it and ultimately ended up withdrawn.
+    #
+    # However, a number of tools had started emitting METADATA with
+    # `2.0` Metadata-Version, so for historical reasons, this version
+    # was skipped.
+
+    # Metadata 2.1 - PEP 566
+    description_content_type: str
+    provides_extra: List[str]
+
+    # Metadata 2.2 - PEP 643
+    dynamic: List[str]
+
+    # Metadata 2.3 - PEP 685
+    # No new fields were added in PEP 685, just some edge cases were
+    # tightened up to provide better interoperability.
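To make the raw versus unparsed split concrete, here is a small sketch (not part of the patch) of how `parse_email()`, defined further down in this file, populates a `RawMetadata`; the `METADATA` text is invented for the example:

```python
# Illustrative sketch; the METADATA text below is made up.
from pip._vendor.packaging.metadata import parse_email

METADATA = """\
Metadata-Version: 2.1
Name: example-dist
Version: 1.0
Keywords: sample,demo
Project-URL: Homepage, https://example.org
Unknown-Field: kept verbatim
"""

raw, unparsed = parse_email(METADATA)
print(raw["name"], raw["version"])  # example-dist 1.0
print(raw["keywords"])              # ['sample', 'demo']
print(raw["project_urls"])          # {'Homepage': 'https://example.org'}
print(unparsed)                     # {'unknown-field': ['kept verbatim']}
```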
+
+
+_STRING_FIELDS = {
+    "author",
+    "author_email",
+    "description",
+    "description_content_type",
+    "download_url",
+    "home_page",
+    "license",
+    "maintainer",
+    "maintainer_email",
+    "metadata_version",
+    "name",
+    "requires_python",
+    "summary",
+    "version",
+}
+
+_LIST_FIELDS = {
+    "classifiers",
+    "dynamic",
+    "obsoletes",
+    "obsoletes_dist",
+    "platforms",
+    "provides",
+    "provides_dist",
+    "provides_extra",
+    "requires",
+    "requires_dist",
+    "requires_external",
+    "supported_platforms",
+}
+
+_DICT_FIELDS = {
+    "project_urls",
+}
+
+
+def _parse_keywords(data: str) -> List[str]:
+    """Split a string of comma-separated keywords into a list of keywords."""
+    return [k.strip() for k in data.split(",")]
+
+
+def _parse_project_urls(data: List[str]) -> Dict[str, str]:
+    """Parse a list of label/URL string pairings separated by a comma."""
+    urls = {}
+    for pair in data:
+        # Our logic is slightly tricky here as we want to try and do
+        # *something* reasonable with malformed data.
+        #
+        # The main thing that we have to worry about is data that does
+        # not have a ',' at all to split the label from the value. There
+        # isn't a singular right answer here, and we will fail validation
+        # later on (if the caller is validating) so it doesn't *really*
+        # matter, but since the missing value has to be an empty str
+        # and our return value is dict[str, str], if we let the key
+        # be the missing value, then they'd have multiple '' values that
+        # overwrite each other in an accumulating dict.
+        #
+        # The other potential issue is that it's possible to have the
+        # same label multiple times in the metadata, with no solid "right"
+        # answer for what to do in that case. As such, we'll do the only
+        # thing we can, which is treat the field as unparseable and add it
+        # to our list of unparsed fields.
+        parts = [p.strip() for p in pair.split(",", 1)]
+        parts.extend([""] * (max(0, 2 - len(parts))))  # Ensure 2 items
+
+        # TODO: The spec doesn't say anything about if the keys should be
+        #       considered case sensitive or not... logically they should
+        #       be case-preserving and case-insensitive, but doing that
+        #       would open up more cases where we might have duplicate
+        #       entries.
+        label, url = parts
+        if label in urls:
+            # The label already exists in our set of urls, so this field
+            # is unparseable, and we can just add the whole thing to our
+            # unparseable data and stop processing it.
+            raise KeyError("duplicate labels in project urls")
+        urls[label] = url
+
+    return urls
+
+
+def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
+    """Get the body of the message."""
+    # If our source is a str, then our caller has managed encodings for us,
+    # and we don't need to deal with it.
+    if isinstance(source, str):
+        payload: str = msg.get_payload()
+        return payload
+    # If our source is a bytes, then we're managing the encoding and we need
+    # to deal with it.
+    else:
+        bpayload: bytes = msg.get_payload(decode=True)
+        try:
+            return bpayload.decode("utf8", "strict")
+        except UnicodeDecodeError:
+            raise ValueError("payload in an invalid encoding")
+
+
+# The various parse_FORMAT functions here are intended to be as lenient as
+# possible in their parsing, while still returning a correctly typed
+# RawMetadata.
+#
+# To aid in this, we also generally want to do as little touching of the
+# data as possible, except where there are possibly some historic holdovers
+# that make valid data awkward to work with.
+# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. +_EMAIL_TO_RAW_MAPPING = { + "author": "author", + "author-email": "author_email", + "classifier": "classifiers", + "description": "description", + "description-content-type": "description_content_type", + "download-url": "download_url", + "dynamic": "dynamic", + "home-page": "home_page", + "keywords": "keywords", + "license": "license", + "maintainer": "maintainer", + "maintainer-email": "maintainer_email", + "metadata-version": "metadata_version", + "name": "name", + "obsoletes": "obsoletes", + "obsoletes-dist": "obsoletes_dist", + "platform": "platforms", + "project-url": "project_urls", + "provides": "provides", + "provides-dist": "provides_dist", + "provides-extra": "provides_extra", + "requires": "requires", + "requires-dist": "requires_dist", + "requires-external": "requires_external", + "requires-python": "requires_python", + "summary": "summary", + "supported-platform": "supported_platforms", + "version": "version", +} +_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()} + + +def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]: + """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``). + + This function returns a two-item tuple of dicts. The first dict is of + recognized fields from the core metadata specification. Fields that can be + parsed and translated into Python's built-in types are converted + appropriately. All other fields are left as-is. Fields that are allowed to + appear multiple times are stored as lists. + + The second dict contains all other fields from the metadata. This includes + any unrecognized fields. It also includes any fields which are expected to + be parsed into a built-in type but were not formatted appropriately. Finally, + any fields that are expected to appear only once but are repeated are + included in this dict. + + """ + raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {} + unparsed: Dict[str, List[str]] = {} + + if isinstance(data, str): + parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data) + else: + parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data) + + # We have to wrap parsed.keys() in a set, because in the case of multiple + # values for a key (a list), the key will appear multiple times in the + # list of keys, but we're avoiding that by using get_all(). + for name in frozenset(parsed.keys()): + # Header names in RFC are case insensitive, so we'll normalize to all + # lower case to make comparisons easier. + name = name.lower() + + # We use get_all() here, even for fields that aren't multiple use, + # because otherwise someone could have e.g. two Name fields, and we + # would just silently ignore it rather than doing something about it. + headers = parsed.get_all(name) or [] + + # The way the email module works when parsing bytes is that it + # unconditionally decodes the bytes as ascii using the surrogateescape + # handler. When you pull that data back out (such as with get_all() ), + # it looks to see if the str has any surrogate escapes, and if it does + # it wraps it in a Header object instead of returning the string. + # + # As such, we'll look for those Header objects, and fix up the encoding. 
+        value = []
+        # Flag if we have run into any issues processing the headers, thus
+        # signalling that the data belongs in 'unparsed'.
+        valid_encoding = True
+        for h in headers:
+            # It's unclear if this can return more types than just a Header or
+            # a str, so we'll just assert here to make sure.
+            assert isinstance(h, (email.header.Header, str))
+
+            # If it's a header object, we need to do our little dance to get
+            # the real data out of it. In cases where there is invalid data
+            # we're going to end up with mojibake, but there's no obvious, good
+            # way around that without reimplementing parts of the Header object
+            # ourselves.
+            #
+            # That should be fine since, if mojibake happens, this key is
+            # going into the unparsed dict anyways.
+            if isinstance(h, email.header.Header):
+                # The Header object stores its data as chunks, and each chunk
+                # can be independently encoded, so we'll need to check each
+                # of them.
+                chunks: List[Tuple[bytes, Optional[str]]] = []
+                for bin, encoding in email.header.decode_header(h):
+                    try:
+                        bin.decode("utf8", "strict")
+                    except UnicodeDecodeError:
+                        # Enable mojibake.
+                        encoding = "latin1"
+                        valid_encoding = False
+                    else:
+                        encoding = "utf8"
+                    chunks.append((bin, encoding))
+
+                # Turn our chunks back into a Header object, then let that
+                # Header object do the right thing to turn them into a
+                # string for us.
+                value.append(str(email.header.make_header(chunks)))
+            # This is already a string, so just add it.
+            else:
+                value.append(h)
+
+        # We've processed all of our values to get them into a list of str,
+        # but we may have mojibake data, in which case this is an unparsed
+        # field.
+        if not valid_encoding:
+            unparsed[name] = value
+            continue
+
+        raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
+        if raw_name is None:
+            # This is a bit of a weird situation, we've encountered a key that
+            # we don't know what it means, so we don't know whether it's meant
+            # to be a list or not.
+            #
+            # Since we can't really tell one way or another, we'll just leave it
+            # as a list, even though it may be a single item list, because that's
+            # what makes the most sense for email headers.
+            unparsed[name] = value
+            continue
+
+        # If this is one of our string fields, then we'll check to see if our
+        # value is a list of a single item. If it is then we'll assume that
+        # it was emitted as a single string, and unwrap the str from inside
+        # the list.
+        #
+        # If it's any other kind of data, then we haven't the faintest clue
+        # what we should parse it as, and we have to just add it to our list
+        # of unparsed stuff.
+        if raw_name in _STRING_FIELDS and len(value) == 1:
+            raw[raw_name] = value[0]
+        # If this is one of our list of string fields, then we can just assign
+        # the value, since email *only* has strings, and our get_all() call
+        # above ensures that this is a list.
+        elif raw_name in _LIST_FIELDS:
+            raw[raw_name] = value
+        # Special Case: Keywords
+        # The keywords field is implemented in the metadata spec as a str,
+        # but it conceptually is a list of strings, and is serialized using
+        # ", ".join(keywords), so we'll do some light data massaging to turn
+        # this into what it logically is.
+        elif raw_name == "keywords" and len(value) == 1:
+            raw[raw_name] = _parse_keywords(value[0])
+        # Special Case: Project-URL
+        # The project urls field is implemented in the metadata spec as a list of
+        # specially-formatted strings that represent a key and a value, which
+        # is fundamentally a mapping, however the email format doesn't support
+        # mappings in a sane way, so it was crammed into a list of strings
+        # instead.
+        #
+        # We will do a little light data massaging to turn this into a map as
+        # it logically should be.
+        elif raw_name == "project_urls":
+            try:
+                raw[raw_name] = _parse_project_urls(value)
+            except KeyError:
+                unparsed[name] = value
+        # Nothing that we've done has managed to parse this, so we'll just
+        # throw it in our unparseable data and move on.
+        else:
+            unparsed[name] = value
+
+    # We need to support getting the Description from the message payload in
+    # addition to getting it from the headers. This does mean, though, there
+    # is the possibility of it being set both ways, in which case we put both
+    # in 'unparsed' since we don't know which is right.
+    try:
+        payload = _get_payload(parsed, data)
+    except ValueError:
+        unparsed.setdefault("description", []).append(
+            parsed.get_payload(decode=isinstance(data, bytes))
+        )
+    else:
+        if payload:
+            # Check to see if we've already got a description; if so then both
+            # it and this body move to unparseable.
+            if "description" in raw:
+                description_header = cast(str, raw.pop("description"))
+                unparsed.setdefault("description", []).extend(
+                    [description_header, payload]
+                )
+            elif "description" in unparsed:
+                unparsed["description"].append(payload)
+            else:
+                raw["description"] = payload
+
+    # We need to cast our `raw` dict to RawMetadata, because a TypedDict only
+    # supports literal key names while we are computing our key names on
+    # purpose; the way this function is implemented, though, our `TypedDict`
+    # can only end up with valid key names.
+    return cast(RawMetadata, raw), unparsed
+
+
+_NOT_FOUND = object()
+
+
+# Keep the two values in sync.
+_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]
+_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]
+
+_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])
+
+
+class _Validator(Generic[T]):
+    """Validate a metadata field.
+
+    All _process_*() methods correspond to a core metadata field. The method is
+    called with the field's raw value. If the raw value is valid it is returned
+    in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field).
+    If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause
+    as appropriate).
+    """
+
+    name: str
+    raw_name: str
+    added: _MetadataVersion
+
+    def __init__(
+        self,
+        *,
+        added: _MetadataVersion = "1.0",
+    ) -> None:
+        self.added = added
+
+    def __set_name__(self, _owner: "Metadata", name: str) -> None:
+        self.name = name
+        self.raw_name = _RAW_TO_EMAIL_MAPPING[name]
+
+    def __get__(self, instance: "Metadata", _owner: Type["Metadata"]) -> T:
+        # With Python 3.8, the caching can be replaced with functools.cached_property().
+        # No need to check the cache as attribute lookup will resolve into the
+        # instance's __dict__ before __get__ is called.
+ cache = instance.__dict__ + try: + value = instance._raw[self.name] # type: ignore[literal-required] + except KeyError: + if self.name in _STRING_FIELDS: + value = "" + elif self.name in _LIST_FIELDS: + value = [] + elif self.name in _DICT_FIELDS: + value = {} + else: # pragma: no cover + assert False + + try: + converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") + except AttributeError: + pass + else: + value = converter(value) + + cache[self.name] = value + try: + del instance._raw[self.name] # type: ignore[misc] + except KeyError: + pass + + return cast(T, value) + + def _invalid_metadata( + self, msg: str, cause: Optional[Exception] = None + ) -> InvalidMetadata: + exc = InvalidMetadata( + self.raw_name, msg.format_map({"field": repr(self.raw_name)}) + ) + exc.__cause__ = cause + return exc + + def _process_metadata_version(self, value: str) -> _MetadataVersion: + # Implicitly makes Metadata-Version required. + if value not in _VALID_METADATA_VERSIONS: + raise self._invalid_metadata(f"{value!r} is not a valid metadata version") + return cast(_MetadataVersion, value) + + def _process_name(self, value: str) -> str: + if not value: + raise self._invalid_metadata("{field} is a required field") + # Validate the name as a side-effect. + try: + utils.canonicalize_name(value, validate=True) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + else: + return value + + def _process_version(self, value: str) -> version_module.Version: + if not value: + raise self._invalid_metadata("{field} is a required field") + try: + return version_module.parse(value) + except version_module.InvalidVersion as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_summary(self, value: str) -> str: + """Check the field contains no newlines.""" + if "\n" in value: + raise self._invalid_metadata("{field} must be a single line") + return value + + def _process_description_content_type(self, value: str) -> str: + content_types = {"text/plain", "text/x-rst", "text/markdown"} + message = email.message.EmailMessage() + message["content-type"] = value + + content_type, parameters = ( + # Defaults to `text/plain` if parsing failed. + message.get_content_type().lower(), + message["content-type"].params, + ) + # Check if content-type is valid or defaulted to `text/plain` and thus was + # not parseable. + if content_type not in content_types or content_type not in value.lower(): + raise self._invalid_metadata( + f"{{field}} must be one of {list(content_types)}, not {value!r}" + ) + + charset = parameters.get("charset", "UTF-8") + if charset != "UTF-8": + raise self._invalid_metadata( + f"{{field}} can only specify the UTF-8 charset, not {list(charset)}" + ) + + markdown_variants = {"GFM", "CommonMark"} + variant = parameters.get("variant", "GFM") # Use an acceptable default. 
+ if content_type == "text/markdown" and variant not in markdown_variants: + raise self._invalid_metadata( + f"valid Markdown variants for {{field}} are {list(markdown_variants)}, " + f"not {variant!r}", + ) + return value + + def _process_dynamic(self, value: List[str]) -> List[str]: + for dynamic_field in map(str.lower, value): + if dynamic_field in {"name", "version", "metadata-version"}: + raise self._invalid_metadata( + f"{value!r} is not allowed as a dynamic field" + ) + elif dynamic_field not in _EMAIL_TO_RAW_MAPPING: + raise self._invalid_metadata(f"{value!r} is not a valid dynamic field") + return list(map(str.lower, value)) + + def _process_provides_extra( + self, + value: List[str], + ) -> List[utils.NormalizedName]: + normalized_names = [] + try: + for name in value: + normalized_names.append(utils.canonicalize_name(name, validate=True)) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{name!r} is invalid for {{field}}", cause=exc + ) + else: + return normalized_names + + def _process_requires_python(self, value: str) -> specifiers.SpecifierSet: + try: + return specifiers.SpecifierSet(value) + except specifiers.InvalidSpecifier as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_requires_dist( + self, + value: List[str], + ) -> List[requirements.Requirement]: + reqs = [] + try: + for req in value: + reqs.append(requirements.Requirement(req)) + except requirements.InvalidRequirement as exc: + raise self._invalid_metadata(f"{req!r} is invalid for {{field}}", cause=exc) + else: + return reqs + + +class Metadata: + """Representation of distribution metadata. + + Compared to :class:`RawMetadata`, this class provides objects representing + metadata fields instead of only using built-in types. Any invalid metadata + will cause :exc:`InvalidMetadata` to be raised (with a + :py:attr:`~BaseException.__cause__` attribute as appropriate). + """ + + _raw: RawMetadata + + @classmethod + def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> "Metadata": + """Create an instance from :class:`RawMetadata`. + + If *validate* is true, all metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + ins = cls() + ins._raw = data.copy() # Mutations occur due to caching enriched values. + + if validate: + exceptions: List[InvalidMetadata] = [] + try: + metadata_version = ins.metadata_version + metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) + except InvalidMetadata as metadata_version_exc: + exceptions.append(metadata_version_exc) + metadata_version = None + + # Make sure to check for the fields that are present, the required + # fields (so their absence can be reported). + fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS + # Remove fields that have already been checked. + fields_to_check -= {"metadata_version"} + + for key in fields_to_check: + try: + if metadata_version: + # Can't use getattr() as that triggers descriptor protocol which + # will fail due to no value for the instance argument. 
+ try: + field_metadata_version = cls.__dict__[key].added + except KeyError: + exc = InvalidMetadata(key, f"unrecognized field: {key!r}") + exceptions.append(exc) + continue + field_age = _VALID_METADATA_VERSIONS.index( + field_metadata_version + ) + if field_age > metadata_age: + field = _RAW_TO_EMAIL_MAPPING[key] + exc = InvalidMetadata( + field, + "{field} introduced in metadata version " + "{field_metadata_version}, not {metadata_version}", + ) + exceptions.append(exc) + continue + getattr(ins, key) + except InvalidMetadata as exc: + exceptions.append(exc) + + if exceptions: + raise ExceptionGroup("invalid metadata", exceptions) + + return ins + + @classmethod + def from_email( + cls, data: Union[bytes, str], *, validate: bool = True + ) -> "Metadata": + """Parse metadata from email headers. + + If *validate* is true, the metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + exceptions: list[InvalidMetadata] = [] + raw, unparsed = parse_email(data) + + if validate: + for unparsed_key in unparsed: + if unparsed_key in _EMAIL_TO_RAW_MAPPING: + message = f"{unparsed_key!r} has invalid data" + else: + message = f"unrecognized field: {unparsed_key!r}" + exceptions.append(InvalidMetadata(unparsed_key, message)) + + if exceptions: + raise ExceptionGroup("unparsed", exceptions) + + try: + return cls.from_raw(raw, validate=validate) + except ExceptionGroup as exc_group: + exceptions.extend(exc_group.exceptions) + raise ExceptionGroup("invalid or unparsed metadata", exceptions) from None + + metadata_version: _Validator[_MetadataVersion] = _Validator() + """:external:ref:`core-metadata-metadata-version` + (required; validated to be a valid metadata version)""" + name: _Validator[str] = _Validator() + """:external:ref:`core-metadata-name` + (required; validated using :func:`~packaging.utils.canonicalize_name` and its + *validate* parameter)""" + version: _Validator[version_module.Version] = _Validator() + """:external:ref:`core-metadata-version` (required)""" + dynamic: _Validator[List[str]] = _Validator( + added="2.2", + ) + """:external:ref:`core-metadata-dynamic` + (validated against core metadata field names and lowercased)""" + platforms: _Validator[List[str]] = _Validator() + """:external:ref:`core-metadata-platform`""" + supported_platforms: _Validator[List[str]] = _Validator(added="1.1") + """:external:ref:`core-metadata-supported-platform`""" + summary: _Validator[str] = _Validator() + """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" + description: _Validator[str] = _Validator() # TODO 2.1: can be in body + """:external:ref:`core-metadata-description`""" + description_content_type: _Validator[str] = _Validator(added="2.1") + """:external:ref:`core-metadata-description-content-type` (validated)""" + keywords: _Validator[List[str]] = _Validator() + """:external:ref:`core-metadata-keywords`""" + home_page: _Validator[str] = _Validator() + """:external:ref:`core-metadata-home-page`""" + download_url: _Validator[str] = _Validator(added="1.1") + """:external:ref:`core-metadata-download-url`""" + author: _Validator[str] = _Validator() + """:external:ref:`core-metadata-author`""" + author_email: _Validator[str] = _Validator() + """:external:ref:`core-metadata-author-email`""" + maintainer: _Validator[str] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer`""" + maintainer_email: _Validator[str] = _Validator(added="1.2") + 
""":external:ref:`core-metadata-maintainer-email`""" + license: _Validator[str] = _Validator() + """:external:ref:`core-metadata-license`""" + classifiers: _Validator[List[str]] = _Validator(added="1.1") + """:external:ref:`core-metadata-classifier`""" + requires_dist: _Validator[List[requirements.Requirement]] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-dist`""" + requires_python: _Validator[specifiers.SpecifierSet] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-python`""" + # Because `Requires-External` allows for non-PEP 440 version specifiers, we + # don't do any processing on the values. + requires_external: _Validator[List[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-external`""" + project_urls: _Validator[Dict[str, str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-project-url`""" + # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation + # regardless of metadata version. + provides_extra: _Validator[List[utils.NormalizedName]] = _Validator( + added="2.1", + ) + """:external:ref:`core-metadata-provides-extra`""" + provides_dist: _Validator[List[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-provides-dist`""" + obsoletes_dist: _Validator[List[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-obsoletes-dist`""" + requires: _Validator[List[str]] = _Validator(added="1.1") + """``Requires`` (deprecated)""" + provides: _Validator[List[str]] = _Validator(added="1.1") + """``Provides`` (deprecated)""" + obsoletes: _Validator[List[str]] = _Validator(added="1.1") + """``Obsoletes`` (deprecated)""" diff --git a/src/pip/_vendor/packaging/requirements.py b/src/pip/_vendor/packaging/requirements.py index 1eab7dd66d9..0c00eba331b 100644 --- a/src/pip/_vendor/packaging/requirements.py +++ b/src/pip/_vendor/packaging/requirements.py @@ -2,26 +2,13 @@ # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
-import re -import string -import urllib.parse -from typing import List, Optional as TOptional, Set - -from pip._vendor.pyparsing import ( # noqa - Combine, - Literal as L, - Optional, - ParseException, - Regex, - Word, - ZeroOrMore, - originalTextFor, - stringEnd, - stringStart, -) - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet +from typing import Any, Iterator, Optional, Set + +from ._parser import parse_requirement as _parse_requirement +from ._tokenizer import ParserSyntaxError +from .markers import Marker, _normalize_extra_values +from .specifiers import SpecifierSet +from .utils import canonicalize_name class InvalidRequirement(ValueError): @@ -30,60 +17,6 @@ class InvalidRequirement(ValueError): """ -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r"[^ ]+")("url") -URL = AT + URI - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine( - VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False -)("_raw_spec") -_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start : t._original_end]) -) -MARKER_SEPARATOR = SEMICOLON -MARKER = MARKER_SEPARATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd -# pyparsing isn't thread safe during initialization, so we do it eagerly, see -# issue #104 -REQUIREMENT.parseString("x[]") - - class Requirement: """Parse a requirement. 
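As context for the hunk that follows, a short sketch (not part of the patch) of the `Requirement` behaviour the rework below preserves; the requirement strings are invented, and `InvalidRequirement` now wraps the new parser's `ParserSyntaxError`:

```python
# Illustrative sketch, not part of the patch.
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement

req = Requirement('name[bar, quux] >=2,<3 ; python_version > "3.6"')
print(req.name, sorted(req.extras))  # name ['bar', 'quux']
print(req.specifier)                 # <3,>=2
print(req.marker)                    # python_version > "3.6"

try:
    Requirement("name ==")
except InvalidRequirement as exc:
    print(exc)  # multi-line syntax error with a caret marker
```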
@@ -99,48 +32,59 @@ class Requirement:
 
     def __init__(self, requirement_string: str) -> None:
         try:
-            req = REQUIREMENT.parseString(requirement_string)
-        except ParseException as e:
-            raise InvalidRequirement(
-                f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
-            )
-
-        self.name: str = req.name
-        if req.url:
-            parsed_url = urllib.parse.urlparse(req.url)
-            if parsed_url.scheme == "file":
-                if urllib.parse.urlunparse(parsed_url) != req.url:
-                    raise InvalidRequirement("Invalid URL given")
-            elif not (parsed_url.scheme and parsed_url.netloc) or (
-                not parsed_url.scheme and not parsed_url.netloc
-            ):
-                raise InvalidRequirement(f"Invalid URL: {req.url}")
-            self.url: TOptional[str] = req.url
-        else:
-            self.url = None
-        self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
-        self.specifier: SpecifierSet = SpecifierSet(req.specifier)
-        self.marker: TOptional[Marker] = req.marker if req.marker else None
-
-    def __str__(self) -> str:
-        parts: List[str] = [self.name]
+            parsed = _parse_requirement(requirement_string)
+        except ParserSyntaxError as e:
+            raise InvalidRequirement(str(e)) from e
+
+        self.name: str = parsed.name
+        self.url: Optional[str] = parsed.url or None
+        self.extras: Set[str] = set(parsed.extras if parsed.extras else [])
+        self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
+        self.marker: Optional[Marker] = None
+        if parsed.marker is not None:
+            self.marker = Marker.__new__(Marker)
+            self.marker._markers = _normalize_extra_values(parsed.marker)
+
+    def _iter_parts(self, name: str) -> Iterator[str]:
+        yield name
 
         if self.extras:
             formatted_extras = ",".join(sorted(self.extras))
-            parts.append(f"[{formatted_extras}]")
+            yield f"[{formatted_extras}]"
 
         if self.specifier:
-            parts.append(str(self.specifier))
+            yield str(self.specifier)
 
         if self.url:
-            parts.append(f"@ {self.url}")
+            yield f"@ {self.url}"
             if self.marker:
-                parts.append(" ")
+                yield " "
 
         if self.marker:
-            parts.append(f"; {self.marker}")
+            yield f"; {self.marker}"
 
-        return "".join(parts)
+    def __str__(self) -> str:
+        return "".join(self._iter_parts(self.name))
 
     def __repr__(self) -> str:
         return f"<Requirement('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash(
+            (
+                self.__class__.__name__,
+                *self._iter_parts(canonicalize_name(self.name)),
+            )
+        )
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Requirement):
+            return NotImplemented
+
+        return (
+            canonicalize_name(self.name) == canonicalize_name(other.name)
+            and self.extras == other.extras
+            and self.specifier == other.specifier
+            and self.url == other.url
+            and self.marker == other.marker
+        )
diff --git a/src/pip/_vendor/packaging/specifiers.py b/src/pip/_vendor/packaging/specifiers.py
index 0e218a6f9f7..7617726c385 100644
--- a/src/pip/_vendor/packaging/specifiers.py
+++ b/src/pip/_vendor/packaging/specifiers.py
@@ -1,20 +1,22 @@
 # This file is dual licensed under the terms of the Apache License, Version
 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
 # for complete details.
+"""
+.. testsetup::
testsetup:: + + from pip._vendor.packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier + from pip._vendor.packaging.version import Version +""" import abc -import functools import itertools import re -import warnings from typing import ( Callable, - Dict, Iterable, Iterator, List, Optional, - Pattern, Set, Tuple, TypeVar, @@ -22,17 +24,28 @@ ) from .utils import canonicalize_version -from .version import LegacyVersion, Version, parse +from .version import Version + +UnparsedVersion = Union[Version, str] +UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) +CallableOperator = Callable[[Version, str], bool] -ParsedVersion = Union[Version, LegacyVersion] -UnparsedVersion = Union[Version, LegacyVersion, str] -VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) -CallableOperator = Callable[[ParsedVersion, str], bool] + +def _coerce_version(version: UnparsedVersion) -> Version: + if not isinstance(version, Version): + version = Version(version) + return version class InvalidSpecifier(ValueError): """ - An invalid specifier was found, users should refer to PEP 440. + Raised when attempting to create a :class:`Specifier` with a specifier + string that is invalid. + + >>> Specifier("lolwat") + Traceback (most recent call last): + ... + packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' """ @@ -40,35 +53,39 @@ class BaseSpecifier(metaclass=abc.ABCMeta): @abc.abstractmethod def __str__(self) -> str: """ - Returns the str representation of this Specifier like object. This + Returns the str representation of this Specifier-like object. This should be representative of the Specifier itself. """ @abc.abstractmethod def __hash__(self) -> int: """ - Returns a hash value for this Specifier like object. + Returns a hash value for this Specifier-like object. """ @abc.abstractmethod def __eq__(self, other: object) -> bool: """ - Returns a boolean representing whether or not the two Specifier like + Returns a boolean representing whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. """ - @abc.abstractproperty + @property + @abc.abstractmethod def prereleases(self) -> Optional[bool]: - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. + """Whether or not pre-releases as a whole are allowed. + + This can be set to either ``True`` or ``False`` to explicitly enable or disable + prereleases or it can be set to ``None`` (the default) to use default semantics. """ @prereleases.setter def prereleases(self, value: bool) -> None: - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. + """Setter for :attr:`prereleases`. + + :param value: The value to set. """ @abc.abstractmethod @@ -79,227 +96,28 @@ def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: @abc.abstractmethod def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. 
""" -class _IndividualSpecifier(BaseSpecifier): - - _operators: Dict[str, str] = {} - _regex: Pattern[str] - - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier(f"Invalid specifier: '{spec}'") - - self._spec: Tuple[str, str] = ( - match.group("operator").strip(), - match.group("version").strip(), - ) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self) -> str: - pre = ( - f", prereleases={self.prereleases!r}" - if self._prereleases is not None - else "" - ) - - return f"<{self.__class__.__name__}({str(self)!r}{pre})>" - - def __str__(self) -> str: - return "{}{}".format(*self._spec) - - @property - def _canonical_spec(self) -> Tuple[str, str]: - return self._spec[0], canonicalize_version(self._spec[1]) - - def __hash__(self) -> int: - return hash(self._canonical_spec) - - def __eq__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._canonical_spec == other._canonical_spec - - def _get_operator(self, op: str) -> CallableOperator: - operator_callable: CallableOperator = getattr( - self, f"_compare_{self._operators[op]}" - ) - return operator_callable - - def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version +class Specifier(BaseSpecifier): + """This class abstracts handling of version specifiers. - @property - def operator(self) -> str: - return self._spec[0] + .. tip:: - @property - def version(self) -> str: - return self._spec[1] - - @property - def prereleases(self) -> Optional[bool]: - return self._prereleases - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: str) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Determine if prereleases are to be allowed or not. - if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - normalized_item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if normalized_item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. - operator_callable: CallableOperator = self._get_operator(self.operator) - return operator_callable(normalized_item, self.version) - - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. 
-        for version in iterable:
-            parsed_version = self._coerce_version(version)
-
-            if self.contains(parsed_version, **kw):
-                # If our version is a prerelease, and we were not set to allow
-                # prereleases, then we'll store it for later in case nothing
-                # else matches this specifier.
-                if parsed_version.is_prerelease and not (
-                    prereleases or self.prereleases
-                ):
-                    found_prereleases.append(version)
-                # Either this is not a prerelease, or we should have been
-                # accepting prereleases from the beginning.
-                else:
-                    yielded = True
-                    yield version
-
-        # Now that we've iterated over everything, determine if we've yielded
-        # any values, and if we have not and we have any prereleases stored up
-        # then we will go ahead and yield the prereleases.
-        if not yielded and found_prereleases:
-            for version in found_prereleases:
-                yield version
-
-
-class LegacySpecifier(_IndividualSpecifier):
-
-    _regex_str = r"""
-        (?P<operator>(==|!=|<=|>=|<|>))
-        \s*
-        (?P<version>
-            [^,;\s)]* # Since this is a "legacy" specifier, and the version
-                      # string can be just about anything, we match everything
-                      # except for whitespace, a semi-colon for marker support,
-                      # a closing paren since versions can be enclosed in
-                      # them, and a comma since it's a version separator.
-        )
-        """
-
-    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    _operators = {
-        "==": "equal",
-        "!=": "not_equal",
-        "<=": "less_than_equal",
-        ">=": "greater_than_equal",
-        "<": "less_than",
-        ">": "greater_than",
-    }
-
-    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
-        super().__init__(spec, prereleases)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
-        if not isinstance(version, LegacyVersion):
-            version = LegacyVersion(str(version))
-        return version
-
-    def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective == self._coerce_version(spec)
-
-    def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective != self._coerce_version(spec)
-
-    def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective <= self._coerce_version(spec)
-
-    def _compare_greater_than_equal(
-        self, prospective: LegacyVersion, spec: str
-    ) -> bool:
-        return prospective >= self._coerce_version(spec)
-
-    def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective < self._coerce_version(spec)
-
-    def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective > self._coerce_version(spec)
-
-
-def _require_version_compare(
-    fn: Callable[["Specifier", ParsedVersion, str], bool]
-) -> Callable[["Specifier", ParsedVersion, str], bool]:
-    @functools.wraps(fn)
-    def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
-        if not isinstance(prospective, Version):
-            return False
-        return fn(self, prospective, spec)
-
-    return wrapped
-
-
-class Specifier(_IndividualSpecifier):
+class Specifier(BaseSpecifier):
+    """This class abstracts handling of version specifiers.
+
+    .. tip::
+
+        It is generally not required to instantiate this manually. You should
+        prefer to work with :class:`SpecifierSet` instead, which can parse
+        comma-separated version specifiers (which is what package metadata contains).
+ """ - _regex_str = r""" + _operator_regex_str = r""" (?P(~=|==|!=|<=|>=|<|>|===)) + """ + _version_regex_str = r""" (?P (?: # The identity operators allow for an escape hatch that will @@ -309,8 +127,10 @@ class Specifier(_IndividualSpecifier): # but included entirely as an escape hatch. (?<====) # Only match for the identity operator \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. + [^\s;)]* # The arbitrary version can be just about anything, + # we match everything except for whitespace, a + # semi-colon for marker support, and a closing paren + # since versions can be enclosed in them. ) | (?: @@ -323,23 +143,23 @@ class Specifier(_IndividualSpecifier): v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. + # You cannot use a wild card and a pre-release, post-release, a dev or + # local version together so group them with a | and make them optional. (?: + \.\* # Wild card syntax of .* + | + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* )? ) | @@ -354,7 +174,7 @@ class Specifier(_IndividualSpecifier): [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) + (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? @@ -379,7 +199,7 @@ class Specifier(_IndividualSpecifier): [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) + (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? @@ -391,7 +211,10 @@ class Specifier(_IndividualSpecifier): ) """ - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + _regex = re.compile( + r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$", + re.VERBOSE | re.IGNORECASE, + ) _operators = { "~=": "compatible", @@ -404,8 +227,153 @@ class Specifier(_IndividualSpecifier): "===": "arbitrary", } - @_require_version_compare - def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool: + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + """Initialize a Specifier instance. + + :param spec: + The string representation of a specifier which will be parsed and + normalized before use. + :param prereleases: + This tells the specifier if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + :raises InvalidSpecifier: + If the given specifier is invalid (i.e. bad syntax). + """ + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 + @property # type: ignore[override] + def prereleases(self) -> bool: + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. 
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # Look at all of our specifiers and determine if they are inclusive
+        # operators, and if they are if they are including an explicit
+        # prerelease.
+        operator, version = self._spec
+        if operator in ["==", ">=", "<=", "~=", "==="]:
+            # The == specifier can include a trailing .*, if it does we
+            # want to remove before parsing.
+            if operator == "==" and version.endswith(".*"):
+                version = version[:-2]
+
+            # Parse the version, and if it is a pre-release then this
+            # specifier allows pre-releases.
+            if Version(version).is_prerelease:
+                return True
+
+        return False
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
+    @property
+    def operator(self) -> str:
+        """The operator of this specifier.
+
+        >>> Specifier("==1.2.3").operator
+        '=='
+        """
+        return self._spec[0]
+
+    @property
+    def version(self) -> str:
+        """The version of this specifier.
+
+        >>> Specifier("==1.2.3").version
+        '1.2.3'
+        """
+        return self._spec[1]
+
+    def __repr__(self) -> str:
+        """A representation of the Specifier that shows all internal state.
+
+        >>> Specifier('>=1.0.0')
+        <Specifier('>=1.0.0')>
+        >>> Specifier('>=1.0.0', prereleases=False)
+        <Specifier('>=1.0.0', prereleases=False)>
+        >>> Specifier('>=1.0.0', prereleases=True)
+        <Specifier('>=1.0.0', prereleases=True)>
+        """
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
+
+    def __str__(self) -> str:
+        """A string representation of the Specifier that can be round-tripped.
+
+        >>> str(Specifier('>=1.0.0'))
+        '>=1.0.0'
+        >>> str(Specifier('>=1.0.0', prereleases=False))
+        '>=1.0.0'
+        """
+        return "{}{}".format(*self._spec)
+
+    @property
+    def _canonical_spec(self) -> Tuple[str, str]:
+        canonical_version = canonicalize_version(
+            self._spec[1],
+            strip_trailing_zero=(self._spec[0] != "~="),
+        )
+        return self._spec[0], canonical_version
+
+    def __hash__(self) -> int:
+        return hash(self._canonical_spec)
+
+    def __eq__(self, other: object) -> bool:
+        """Whether or not the two Specifier-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
+        True
+        >>> (Specifier("==1.2.3", prereleases=False) ==
+        ...  Specifier("==1.2.3", prereleases=True))
+        True
+        >>> Specifier("==1.2.3") == "==1.2.3"
+        True
+        >>> Specifier("==1.2.3") == Specifier("==1.2.4")
+        False
+        >>> Specifier("==1.2.3") == Specifier("~=1.2.3")
+        False
+        """
+        if isinstance(other, str):
+            try:
+                other = self.__class__(str(other))
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._canonical_spec == other._canonical_spec
+
+    def _get_operator(self, op: str) -> CallableOperator:
+        operator_callable: CallableOperator = getattr(
+            self, f"_compare_{self._operators[op]}"
+        )
+        return operator_callable
+
-    @_require_version_compare
-    def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
+    def _compare_compatible(self, prospective: Version, spec: str) -> bool:
 
         # Compatible releases have an equivalent combination of >= and ==. That
         # is that ~=2.2 is equivalent to >=2.2,==2.*.
This allows us to @@ -426,34 +394,35 @@ def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool: prospective, prefix ) - @_require_version_compare - def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool: + def _compare_equal(self, prospective: Version, spec: str) -> bool: # We need special logic to handle prefix matching if spec.endswith(".*"): # In the case of prefix matching we want to ignore local segment. - prospective = Version(prospective.public) + normalized_prospective = canonicalize_version( + prospective.public, strip_trailing_zero=False + ) + # Get the normalized version string ignoring the trailing .* + normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) # Split the spec out by dots, and pretend that there is an implicit # dot in between a release segment and a pre-release segment. - split_spec = _version_split(spec[:-2]) # Remove the trailing .* + split_spec = _version_split(normalized_spec) # Split the prospective version out by dots, and pretend that there # is an implicit dot in between a release segment and a pre-release # segment. - split_prospective = _version_split(str(prospective)) + split_prospective = _version_split(normalized_prospective) + + # 0-pad the prospective version before shortening it to get the correct + # shortened version. + padded_prospective, _ = _pad_version(split_prospective, split_spec) # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. - shortened_prospective = split_prospective[: len(split_spec)] + shortened_prospective = padded_prospective[: len(split_spec)] - # Pad out our two sides with zeros so that they both equal the same - # length. - padded_spec, padded_prospective = _pad_version( - split_spec, shortened_prospective - ) - - return padded_prospective == padded_spec + return shortened_prospective == split_spec else: # Convert our spec string into a Version spec_version = Version(spec) @@ -466,30 +435,24 @@ def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool: return prospective == spec_version - @_require_version_compare - def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool: + def _compare_not_equal(self, prospective: Version, spec: str) -> bool: return not self._compare_equal(prospective, spec) - @_require_version_compare - def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool: + def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) <= Version(spec) - @_require_version_compare - def _compare_greater_than_equal( - self, prospective: ParsedVersion, spec: str - ) -> bool: + def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) >= Version(spec) - @_require_version_compare - def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + def _compare_less_than(self, prospective: Version, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. 
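The padded prefix comparison above is easiest to verify through the public API; a small sketch (the `==1.4.0.*` case is the one the 0-padding change fixes):

```python
from pip._vendor.packaging.specifiers import Specifier

# ~=2.2 is the "compatible release" clause, equivalent to >=2.2,==2.*
print(Specifier("~=2.2").contains("2.9"))   # True
print(Specifier("~=2.2").contains("3.0"))   # False

# ==1.4.* does prefix matching; the prospective version is zero-padded
# before shortening, so plain "1.4" now also satisfies ==1.4.0.*
print(Specifier("==1.4.*").contains("1.4.5"))  # True
print(Specifier("==1.4.0.*").contains("1.4"))  # True
print(Specifier("==1.4.*").contains("1.5.0"))  # False
```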
@@ -514,8 +477,7 @@ def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
         # version in the spec.
         return True
 
-    @_require_version_compare
-    def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
+    def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
 
         # Convert our spec to a Version instance, since we'll want to work with
         # it as a version.
@@ -549,34 +511,133 @@ def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bo
     def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
         return str(prospective).lower() == str(spec).lower()
 
-    @property
-    def prereleases(self) -> bool:
+    def __contains__(self, item: Union[str, Version]) -> bool:
+        """Return whether or not the item is contained in this specifier.
 
-        # If there is an explicit prereleases set for this, then we'll just
-        # blindly use that.
-        if self._prereleases is not None:
-            return self._prereleases
+        :param item: The item to check for.
 
-        # Look at all of our specifiers and determine if they are inclusive
-        # operators, and if they are if they are including an explicit
-        # prerelease.
-        operator, version = self._spec
-        if operator in ["==", ">=", "<=", "~=", "==="]:
-            # The == specifier can include a trailing .*, if it does we
-            # want to remove before parsing.
-            if operator == "==" and version.endswith(".*"):
-                version = version[:-2]
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
 
-            # Parse the version, and if it is a pre-release than this
-            # specifier allows pre-releases.
-            if parse(version).is_prerelease:
-                return True
+        >>> "1.2.3" in Specifier(">=1.2.3")
+        True
+        >>> Version("1.2.3") in Specifier(">=1.2.3")
+        True
+        >>> "1.0.0" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
+        True
+        """
+        return self.contains(item)
 
-        return False
+    def contains(
+        self, item: UnparsedVersion, prereleases: Optional[bool] = None
+    ) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this Specifier. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> Specifier(">=1.2.3").contains("1.2.3")
+        True
+        >>> Specifier(">=1.2.3").contains(Version("1.2.3"))
+        True
+        >>> Specifier(">=1.2.3").contains("1.0.0")
+        False
+        >>> Specifier(">=1.2.3").contains("1.3.0a1")
+        False
+        >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
+        True
+        >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
+        True
+        """
 
-    @prereleases.setter
-    def prereleases(self, value: bool) -> None:
-        self._prereleases = value
+        # Determine if prereleases are to be allowed or not.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version, this allows us to have a shortcut for
+        # "2.0" in Specifier(">=2")
+        normalized_item = _coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not, if we do not support prereleases then we can short circuit
+        # logic if this version is a prerelease.
+        if normalized_item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        operator_callable: CallableOperator = self._get_operator(self.operator)
+        return operator_callable(normalized_item, self.version)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable, that match the specifier.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(Specifier().contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
+        ['1.2.3', '1.3', <Version('1.4')>]
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
+        ['1.5a1']
+        >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        """
+
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = _coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if parsed_version.is_prerelease and not (
+                    prereleases or self.prereleases
+                ):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
 
 
 _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
@@ -618,22 +679,39 @@ def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str
 
 
 class SpecifierSet(BaseSpecifier):
+    """This class abstracts handling of a set of version specifiers.
+
+    It can be passed a single specifier (``>=3.0``), a comma-separated list of
+    specifiers (``>=3.0,!=3.1``), or no specifier at all.
+    """
+
     def __init__(
         self, specifiers: str = "", prereleases: Optional[bool] = None
    ) -> None:
+        """Initialize a SpecifierSet instance.
+
+        :param specifiers:
+            The string representation of a specifier or a comma-separated list of
+            specifiers which will be parsed and normalized before use.
+        :param prereleases:
+            This tells the SpecifierSet if it should accept prerelease versions if
+            applicable or not.
The default of ``None`` will autodetect it from the
+            given specifiers.
+
+        :raises InvalidSpecifier:
+            If the given ``specifiers`` are not parseable, then this exception will be
+            raised.
+        """
 
-        # Split on , to break each individual specifier into it's own item, and
+        # Split on `,` to break each individual specifier into its own item, and
         # strip each item to remove leading/trailing whitespace.
         split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
 
         # Parsed each individual specifier, attempting first to make it a
-        # Specifier and falling back to a LegacySpecifier.
-        parsed: Set[_IndividualSpecifier] = set()
+        # Specifier.
+        parsed: Set[Specifier] = set()
         for specifier in split_specifiers:
-            try:
-                parsed.add(Specifier(specifier))
-            except InvalidSpecifier:
-                parsed.add(LegacySpecifier(specifier))
+            parsed.add(Specifier(specifier))
 
         # Turn our parsed specifiers into a frozen set and save them for later.
         self._specs = frozenset(parsed)
@@ -642,7 +720,40 @@ def __init__(
         # we accept prereleases or not.
         self._prereleases = prereleases
 
+    @property
+    def prereleases(self) -> Optional[bool]:
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
     def __repr__(self) -> str:
+        """A representation of the specifier set that shows all internal state.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> SpecifierSet('>=1.0.0,!=2.0.0')
+        <SpecifierSet('!=2.0.0,>=1.0.0')>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
+        """
         pre = (
             f", prereleases={self.prereleases!r}"
             if self._prereleases is not None
@@ -652,12 +763,31 @@ def __repr__(self) -> str:
             else ""
         )
 
         return f"<SpecifierSet({str(self)!r}{pre})>"
 
     def __str__(self) -> str:
+        """A string representation of the specifier set that can be round-tripped.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
+        '!=1.0.1,>=1.0.0'
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
+        '!=1.0.1,>=1.0.0'
+        """
         return ",".join(sorted(str(s) for s in self._specs))
 
     def __hash__(self) -> int:
         return hash(self._specs)
 
     def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+        """Return a SpecifierSet which is a combination of the two sets.
+
+        :param other: The other object to combine with.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        """
         if isinstance(other, str):
             other = SpecifierSet(other)
         elif not isinstance(other, SpecifierSet):
@@ -681,7 +811,25 @@ def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
         return specifier
 
     def __eq__(self, other: object) -> bool:
-        if isinstance(other, (str, _IndividualSpecifier)):
+        """Whether or not the two SpecifierSet-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
+        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
+        False
+        """
+        if isinstance(other, (str, Specifier)):
             other = SpecifierSet(str(other))
         elif not isinstance(other, SpecifierSet):
             return NotImplemented
@@ -689,43 +837,72 @@ def __eq__(self, other: object) -> bool:
         return self._specs == other._specs
 
     def __len__(self) -> int:
+        """Returns the number of specifiers in this specifier set."""
         return len(self._specs)
 
-    def __iter__(self) -> Iterator[_IndividualSpecifier]:
-        return iter(self._specs)
-
-    @property
-    def prereleases(self) -> Optional[bool]:
-
-        # If we have been given an explicit prerelease modifier, then we'll
-        # pass that through here.
-        if self._prereleases is not None:
-            return self._prereleases
-
-        # If we don't have any specifiers, and we don't have a forced value,
-        # then we'll just return None since we don't know if this should have
-        # pre-releases or not.
-        if not self._specs:
-            return None
-
-        # Otherwise we'll see if any of the given specifiers accept
-        # prereleases, if any of them do we'll return True, otherwise False.
-        return any(s.prereleases for s in self._specs)
+    def __iter__(self) -> Iterator[Specifier]:
+        """
+        Returns an iterator over all the underlying :class:`Specifier` instances
+        in this specifier set.
 
-    @prereleases.setter
-    def prereleases(self, value: bool) -> None:
-        self._prereleases = value
+        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
+        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
+        """
+        return iter(self._specs)
 
     def __contains__(self, item: UnparsedVersion) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
+        True
+        """
         return self.contains(item)
 
     def contains(
-        self, item: UnparsedVersion, prereleases: Optional[bool] = None
+        self,
+        item: UnparsedVersion,
+        prereleases: Optional[bool] = None,
+        installed: Optional[bool] = None,
     ) -> bool:
-
-        # Ensure that our item is a Version or LegacyVersion instance.
-        if not isinstance(item, (LegacyVersion, Version)):
-            item = parse(item)
+        """Return whether or not the item is contained in this SpecifierSet.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this SpecifierSet. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
+        True
+        """
+        # Ensure that our item is a Version instance.
+        if not isinstance(item, Version):
+            item = Version(item)
 
         # Determine if we're forcing a prerelease or not, if we're not forcing
         # one for this particular filter call, then we'll use whatever the
@@ -742,6 +919,9 @@ def contains(
         if not prereleases and item.is_prerelease:
             return False
 
+        if installed and item.is_prerelease:
+            item = Version(item.base_version)
+
         # We simply dispatch to the underlying specs here to make sure that the
         # given version is contained within all of them.
         # Note: This use of all() here means that an empty set of specifiers
@@ -749,9 +929,46 @@ def contains(
         return all(s.contains(item, prereleases=prereleases) for s in self._specs)
 
     def filter(
-        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-    ) -> Iterable[VersionTypeVar]:
-
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable, that match the specifiers in this set.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
+        ['1.3', <Version('1.4')>]
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
+        []
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+
+        An "empty" SpecifierSet will filter items based on the presence of prerelease
+        versions in the set.
+
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet("").filter(["1.5a1"]))
+        ['1.5a1']
+        >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        """
        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
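Between the two `filter` hunks it is worth spelling out how the prerelease fallback differs between a single `Specifier` and a `SpecifierSet`; a sketch consistent with the doctests above:

```python
from pip._vendor.packaging.specifiers import Specifier, SpecifierSet

# A lone Specifier applies the PEP 440 rule: if only prereleases match,
# they are yielded anyway.
print(list(Specifier(">=1.0").filter(["2.0a1"])))  # ['2.0a1']

# A non-empty SpecifierSet is stricter: prereleases are dropped unless
# they are explicitly enabled.
print(list(SpecifierSet(">=1.0").filter(["2.0a1"])))                    # []
print(list(SpecifierSet(">=1.0").filter(["2.0a1"], prereleases=True)))  # ['2.0a1']

# An empty set keeps the fallback behaviour.
print(list(SpecifierSet("").filter(["1.0", "2.0a1"])))  # ['1.0']
print(list(SpecifierSet("").filter(["2.0a1"])))         # ['2.0a1']
```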
@@ -764,27 +981,16 @@ def filter( if self._specs: for spec in self._specs: iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable + return iter(iterable) # If we do not have any specifiers, then we need to have a rough filter # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. + # releases. else: - filtered: List[VersionTypeVar] = [] - found_prereleases: List[VersionTypeVar] = [] - - item: UnparsedVersion - parsed_version: Union[Version, LegacyVersion] + filtered: List[UnparsedVersionVar] = [] + found_prereleases: List[UnparsedVersionVar] = [] for item in iterable: - # Ensure that we some kind of Version class for this item. - if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue + parsed_version = _coerce_version(item) # Store any item which is a pre-release for later unless we've # already found a final version or we are accepting prereleases @@ -797,6 +1003,6 @@ def filter( # If we've found no items except for pre-releases, then we'll go # ahead and use the pre-releases if not filtered and found_prereleases and prereleases is None: - return found_prereleases + return iter(found_prereleases) - return filtered + return iter(filtered) diff --git a/src/pip/_vendor/packaging/tags.py b/src/pip/_vendor/packaging/tags.py index 9a3d25a71c7..37f33b1ef84 100644 --- a/src/pip/_vendor/packaging/tags.py +++ b/src/pip/_vendor/packaging/tags.py @@ -4,6 +4,8 @@ import logging import platform +import struct +import subprocess import sys import sysconfig from importlib.machinery import EXTENSION_SUFFIXES @@ -36,7 +38,7 @@ } -_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 +_32_BIT_INTERPRETER = struct.calcsize("P") == 4 class Tag: @@ -110,7 +112,7 @@ def parse_tag(tag: str) -> FrozenSet[Tag]: def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: - value = sysconfig.get_config_var(name) + value: Union[int, str, None] = sysconfig.get_config_var(name) if value is None and warn: logger.debug( "Config variable '%s' is unset, Python ABI tag may be incorrect", name @@ -119,7 +121,7 @@ def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: def _normalize_string(string: str) -> str: - return string.replace(".", "_").replace("-", "_") + return string.replace(".", "_").replace("-", "_").replace(" ", "_") def _abi3_applies(python_version: PythonVersion) -> bool: @@ -224,10 +226,45 @@ def cpython_tags( yield Tag(interpreter, "abi3", platform_) -def _generic_abi() -> Iterator[str]: - abi = sysconfig.get_config_var("SOABI") - if abi: - yield _normalize_string(abi) +def _generic_abi() -> List[str]: + """ + Return the ABI tag based on EXT_SUFFIX. + """ + # The following are examples of `EXT_SUFFIX`. 
+ # We want to keep the parts which are related to the ABI and remove the + # parts which are related to the platform: + # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310 + # - mac: '.cpython-310-darwin.so' => cp310 + # - win: '.cp310-win_amd64.pyd' => cp310 + # - win: '.pyd' => cp37 (uses _cpython_abis()) + # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73 + # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib' + # => graalpy_38_native + + ext_suffix = _get_config_var("EXT_SUFFIX", warn=True) + if not isinstance(ext_suffix, str) or ext_suffix[0] != ".": + raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')") + parts = ext_suffix.split(".") + if len(parts) < 3: + # CPython3.7 and earlier uses ".pyd" on Windows. + return _cpython_abis(sys.version_info[:2]) + soabi = parts[1] + if soabi.startswith("cpython"): + # non-windows + abi = "cp" + soabi.split("-")[1] + elif soabi.startswith("cp"): + # windows + abi = soabi.split("-")[0] + elif soabi.startswith("pypy"): + abi = "-".join(soabi.split("-")[:2]) + elif soabi.startswith("graalpy"): + abi = "-".join(soabi.split("-")[:3]) + elif soabi: + # pyston, ironpython, others? + abi = soabi + else: + return [] + return [_normalize_string(abi)] def generic_tags( @@ -251,8 +288,9 @@ def generic_tags( interpreter = "".join([interp_name, interp_version]) if abis is None: abis = _generic_abi() + else: + abis = list(abis) platforms = list(platforms or platform_tags()) - abis = list(abis) if "none" not in abis: abis.append("none") for abi in abis: @@ -356,6 +394,22 @@ def mac_platforms( version_str, _, cpu_arch = platform.mac_ver() if version is None: version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + if version == (10, 16): + # When built against an older macOS SDK, Python will report macOS 10.16 + # instead of the real version. + version_str = subprocess.run( + [ + sys.executable, + "-sS", + "-c", + "import platform; print(platform.mac_ver()[0])", + ], + check=True, + env={"SYSTEM_VERSION_COMPAT": "0"}, + stdout=subprocess.PIPE, + text=True, + ).stdout + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) else: version = version if arch is None: @@ -416,15 +470,21 @@ def mac_platforms( def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: linux = _normalize_string(sysconfig.get_platform()) + if not linux.startswith("linux_"): + # we should never be here, just yield the sysconfig one and return + yield linux + return if is_32bit: if linux == "linux_x86_64": linux = "linux_i686" elif linux == "linux_aarch64": - linux = "linux_armv7l" + linux = "linux_armv8l" _, arch = linux.split("_", 1) - yield from _manylinux.platform_tags(linux, arch) - yield from _musllinux.platform_tags(arch) - yield linux + archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch]) + yield from _manylinux.platform_tags(archs) + yield from _musllinux.platform_tags(archs) + for arch in archs: + yield f"linux_{arch}" def _generic_platforms() -> Iterator[str]: @@ -446,6 +506,9 @@ def platform_tags() -> Iterator[str]: def interpreter_name() -> str: """ Returns the name of the running interpreter. + + Some implementations have a reserved, two-letter abbreviation which will + be returned when appropriate. 
""" name = sys.implementation.name return INTERPRETER_SHORT_NAMES.get(name) or name @@ -482,6 +545,9 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]: yield from generic_tags() if interp_name == "pp": - yield from compatible_tags(interpreter="pp3") + interp = "pp3" + elif interp_name == "cp": + interp = "cp" + interpreter_version(warn=warn) else: - yield from compatible_tags() + interp = None + yield from compatible_tags(interpreter=interp) diff --git a/src/pip/_vendor/packaging/utils.py b/src/pip/_vendor/packaging/utils.py index bab11b80c60..c2c2f75aa80 100644 --- a/src/pip/_vendor/packaging/utils.py +++ b/src/pip/_vendor/packaging/utils.py @@ -12,6 +12,12 @@ NormalizedName = NewType("NormalizedName", str) +class InvalidName(ValueError): + """ + An invalid distribution name; users should refer to the packaging user guide. + """ + + class InvalidWheelFilename(ValueError): """ An invalid wheel filename was found, users should refer to PEP 427. @@ -24,18 +30,31 @@ class InvalidSdistFilename(ValueError): """ +# Core metadata spec for `Name` +_validate_regex = re.compile( + r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE +) _canonicalize_regex = re.compile(r"[-_.]+") +_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$") # PEP 427: The build number must start with a digit. _build_tag_regex = re.compile(r"(\d+)(.*)") -def canonicalize_name(name: str) -> NormalizedName: +def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName: + if validate and not _validate_regex.match(name): + raise InvalidName(f"name is invalid: {name!r}") # This is taken from PEP 503. value = _canonicalize_regex.sub("-", name).lower() return cast(NormalizedName, value) -def canonicalize_version(version: Union[Version, str]) -> str: +def is_normalized_name(name: str) -> bool: + return _normalized_regex.match(name) is not None + + +def canonicalize_version( + version: Union[Version, str], *, strip_trailing_zero: bool = True +) -> str: """ This is very similar to Version.__str__, but has one subtle difference with the way it handles the release segment. @@ -56,8 +75,11 @@ def canonicalize_version(version: Union[Version, str]) -> str: parts.append(f"{parsed.epoch}!") # Release segment - # NB: This strips trailing '.0's to normalize - parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) + release_segment = ".".join(str(x) for x in parsed.release) + if strip_trailing_zero: + # NB: This strips trailing '.0's to normalize + release_segment = re.sub(r"(\.0)+$", "", release_segment) + parts.append(release_segment) # Pre-release if parsed.pre is not None: @@ -95,11 +117,18 @@ def parse_wheel_filename( parts = filename.split("-", dashes - 2) name_part = parts[0] - # See PEP 427 for the rules on escaping the project name + # See PEP 427 for the rules on escaping the project name. 
if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: raise InvalidWheelFilename(f"Invalid project name: {filename}") name = canonicalize_name(name_part) - version = Version(parts[1]) + + try: + version = Version(parts[1]) + except InvalidVersion as e: + raise InvalidWheelFilename( + f"Invalid wheel filename (invalid version): {filename}" + ) from e + if dashes == 5: build_part = parts[2] build_match = _build_tag_regex.match(build_part) @@ -132,5 +161,12 @@ def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") name = canonicalize_name(name_part) - version = Version(version_part) + + try: + version = Version(version_part) + except InvalidVersion as e: + raise InvalidSdistFilename( + f"Invalid sdist filename (invalid version): {filename}" + ) from e + return (name, version) diff --git a/src/pip/_vendor/packaging/version.py b/src/pip/_vendor/packaging/version.py index de9a09a4ed3..6978e2843fa 100644 --- a/src/pip/_vendor/packaging/version.py +++ b/src/pip/_vendor/packaging/version.py @@ -1,64 +1,71 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. +""" +.. testsetup:: + + from pip._vendor.packaging.version import parse, Version +""" -import collections import itertools import re -import warnings -from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union +from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType -__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] +__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"] -InfiniteTypes = Union[InfinityType, NegativeInfinityType] -PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] -SubLocalType = Union[InfiniteTypes, int, str] -LocalType = Union[ +LocalType = Tuple[Union[int, str], ...] + +CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]] +CmpLocalType = Union[ NegativeInfinityType, - Tuple[ - Union[ - SubLocalType, - Tuple[SubLocalType, str], - Tuple[NegativeInfinityType, SubLocalType], - ], - ..., - ], + Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...], ] CmpKey = Tuple[ - int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType -] -LegacyCmpKey = Tuple[int, Tuple[str, ...]] -VersionComparisonMethod = Callable[ - [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool + int, + Tuple[int, ...], + CmpPrePostDevType, + CmpPrePostDevType, + CmpPrePostDevType, + CmpLocalType, ] +VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] -_Version = collections.namedtuple( - "_Version", ["epoch", "release", "dev", "pre", "post", "local"] -) +class _Version(NamedTuple): + epoch: int + release: Tuple[int, ...] + dev: Optional[Tuple[str, int]] + pre: Optional[Tuple[str, int]] + post: Optional[Tuple[str, int]] + local: Optional[LocalType] -def parse(version: str) -> Union["LegacyVersion", "Version"]: - """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. + +def parse(version: str) -> "Version": + """Parse the given version string. 
+
+    >>> parse('1.0.dev1')
+    <Version('1.0.dev1')>
+
+    :param version: The version string to parse.
+    :raises InvalidVersion: When the version string is not a valid version.
     """
-    try:
-        return Version(version)
-    except InvalidVersion:
-        return LegacyVersion(version)
+    return Version(version)
 
 
 class InvalidVersion(ValueError):
-    """
-    An invalid version was found, users should refer to PEP 440.
+    """Raised when a version string is not a valid version.
+
+    >>> Version("invalid")
+    Traceback (most recent call last):
+        ...
+    packaging.version.InvalidVersion: Invalid version: 'invalid'
     """
 
 
 class _BaseVersion:
-    _key: Union[CmpKey, LegacyCmpKey]
+    _key: Tuple[Any, ...]
 
     def __hash__(self) -> int:
         return hash(self._key)
@@ -103,133 +110,16 @@ def __ne__(self, other: object) -> bool:
         return self._key != other._key
 
 
-class LegacyVersion(_BaseVersion):
-    def __init__(self, version: str) -> None:
-        self._version = str(version)
-        self._key = _legacy_cmpkey(self._version)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def __str__(self) -> str:
-        return self._version
-
-    def __repr__(self) -> str:
-        return f"<LegacyVersion('{self}')>"
-
-    @property
-    def public(self) -> str:
-        return self._version
-
-    @property
-    def base_version(self) -> str:
-        return self._version
-
-    @property
-    def epoch(self) -> int:
-        return -1
-
-    @property
-    def release(self) -> None:
-        return None
-
-    @property
-    def pre(self) -> None:
-        return None
-
-    @property
-    def post(self) -> None:
-        return None
-
-    @property
-    def dev(self) -> None:
-        return None
-
-    @property
-    def local(self) -> None:
-        return None
-
-    @property
-    def is_prerelease(self) -> bool:
-        return False
-
-    @property
-    def is_postrelease(self) -> bool:
-        return False
-
-    @property
-    def is_devrelease(self) -> bool:
-        return False
-
-
-_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
-
-_legacy_version_replacement_map = {
-    "pre": "c",
-    "preview": "c",
-    "-": "final-",
-    "rc": "c",
-    "dev": "@",
-}
-
-
-def _parse_version_parts(s: str) -> Iterator[str]:
-    for part in _legacy_version_component_re.split(s):
-        part = _legacy_version_replacement_map.get(part, part)
-
-        if not part or part == ".":
-            continue
-
-        if part[:1] in "0123456789":
-            # pad for numeric comparison
-            yield part.zfill(8)
-        else:
-            yield "*" + part
-
-    # ensure that alpha/beta/candidate are before final
-    yield "*final"
-
-
-def _legacy_cmpkey(version: str) -> LegacyCmpKey:
-
-    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
-    # greater than or equal to 0. This will effectively put the LegacyVersion,
-    # which uses the defacto standard originally implemented by setuptools,
-    # as before all PEP 440 versions.
-    epoch = -1
-
-    # This scheme is taken from pkg_resources.parse_version setuptools prior to
-    # it's adoption of the packaging library.
-    parts: List[str] = []
-    for part in _parse_version_parts(version.lower()):
-        if part.startswith("*"):
-            # remove "-" before a prerelease tag
-            if part < "*final":
-                while parts and parts[-1] == "*final-":
-                    parts.pop()
-
-            # remove trailing zeros from each series of numeric parts
-            while parts and parts[-1] == "00000000":
-                parts.pop()
-
-        parts.append(part)
-
-    return epoch, tuple(parts)
-
-
 # Deliberately not anchored to the start and end of the string, to make it
 # easier for 3rd party code to reuse
-VERSION_PATTERN = r"""
+_VERSION_PATTERN = r"""
     v?
     (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
             [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
             [-_\.]?
             (?P<pre_n>[0-9]+)?
         )?
@@ -253,12 +143,56 @@ def _legacy_cmpkey(version: str) -> LegacyCmpKey:
     (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
 """
 
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
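Since the new docstring advertises `VERSION_PATTERN` for embedding, a minimal sketch of that use (the `demo-...` file name and the `wheel_like`/`ver` names are illustrative only):

```python
import re

from pip._vendor.packaging.version import VERSION_PATTERN

# The pattern is unanchored and must be compiled with VERBOSE | IGNORECASE.
wheel_like = re.compile(
    r"^demo-(?P<ver>" + VERSION_PATTERN + r")\.tar\.gz$",
    re.VERBOSE | re.IGNORECASE,
)

match = wheel_like.match("demo-1.0rc2.tar.gz")
assert match is not None
print(match.group("ver"))  # 1.0rc2
```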
 
 class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
 
     _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
 
     def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
 
         # Validate the version and parse it into pieces
         match = self._regex.search(version)
@@ -288,9 +222,19 @@ def __init__(self, version: str) -> None:
         )
 
     def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f"<Version('{self}')>"
 
     def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
         parts = []
 
         # Epoch
@@ -320,29 +264,77 @@ def __str__(self) -> str:
 
     @property
     def epoch(self) -> int:
-        _epoch: int = self._version.epoch
-        return _epoch
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
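One consequence of the epoch worth noting here (a sketch, not part of the diff itself): it dominates every other segment when comparing.

```python
from pip._vendor.packaging.version import Version

# An explicit epoch restarts the ordering, so 1!1.0 sorts after any
# epoch-0 release, however large its release segment is.
print(Version("1!1.0") > Version("2024.12"))  # True
```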
 
     @property
     def release(self) -> Tuple[int, ...]:
-        _release: Tuple[int, ...] = self._version.release
-        return _release
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
 
     @property
     def pre(self) -> Optional[Tuple[str, int]]:
-        _pre: Optional[Tuple[str, int]] = self._version.pre
-        return _pre
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
 
     @property
     def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
         return self._version.post[1] if self._version.post else None
 
     @property
     def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
         return self._version.dev[1] if self._version.dev else None
 
     @property
     def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
         if self._version.local:
             return ".".join(str(x) for x in self._version.local)
         else:
@@ -350,10 +342,31 @@ def local(self) -> Optional[str]:
 
     @property
     def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
         return str(self).split("+", 1)[0]
 
     @property
     def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
         parts = []
 
         # Epoch
@@ -367,31 +380,77 @@ def base_version(self) -> str:
 
     @property
     def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
         return self.dev is not None or self.pre is not None
 
     @property
     def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
         return self.post is not None
 
     @property
     def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
         return self.dev is not None
 
     @property
     def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
         return self.release[0] if len(self.release) >= 1 else 0
 
     @property
     def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
         return self.release[1] if len(self.release) >= 2 else 0
 
     @property
     def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
         return self.release[2] if len(self.release) >= 3 else 0
 
 
 def _parse_letter_version(
-    letter: str, number: Union[str, bytes, SupportsInt]
+    letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
 ) -> Optional[Tuple[str, int]]:
 
     if letter:
@@ -429,7 +488,7 @@ def _parse_letter_version(
 _local_version_separators = re.compile(r"[\._-]")
 
 
-def _parse_local_version(local: str) -> Optional[LocalType]:
+def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
     """
     Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
     """
@@ -447,7 +506,7 @@ def _cmpkey(
     pre: Optional[Tuple[str, int]],
     post: Optional[Tuple[str, int]],
     dev: Optional[Tuple[str, int]],
-    local: Optional[Tuple[SubLocalType]],
+    local: Optional[LocalType],
 ) -> CmpKey:
 
     # When we compare a release version, we want to compare it with all of the
@@ -464,7 +523,7 @@ def _cmpkey(
     # if there is not a pre or a post segment. If we have one of those then
     # the normal sorting rules will handle this case correctly.
     if pre is None and post is None and dev is not None:
-        _pre: PrePostDevType = NegativeInfinity
+        _pre: CmpPrePostDevType = NegativeInfinity
     # Versions without a pre-release (except as noted above) should sort after
     # those with one.
     elif pre is None:
@@ -474,21 +533,21 @@ def _cmpkey(
 
     # Versions without a post segment should sort before those with one.
     if post is None:
-        _post: PrePostDevType = NegativeInfinity
+        _post: CmpPrePostDevType = NegativeInfinity
 
     else:
         _post = post
 
     # Versions without a development segment should sort after those with one.
     if dev is None:
-        _dev: PrePostDevType = Infinity
+        _dev: CmpPrePostDevType = Infinity
 
     else:
         _dev = dev
 
     if local is None:
         # Versions without a local segment should sort before those with one.
-        _local: LocalType = NegativeInfinity
+        _local: CmpLocalType = NegativeInfinity
     else:
         # Versions with a local segment need that segment parsed to implement
         # the sorting rules in PEP440.
diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt
index e9694adc037..c892f8d4ad0 100644
--- a/src/pip/_vendor/vendor.txt
+++ b/src/pip/_vendor/vendor.txt
@@ -3,7 +3,7 @@ colorama==0.4.6
 distlib==0.3.8
 distro==1.9.0
 msgpack==1.0.8
-packaging==21.3
+packaging==23.2
 platformdirs==4.2.1
 pyparsing==3.1.2
 pyproject-hooks==1.0.0

From 335f01cbfb16f8f07047f42f8a50687ee354bdaf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= 
Date: Fri, 29 Sep 2023 22:20:56 +0200
Subject: [PATCH 067/113] Fix test for normalized extra markers

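The expected metadata now spells the extra marker in its normalized form,
since the vendored packaging compares extras per PEP 685. A quick sketch of
the normalization this relies on:

    from pip._vendor.packaging.utils import canonicalize_name

    # Runs of "-", "_" and "." collapse to a single dash, lowercased.
    print(canonicalize_name("also_b"))  # -> "also-b"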
---
 tests/unit/metadata/test_metadata_pkg_resources.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/unit/metadata/test_metadata_pkg_resources.py b/tests/unit/metadata/test_metadata_pkg_resources.py
index ab1a56107f4..69efa209fdf 100644
--- a/tests/unit/metadata/test_metadata_pkg_resources.py
+++ b/tests/unit/metadata/test_metadata_pkg_resources.py
@@ -82,7 +82,7 @@ def test_wheel_metadata_works() -> None:
     name = "simple"
     version = "0.1.0"
     require_a = "a==1.0"
-    require_b = 'b==1.1; extra == "also_b"'
+    require_b = 'b==1.1; extra == "also-b"'
     requires = [require_a, require_b, 'c==1.2; extra == "also_c"']
     extras = ["also_b", "also_c"]
     requires_python = ">=3"

From 87bda4b14a799e8b14f8736b6947bac48b0a748a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= 
Date: Fri, 29 Sep 2023 22:48:08 +0200
Subject: [PATCH 068/113] Rename invalid sdists in our test data

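With the stricter parsing in the upgraded packaging, a dash-separated sdist
stem like "pip-test-package-0.1" has an ambiguous name/version split; the
underscore spelling matches the normalized escaping of the project name
(as in PEP 625). A rough sketch of the now-unambiguous split:

    from pip._vendor.packaging.utils import canonicalize_name

    # With underscores in the name, the last dash separates the version.
    name, _, version = "pip_test_package-0.1.1".rpartition("-")
    print(canonicalize_name(name), version)  # -> pip-test-package 0.1.1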
---
 ...e-0.1.1.tar.gz => pip_test_package-0.1.1.tar.gz} | Bin
 ...ckage-0.1.tar.gz => pip_test_package-0.1.tar.gz} | Bin
 tests/functional/test_install.py                    |   2 +-
 3 files changed, 1 insertion(+), 1 deletion(-)
 rename tests/data/packages/{pip-test-package-0.1.1.tar.gz => pip_test_package-0.1.1.tar.gz} (100%)
 rename tests/data/packages/{pip-test-package-0.1.tar.gz => pip_test_package-0.1.tar.gz} (100%)

diff --git a/tests/data/packages/pip-test-package-0.1.1.tar.gz b/tests/data/packages/pip_test_package-0.1.1.tar.gz
similarity index 100%
rename from tests/data/packages/pip-test-package-0.1.1.tar.gz
rename to tests/data/packages/pip_test_package-0.1.1.tar.gz
diff --git a/tests/data/packages/pip-test-package-0.1.tar.gz b/tests/data/packages/pip_test_package-0.1.tar.gz
similarity index 100%
rename from tests/data/packages/pip-test-package-0.1.tar.gz
rename to tests/data/packages/pip_test_package-0.1.tar.gz
diff --git a/tests/functional/test_install.py b/tests/functional/test_install.py
index b65212f929c..5ec07e7c456 100644
--- a/tests/functional/test_install.py
+++ b/tests/functional/test_install.py
@@ -391,7 +391,7 @@ def test_install_editable_uninstalls_existing(
     https://github.com/pypa/pip/issues/1548
     https://github.com/pypa/pip/pull/1552
     """
-    to_install = data.packages.joinpath("pip-test-package-0.1.tar.gz")
+    to_install = data.packages.joinpath("pip_test_package-0.1.tar.gz")
     result = script.pip_install_local(to_install)
     assert "Successfully installed pip-test-package" in result.stdout
     result.assert_installed("piptestpackage", editable=False)

From 3b012210a0f98a567b53eea8316778ca56ef2b38 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= 
Date: Fri, 29 Sep 2023 22:52:12 +0200
Subject: [PATCH 069/113] Fix test following stricter version specifiers
 parsing

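An empty version after a comparison operator ("simple==") is rejected by the
upgraded packaging's requirement parser, so the test pins a concrete version
instead. A minimal sketch of the new behavior, assuming the vendored
packaging:

    from pip._vendor.packaging.requirements import InvalidRequirement, Requirement

    try:
        Requirement("simple==")
    except InvalidRequirement as exc:
        print(exc)  # complains about the missing version after "=="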
---
 tests/functional/test_install.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/functional/test_install.py b/tests/functional/test_install.py
index 5ec07e7c456..eaea12a163c 100644
--- a/tests/functional/test_install.py
+++ b/tests/functional/test_install.py
@@ -2253,14 +2253,14 @@ def test_yanked_version_missing_from_availble_versions_error_message(
     """
     result = script.pip(
         "install",
-        "simple==",
+        "simple==0.1",
         "--index-url",
         data.index_url("yanked"),
         expect_error=True,
     )
     # the yanked version (3.0) is filtered out from the output:
     expected_warning = (
-        "Could not find a version that satisfies the requirement simple== "
+        "Could not find a version that satisfies the requirement simple==0.1 "
         "(from versions: 1.0, 2.0)"
     )
     assert expected_warning in result.stderr, str(result)

From 62215ca9efd02f4863ea3b5f806a6f6c2822859d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= 
Date: Fri, 29 Sep 2023 23:14:54 +0200
Subject: [PATCH 070/113] Don't use legacy version numbers in tests

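"0.2broken" is a legacy, non-PEP 440 version that the upgraded packaging
rejects outright, while "0.2+broken" parses as version 0.2 with the local
label "broken". A minimal sketch, assuming the vendored packaging:

    from pip._vendor.packaging.version import InvalidVersion, Version

    print(Version("0.2+broken").local)  # -> "broken"
    try:
        Version("0.2broken")
    except InvalidVersion as exc:
        print(exc)  # Invalid version: '0.2broken'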
---
 tests/data/packages/README.txt                      |   6 +++---
 ...en-0.2broken.tar.gz => broken-0.2+broken.tar.gz} | Bin
 tests/functional/test_install_upgrade.py            |   2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)
 rename tests/data/packages/{broken-0.2broken.tar.gz => broken-0.2+broken.tar.gz} (100%)

diff --git a/tests/data/packages/README.txt b/tests/data/packages/README.txt
index aa957b337f0..37860a21b9e 100644
--- a/tests/data/packages/README.txt
+++ b/tests/data/packages/README.txt
@@ -6,9 +6,9 @@ broken-0.1.tar.gz
 -----------------
 This package exists for testing uninstall-rollback.
 
-broken-0.2broken.tar.gz
------------------------
-Version 0.2broken has a setup.py crafted to fail on install (and only on
+broken-0.2+broken.tar.gz
+------------------------
+Version 0.2+broken has a setup.py crafted to fail on install (and only on
 install). If any earlier step would fail (i.e. egg-info-generation), the
 already-installed version would never be uninstalled, so uninstall-rollback
 would not come into play.
diff --git a/tests/data/packages/broken-0.2broken.tar.gz b/tests/data/packages/broken-0.2+broken.tar.gz
similarity index 100%
rename from tests/data/packages/broken-0.2broken.tar.gz
rename to tests/data/packages/broken-0.2+broken.tar.gz
diff --git a/tests/functional/test_install_upgrade.py b/tests/functional/test_install_upgrade.py
index 6556fcdf599..00036c1ef05 100644
--- a/tests/functional/test_install_upgrade.py
+++ b/tests/functional/test_install_upgrade.py
@@ -295,7 +295,7 @@ def test_uninstall_rollback(script: PipTestEnvironment, data: TestData) -> None:
         "-f",
         data.find_links,
         "--no-index",
-        "broken===0.2broken",
+        "broken===0.2+broken",
         expect_error=True,
     )
     assert result2.returncode == 1, str(result2)

From 9dcd26935982736d1cadcb1ec36ecc150a1621ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= 
Date: Sat, 30 Sep 2023 22:58:13 +0200
Subject: [PATCH 071/113] Remove now redundant error detection

This specifier syntax check is now done by the packaging library.
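
A minimal sketch of the check packaging now performs, using the requirement
string from pip's own test suite:

    from pip._vendor.packaging.requirements import InvalidRequirement, Requirement

    try:
        Requirement("requires_simple_extra>=0.1[extra]")
    except InvalidRequirement as exc:
        print(exc)  # extras after the version specifier are a parse error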
---
 src/pip/_internal/req/constructors.py | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/src/pip/_internal/req/constructors.py b/src/pip/_internal/req/constructors.py
index 36f517e599d..b2c9505a9a4 100644
--- a/src/pip/_internal/req/constructors.py
+++ b/src/pip/_internal/req/constructors.py
@@ -359,7 +359,7 @@ def with_source(text: str) -> str:
 
     def _parse_req_string(req_as_string: str) -> Requirement:
         try:
-            req = get_requirement(req_as_string)
+            return get_requirement(req_as_string)
         except InvalidRequirement:
             if os.path.sep in req_as_string:
                 add_msg = "It looks like a path."
@@ -374,17 +374,6 @@ def _parse_req_string(req_as_string: str) -> Requirement:
             if add_msg:
                 msg += f"\nHint: {add_msg}"
             raise InstallationError(msg)
-        else:
-            # Deprecate extras after specifiers: "name>=1.0[extras]"
-            # This currently works by accident because _strip_extras() parses
-            # any extras in the end of the string and those are saved in
-            # RequirementParts
-            for spec in req.specifier:
-                spec_str = str(spec)
-                if spec_str.endswith("]"):
-                    msg = f"Extras after version '{spec_str}'."
-                    raise InstallationError(msg)
-        return req
 
     if req_as_string is not None:
         req: Optional[Requirement] = _parse_req_string(req_as_string)

From 4d70566c687d83cd8e0c0c41040b9beaca41492c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= 
Date: Fri, 29 Sep 2023 22:35:21 +0200
Subject: [PATCH 072/113] Remove various extras normalization workarounds

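With PEP 685 marker comparison implemented in the vendored packaging, pip no
longer needs to try several spellings of each extra itself. A minimal sketch
of the now built-in behavior, assuming packaging's normalizing evaluation:

    from pip._vendor.packaging.markers import Marker

    marker = Marker('extra == "Hop_hOp-hoP"')
    # Both sides of an "extra" comparison are normalized before evaluating.
    print(marker.evaluate({"extra": "hop-hop-hop"}))  # -> True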
---
 src/pip/_internal/req/req_install.py               |  8 +-------
 .../_internal/resolution/resolvelib/candidates.py  | 10 +---------
 tests/functional/test_install_extras.py            | 14 ++------------
 3 files changed, 4 insertions(+), 28 deletions(-)

diff --git a/src/pip/_internal/req/req_install.py b/src/pip/_internal/req/req_install.py
index 7d527959e81..213278588d2 100644
--- a/src/pip/_internal/req/req_install.py
+++ b/src/pip/_internal/req/req_install.py
@@ -52,7 +52,6 @@
     redact_auth_from_requirement,
     redact_auth_from_url,
 )
-from pip._internal.utils.packaging import safe_extra
 from pip._internal.utils.subprocess import runner_with_spinner_message
 from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
 from pip._internal.utils.unpacking import unpack_file
@@ -284,12 +283,7 @@ def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> boo
             extras_requested = ("",)
         if self.markers is not None:
             return any(
-                self.markers.evaluate({"extra": extra})
-                # TODO: Remove these two variants when packaging is upgraded to
-                # support the marker comparison logic specified in PEP 685.
-                or self.markers.evaluate({"extra": safe_extra(extra)})
-                or self.markers.evaluate({"extra": canonicalize_name(extra)})
-                for extra in extras_requested
+                self.markers.evaluate({"extra": extra}) for extra in extras_requested
             )
         else:
             return True
diff --git a/src/pip/_internal/resolution/resolvelib/candidates.py b/src/pip/_internal/resolution/resolvelib/candidates.py
index 029b26a3368..bda9147d7f9 100644
--- a/src/pip/_internal/resolution/resolvelib/candidates.py
+++ b/src/pip/_internal/resolution/resolvelib/candidates.py
@@ -438,14 +438,6 @@ def __init__(
         """
         self.base = base
         self.extras = frozenset(canonicalize_name(e) for e in extras)
-        # If any extras are requested in their non-normalized forms, keep track
-        # of their raw values. This is needed when we look up dependencies
-        # since PEP 685 has not been implemented for marker-matching, and using
-        # the non-normalized extra for lookup ensures the user can select a
-        # non-normalized extra in a package with its non-normalized form.
-        # TODO: Remove this attribute when packaging is upgraded to support the
-        # marker comparison logic specified in PEP 685.
-        self._unnormalized_extras = extras.difference(self.extras)
         self._comes_from = comes_from if comes_from is not None else self.base._ireq
 
     def __str__(self) -> str:
@@ -528,7 +520,7 @@ def _calculate_valid_requested_extras(self) -> FrozenSet[str]:
         candidate doesn't support. Any unsupported extras are dropped, and each
         cause a warning to be logged here.
         """
-        requested_extras = self.extras.union(self._unnormalized_extras)
+        requested_extras = self.extras
         valid_extras = frozenset(
             extra
             for extra in requested_extras
diff --git a/tests/functional/test_install_extras.py b/tests/functional/test_install_extras.py
index 1dd67be0a0c..4dd27bb9df3 100644
--- a/tests/functional/test_install_extras.py
+++ b/tests/functional/test_install_extras.py
@@ -152,24 +152,14 @@ def test_install_fails_if_extra_at_end(
         script.scratch_path / "requirements.txt",
         expect_error=True,
     )
-    assert "Extras after version" in result.stderr
+    assert "Invalid requirement: 'requires_simple_extra>=0.1[extra]'" in result.stderr
 
 
 @pytest.mark.parametrize(
     "specified_extra, requested_extra",
     [
         ("Hop_hOp-hoP", "Hop_hOp-hoP"),
-        pytest.param(
-            "Hop_hOp-hoP",
-            "hop-hop-hop",
-            marks=pytest.mark.xfail(
-                reason=(
-                    "matching a normalized extra request against an"
-                    "unnormalized extra in metadata requires PEP 685 support "
-                    "in packaging (see pypa/pip#11445)."
-                ),
-            ),
-        ),
+        ("Hop_hOp-hoP", "hop-hop-hop"),
         ("hop-hop-hop", "Hop_hOp-hoP"),
     ],
 )

From 5463bbadaa61ad205bc01e6cfe238ee282847b64 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= 
Date: Sun, 1 Oct 2023 18:29:03 +0200
Subject: [PATCH 073/113] Fix obvious typo in test wheel generator

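message_from_dict(metadata) rebuilt the message from the metadata dict alone,
discarding the payload set on message a few lines above.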
---
 tests/lib/wheel.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/lib/wheel.py b/tests/lib/wheel.py
index e63f44e1cad..342abbacaad 100644
--- a/tests/lib/wheel.py
+++ b/tests/lib/wheel.py
@@ -103,7 +103,7 @@ def make_metadata_file(
     if body is not _default:
         message.set_payload(body)
 
-    return File(path, message_from_dict(metadata).as_bytes())
+    return File(path, message.as_bytes())
 
 
 def make_wheel_metadata_file(

From 92cb9c91cc3ad544fc58c3a053f5b5699bb8254b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= 
Date: Sun, 1 Oct 2023 18:48:12 +0200
Subject: [PATCH 074/113] Strip Requires-Dist metadata parsed from METADATA
 files

These can contain a leading \n if the requirement was long and wrapped by
email.message.EmailMessage.as_bytes().
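
A minimal sketch of the round-trip that produces the leading newline, using
only the standard library (the exact fold point depends on header length):

    import email
    from email.message import EmailMessage

    msg = EmailMessage()
    msg["Requires-Dist"] = (
        "somedistribution>=1.0,!=1.5.0,<2.0; python_version >= '3.8' "
        "and extra == 'an-extra-with-a-rather-long-name'"
    )

    # Re-parsing the folded bytes keeps the fold inside the header value, so
    # the raw string handed to Requirement() can start with "\n ".
    reparsed = email.message_from_bytes(msg.as_bytes())
    for value in reparsed.get_all("Requires-Dist", []):
        print(repr(value.strip()))  # what pip now passes to Requirement()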
---
 src/pip/_internal/metadata/importlib/_dists.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/pip/_internal/metadata/importlib/_dists.py b/src/pip/_internal/metadata/importlib/_dists.py
index 9eee474ea5b..5e6a0345621 100644
--- a/src/pip/_internal/metadata/importlib/_dists.py
+++ b/src/pip/_internal/metadata/importlib/_dists.py
@@ -216,7 +216,9 @@ def is_extra_provided(self, extra: str) -> bool:
     def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
         contexts: Sequence[Dict[str, str]] = [{"extra": e} for e in extras]
         for req_string in self.metadata.get_all("Requires-Dist", []):
-            req = Requirement(req_string)
+            # strip() because email.message.Message.get_all() may return a leading \n
+            # if a long header was wrapped.
+            req = Requirement(req_string.strip())
             if not req.marker:
                 yield req
             elif not extras and req.marker.evaluate({"extra": ""}):

From 04ea0da1878c8bec271760b12ec4d7f702e3c3b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= 
Date: Sun, 1 Oct 2023 19:22:53 +0200
Subject: [PATCH 075/113] Remove vendored pyparsing

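The vendored packaging no longer uses pyparsing (its requirement, marker and
version parsing is hand-written since packaging 22.0), so nothing in pip
imports it anymore.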
---
 news/pyparsing.vendor.rst                     |    2 +-
 src/pip/_vendor/pyparsing/LICENSE             |   18 -
 src/pip/_vendor/pyparsing/__init__.py         |  325 -
 src/pip/_vendor/pyparsing/actions.py          |  206 -
 src/pip/_vendor/pyparsing/common.py           |  439 --
 src/pip/_vendor/pyparsing/core.py             | 6087 -----------------
 src/pip/_vendor/pyparsing/diagram/__init__.py |  656 --
 src/pip/_vendor/pyparsing/exceptions.py       |  300 -
 src/pip/_vendor/pyparsing/helpers.py          | 1078 ---
 src/pip/_vendor/pyparsing/py.typed            |    0
 src/pip/_vendor/pyparsing/results.py          |  797 ---
 src/pip/_vendor/pyparsing/testing.py          |  345 -
 src/pip/_vendor/pyparsing/unicode.py          |  354 -
 src/pip/_vendor/pyparsing/util.py             |  277 -
 src/pip/_vendor/vendor.txt                    |    1 -
 15 files changed, 1 insertion(+), 10884 deletions(-)
 delete mode 100644 src/pip/_vendor/pyparsing/LICENSE
 delete mode 100644 src/pip/_vendor/pyparsing/__init__.py
 delete mode 100644 src/pip/_vendor/pyparsing/actions.py
 delete mode 100644 src/pip/_vendor/pyparsing/common.py
 delete mode 100644 src/pip/_vendor/pyparsing/core.py
 delete mode 100644 src/pip/_vendor/pyparsing/diagram/__init__.py
 delete mode 100644 src/pip/_vendor/pyparsing/exceptions.py
 delete mode 100644 src/pip/_vendor/pyparsing/helpers.py
 delete mode 100644 src/pip/_vendor/pyparsing/py.typed
 delete mode 100644 src/pip/_vendor/pyparsing/results.py
 delete mode 100644 src/pip/_vendor/pyparsing/testing.py
 delete mode 100644 src/pip/_vendor/pyparsing/unicode.py
 delete mode 100644 src/pip/_vendor/pyparsing/util.py

diff --git a/news/pyparsing.vendor.rst b/news/pyparsing.vendor.rst
index d6aab47079f..37a586a5651 100644
--- a/news/pyparsing.vendor.rst
+++ b/news/pyparsing.vendor.rst
@@ -1 +1 @@
-Upgrade pyparsing to 3.1.2
+Remove pyparsing
diff --git a/src/pip/_vendor/pyparsing/LICENSE b/src/pip/_vendor/pyparsing/LICENSE
deleted file mode 100644
index 1bf98523e33..00000000000
--- a/src/pip/_vendor/pyparsing/LICENSE
+++ /dev/null
@@ -1,18 +0,0 @@
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/pip/_vendor/pyparsing/__init__.py b/src/pip/_vendor/pyparsing/__init__.py
deleted file mode 100644
index 83937ebf1f1..00000000000
--- a/src/pip/_vendor/pyparsing/__init__.py
+++ /dev/null
@@ -1,325 +0,0 @@
-# module pyparsing.py
-#
-# Copyright (c) 2003-2022  Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = """
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and
-executing simple grammars, vs. the traditional lex/yacc approach, or the
-use of regular expressions.  With pyparsing, you don't need to learn
-a new syntax for defining grammars or matching expressions - the parsing
-module provides a library of classes that you use to construct the
-grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form
-``", !"``), built up using :class:`Word`,
-:class:`Literal`, and :class:`And` elements
-(the :meth:`'+'` operators create :class:`And` expressions,
-and the strings are auto-converted to :class:`Literal` expressions)::
-
-    from pip._vendor.pyparsing import Word, alphas
-
-    # define grammar of a greeting
-    greet = Word(alphas) + "," + Word(alphas) + "!"
-
-    hello = "Hello, World!"
-    print(hello, "->", greet.parse_string(hello))
-
-The program outputs the following::
-
-    Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the
-self-explanatory class names, and the use of :class:`'+'`,
-:class:`'|'`, :class:`'^'` and :class:`'&'` operators.
-
-The :class:`ParseResults` object returned from
-:class:`ParserElement.parse_string` can be
-accessed as a nested list, a dictionary, or an object with named
-attributes.
-
-The pyparsing module handles some of the problems that are typically
-vexing when writing text parsers:
-
-  - extra or missing whitespace (the above program will also handle
-    "Hello,World!", "Hello  ,  World  !", etc.)
-  - quoted strings
-  - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes :class:`ParserElement` and :class:`ParseResults` to
-see the base classes that most other pyparsing
-classes inherit from. Use the docstrings for examples of how to:
-
- - construct literal match expressions from :class:`Literal` and
-   :class:`CaselessLiteral` classes
- - construct character word-group expressions using the :class:`Word`
-   class
- - see how to create repetitive expressions using :class:`ZeroOrMore`
-   and :class:`OneOrMore` classes
- - use :class:`'+'`, :class:`'|'`, :class:`'^'`,
-   and :class:`'&'` operators to combine simple expressions into
-   more complex ones
- - associate names with your parsed results using
-   :class:`ParserElement.set_results_name`
- - access the parsed data, which is returned as a :class:`ParseResults`
-   object
- - find some helpful expression short-cuts like :class:`DelimitedList`
-   and :class:`one_of`
- - find more useful common expressions in the :class:`pyparsing_common`
-   namespace class
-"""
-from typing import NamedTuple
-
-
-class version_info(NamedTuple):
-    major: int
-    minor: int
-    micro: int
-    releaselevel: str
-    serial: int
-
-    @property
-    def __version__(self):
-        return (
-            f"{self.major}.{self.minor}.{self.micro}"
-            + (
-                f"{'r' if self.releaselevel[0] == 'c' else ''}{self.releaselevel[0]}{self.serial}",
-                "",
-            )[self.releaselevel == "final"]
-        )
-
-    def __str__(self):
-        return f"{__name__} {self.__version__} / {__version_time__}"
-
-    def __repr__(self):
-        return f"{__name__}.{type(self).__name__}({', '.join('{}={!r}'.format(*nv) for nv in zip(self._fields, self))})"
-
-
-__version_info__ = version_info(3, 1, 2, "final", 1)
-__version_time__ = "06 Mar 2024 07:08 UTC"
-__version__ = __version_info__.__version__
-__versionTime__ = __version_time__
-__author__ = "Paul McGuire "
-
-from .util import *
-from .exceptions import *
-from .actions import *
-from .core import __diag__, __compat__
-from .results import *
-from .core import *  # type: ignore[misc, assignment]
-from .core import _builtin_exprs as core_builtin_exprs
-from .helpers import *  # type: ignore[misc, assignment]
-from .helpers import _builtin_exprs as helper_builtin_exprs
-
-from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
-from .testing import pyparsing_test as testing
-from .common import (
-    pyparsing_common as common,
-    _builtin_exprs as common_builtin_exprs,
-)
-
-# define backward compat synonyms
-if "pyparsing_unicode" not in globals():
-    pyparsing_unicode = unicode  # type: ignore[misc]
-if "pyparsing_common" not in globals():
-    pyparsing_common = common  # type: ignore[misc]
-if "pyparsing_test" not in globals():
-    pyparsing_test = testing  # type: ignore[misc]
-
-core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
-
-
-__all__ = [
-    "__version__",
-    "__version_time__",
-    "__author__",
-    "__compat__",
-    "__diag__",
-    "And",
-    "AtLineStart",
-    "AtStringStart",
-    "CaselessKeyword",
-    "CaselessLiteral",
-    "CharsNotIn",
-    "CloseMatch",
-    "Combine",
-    "DelimitedList",
-    "Dict",
-    "Each",
-    "Empty",
-    "FollowedBy",
-    "Forward",
-    "GoToColumn",
-    "Group",
-    "IndentedBlock",
-    "Keyword",
-    "LineEnd",
-    "LineStart",
-    "Literal",
-    "Located",
-    "PrecededBy",
-    "MatchFirst",
-    "NoMatch",
-    "NotAny",
-    "OneOrMore",
-    "OnlyOnce",
-    "OpAssoc",
-    "Opt",
-    "Optional",
-    "Or",
-    "ParseBaseException",
-    "ParseElementEnhance",
-    "ParseException",
-    "ParseExpression",
-    "ParseFatalException",
-    "ParseResults",
-    "ParseSyntaxException",
-    "ParserElement",
-    "PositionToken",
-    "QuotedString",
-    "RecursiveGrammarException",
-    "Regex",
-    "SkipTo",
-    "StringEnd",
-    "StringStart",
-    "Suppress",
-    "Token",
-    "TokenConverter",
-    "White",
-    "Word",
-    "WordEnd",
-    "WordStart",
-    "ZeroOrMore",
-    "Char",
-    "alphanums",
-    "alphas",
-    "alphas8bit",
-    "any_close_tag",
-    "any_open_tag",
-    "autoname_elements",
-    "c_style_comment",
-    "col",
-    "common_html_entity",
-    "condition_as_parse_action",
-    "counted_array",
-    "cpp_style_comment",
-    "dbl_quoted_string",
-    "dbl_slash_comment",
-    "delimited_list",
-    "dict_of",
-    "empty",
-    "hexnums",
-    "html_comment",
-    "identchars",
-    "identbodychars",
-    "infix_notation",
-    "java_style_comment",
-    "line",
-    "line_end",
-    "line_start",
-    "lineno",
-    "make_html_tags",
-    "make_xml_tags",
-    "match_only_at_col",
-    "match_previous_expr",
-    "match_previous_literal",
-    "nested_expr",
-    "null_debug_action",
-    "nums",
-    "one_of",
-    "original_text_for",
-    "printables",
-    "punc8bit",
-    "pyparsing_common",
-    "pyparsing_test",
-    "pyparsing_unicode",
-    "python_style_comment",
-    "quoted_string",
-    "remove_quotes",
-    "replace_with",
-    "replace_html_entity",
-    "rest_of_line",
-    "sgl_quoted_string",
-    "srange",
-    "string_end",
-    "string_start",
-    "token_map",
-    "trace_parse_action",
-    "ungroup",
-    "unicode_set",
-    "unicode_string",
-    "with_attribute",
-    "with_class",
-    # pre-PEP8 compatibility names
-    "__versionTime__",
-    "anyCloseTag",
-    "anyOpenTag",
-    "cStyleComment",
-    "commonHTMLEntity",
-    "conditionAsParseAction",
-    "countedArray",
-    "cppStyleComment",
-    "dblQuotedString",
-    "dblSlashComment",
-    "delimitedList",
-    "dictOf",
-    "htmlComment",
-    "indentedBlock",
-    "infixNotation",
-    "javaStyleComment",
-    "lineEnd",
-    "lineStart",
-    "locatedExpr",
-    "makeHTMLTags",
-    "makeXMLTags",
-    "matchOnlyAtCol",
-    "matchPreviousExpr",
-    "matchPreviousLiteral",
-    "nestedExpr",
-    "nullDebugAction",
-    "oneOf",
-    "opAssoc",
-    "originalTextFor",
-    "pythonStyleComment",
-    "quotedString",
-    "removeQuotes",
-    "replaceHTMLEntity",
-    "replaceWith",
-    "restOfLine",
-    "sglQuotedString",
-    "stringEnd",
-    "stringStart",
-    "tokenMap",
-    "traceParseAction",
-    "unicodeString",
-    "withAttribute",
-    "withClass",
-    "common",
-    "unicode",
-    "testing",
-]
diff --git a/src/pip/_vendor/pyparsing/actions.py b/src/pip/_vendor/pyparsing/actions.py
deleted file mode 100644
index ce51b3957ca..00000000000
--- a/src/pip/_vendor/pyparsing/actions.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# actions.py
-
-from .exceptions import ParseException
-from .util import col, replaced_by_pep8
-
-
-class OnlyOnce:
-    """
-    Wrapper for parse actions, to ensure they are only called once.
-    """
-
-    def __init__(self, method_call):
-        from .core import _trim_arity
-
-        self.callable = _trim_arity(method_call)
-        self.called = False
-
-    def __call__(self, s, l, t):
-        if not self.called:
-            results = self.callable(s, l, t)
-            self.called = True
-            return results
-        raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
-
-    def reset(self):
-        """
-        Allow the associated parse action to be called once more.
-        """
-
-        self.called = False
-
-
-def match_only_at_col(n):
-    """
-    Helper method for defining parse actions that require matching at
-    a specific column in the input text.
-    """
-
-    def verify_col(strg, locn, toks):
-        if col(locn, strg) != n:
-            raise ParseException(strg, locn, f"matched token not at column {n}")
-
-    return verify_col
-
-
-def replace_with(repl_str):
-    """
-    Helper method for common parse actions that simply return
-    a literal value.  Especially useful when used with
-    :class:`transform_string<ParserElement.transform_string>` ().
-
-    Example::
-
-        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
-        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
-        term = na | num
-
-        term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
-    """
-    return lambda s, l, t: [repl_str]
-
-
-def remove_quotes(s, l, t):
-    """
-    Helper parse action for removing quotation marks from parsed
-    quoted strings.
-
-    Example::
-
-        # by default, quotation marks are included in parsed results
-        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
-        # use remove_quotes to strip quotation marks from parsed results
-        quoted_string.set_parse_action(remove_quotes)
-        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
-    """
-    return t[0][1:-1]
-
-
-def with_attribute(*args, **attr_dict):
-    """
-    Helper to create a validating parse action to be used with start
-    tags created with :class:`make_xml_tags` or
-    :class:`make_html_tags`. Use ``with_attribute`` to qualify
-    a starting tag with a required attribute value, to avoid false
-    matches on common tags such as ```` or ``
``. - - Call ``with_attribute`` with a series of attribute names and - values. Specify the list of filter attributes names and values as: - - - keyword arguments, as in ``(align="right")``, or - - as an explicit dict with ``**`` operator, when an attribute - name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` - - For attribute names with a namespace prefix, you must use the second - form. Attribute names are matched insensitive to upper/lower case. - - If just testing for ``class`` (with or without a namespace), use - :class:`with_class`. - - To verify that the attribute exists, but without specifying a value, - pass ``with_attribute.ANY_VALUE`` as the value. - - Example:: - - html = ''' -
- Some text -
1 4 0 1 0
-
1,3 2,3 1,1
-
this has no type
-
- ''' - div,div_end = make_html_tags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().set_parse_action(with_attribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attr_dict.items() - attrs = [(k, v) for k, v in attrs] - - def pa(s, l, tokens): - for attrName, attrValue in attrs: - if attrName not in tokens: - raise ParseException(s, l, "no matching attribute " + attrName) - if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException( - s, - l, - f"attribute {attrName!r} has value {tokens[attrName]!r}, must be {attrValue!r}", - ) - - return pa - - -with_attribute.ANY_VALUE = object() # type: ignore [attr-defined] - - -def with_class(classname, namespace=""): - """ - Simplified version of :class:`with_attribute` when - matching on a div class - made difficult because ``class`` is - a reserved word in Python. - - Example:: - - html = ''' -
- Some text -
1 4 0 1 0
-
1,3 2,3 1,1
-
this <div> has no class
-
- - ''' - div,div_end = make_html_tags("div") - div_grid = div().set_parse_action(with_class("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = f"{namespace}:class" if namespace else "class" - return with_attribute(**{classattr: classname}) - - -# pre-PEP8 compatibility symbols -# fmt: off -replaceWith = replaced_by_pep8("replaceWith", replace_with) -removeQuotes = replaced_by_pep8("removeQuotes", remove_quotes) -withAttribute = replaced_by_pep8("withAttribute", with_attribute) -withClass = replaced_by_pep8("withClass", with_class) -matchOnlyAtCol = replaced_by_pep8("matchOnlyAtCol", match_only_at_col) -# fmt: on diff --git a/src/pip/_vendor/pyparsing/common.py b/src/pip/_vendor/pyparsing/common.py deleted file mode 100644 index 74faa46085a..00000000000 --- a/src/pip/_vendor/pyparsing/common.py +++ /dev/null @@ -1,439 +0,0 @@ -# common.py -from .core import * -from .helpers import DelimitedList, any_open_tag, any_close_tag -from datetime import datetime - - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - :class:`url` - - Parse actions: - - - :class:`convert_to_integer` - - :class:`convert_to_float` - - :class:`convert_to_date` - - :class:`convert_to_datetime` - - :class:`strip_html_tags` - - :class:`upcase_tokens` - - :class:`downcase_tokens` - - Example:: - - pyparsing_common.number.run_tests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.run_tests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.run_tests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.run_tests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.run_tests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID)) - pyparsing_common.uuid.run_tests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - 
convert_to_integer = token_map(int) - """ - Parse action for converting parsed integers to Python int - """ - - convert_to_float = token_map(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = ( - Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) - ) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = ( - Regex(r"[+-]?\d+") - .set_name("signed integer") - .set_parse_action(convert_to_integer) - ) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = ( - signed_integer().set_parse_action(convert_to_float) - + "/" - + signed_integer().set_parse_action(convert_to_float) - ).set_name("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) - - mixed_integer = ( - fraction | signed_integer + Opt(Opt("-").suppress() + fraction) - ).set_name("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.add_parse_action(sum) - - real = ( - Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") - .set_name("real number") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number and returns a float""" - - sci_real = ( - Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") - .set_name("real number with scientific notation") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).set_name("number").streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = ( - Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") - .set_name("fnumber") - .set_parse_action(convert_to_float) - ) - """any int or real number, returned as float""" - - ieee_float = ( - Regex(r"(?i)[+-]?((\d+\.?\d*(e[+-]?\d+)?)|nan|inf(inity)?)") - .set_name("ieee_float") - .set_parse_action(convert_to_float) - ) - """any floating-point literal (int, real number, infinity, or NaN), returned as float""" - - identifier = Word(identchars, identbodychars).set_name("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex( - r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" - ).set_name("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") - _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( - "full IPv6 address" - ) - _short_ipv6_address = ( - Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - + "::" - + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - ).set_name("short IPv6 address") - _short_ipv6_address.add_condition( - lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 - ) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") - ipv6_address = Combine( - (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( - "IPv6 address" - ) - ).set_name("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex( - 
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" - ).set_name("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)" - - @staticmethod - def convert_to_date(fmt: str = "%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.set_parse_action(pyparsing_common.convert_to_date()) - print(date_expr.parse_string("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - - def cvt_fn(ss, ll, tt): - try: - return datetime.strptime(tt[0], fmt).date() - except ValueError as ve: - raise ParseException(ss, ll, str(ve)) - - return cvt_fn - - @staticmethod - def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.set_parse_action(pyparsing_common.convert_to_datetime()) - print(dt_expr.parse_string("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - - return cvt_fn - - iso8601_date = Regex( - r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" - ).set_name("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex( - r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" - ).set_name("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() - - @staticmethod - def strip_html_tags(s: str, l: int, tokens: ParseResults): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = 'More info at the pyparsing wiki page' - td, td_end = make_html_tags("TD") - table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end - print(table_text.parse_string(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transform_string(tokens[0]) - - _commasepitem = ( - Combine( - OneOrMore( - ~Literal(",") - + ~LineEnd() - + Word(printables, exclude_chars=",") - + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) - ) - ) - .streamline() - .set_name("commaItem") - ) - comma_separated_list = DelimitedList( - Opt(quoted_string.copy() | _commasepitem, default="") - ).set_name("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcase_tokens = staticmethod(token_map(lambda t: t.upper())) - """Parse action to convert tokens to upper case.""" - - downcase_tokens = staticmethod(token_map(lambda t: t.lower())) - """Parse action to convert tokens to lower case.""" - - # fmt: off - url = Regex( - # https://mathiasbynens.be/demo/url-regex - # https://gist.github.com/dperini/729294 - r"(?P" 
+ - # protocol identifier (optional) - # short syntax // still required - r"(?:(?:(?Phttps?|ftp):)?\/\/)" + - # user:pass BasicAuth (optional) - r"(?:(?P\S+(?::\S*)?)@)?" + - r"(?P" + - # IP address exclusion - # private & local networks - r"(?!(?:10|127)(?:\.\d{1,3}){3})" + - r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + - r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + - # IP address dotted notation octets - # excludes loopback network 0.0.0.0 - # excludes reserved space >= 224.0.0.0 - # excludes network & broadcast addresses - # (first & last IP address of each class) - r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + - r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + - r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + - r"|" + - # host & domain names, may end with dot - # can be replaced by a shortest alternative - # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ - r"(?:" + - r"(?:" + - r"[a-z0-9\u00a1-\uffff]" + - r"[a-z0-9\u00a1-\uffff_-]{0,62}" + - r")?" + - r"[a-z0-9\u00a1-\uffff]\." + - r")+" + - # TLD identifier name, may end with dot - r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + - r")" + - # port number (optional) - r"(:(?P\d{2,5}))?" + - # resource path (optional) - r"(?P\/[^?# ]*)?" + - # query string (optional) - r"(\?(?P[^#]*))?" + - # fragment (optional) - r"(#(?P\S*))?" + - r")" - ).set_name("url") - """URL (http/https/ftp scheme)""" - # fmt: on - - # pre-PEP8 compatibility names - convertToInteger = convert_to_integer - """Deprecated - use :class:`convert_to_integer`""" - convertToFloat = convert_to_float - """Deprecated - use :class:`convert_to_float`""" - convertToDate = convert_to_date - """Deprecated - use :class:`convert_to_date`""" - convertToDatetime = convert_to_datetime - """Deprecated - use :class:`convert_to_datetime`""" - stripHTMLTags = strip_html_tags - """Deprecated - use :class:`strip_html_tags`""" - upcaseTokens = upcase_tokens - """Deprecated - use :class:`upcase_tokens`""" - downcaseTokens = downcase_tokens - """Deprecated - use :class:`downcase_tokens`""" - - -_builtin_exprs = [ - v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) -] diff --git a/src/pip/_vendor/pyparsing/core.py b/src/pip/_vendor/pyparsing/core.py deleted file mode 100644 index fc403862354..00000000000 --- a/src/pip/_vendor/pyparsing/core.py +++ /dev/null @@ -1,6087 +0,0 @@ -# -# core.py -# - -from collections import deque -import os -import typing -from typing import ( - Any, - Callable, - Generator, - List, - NamedTuple, - Sequence, - Set, - TextIO, - Tuple, - Union, - cast, -) -from abc import ABC, abstractmethod -from enum import Enum -import string -import copy -import warnings -import re -import sys -from collections.abc import Iterable -import traceback -import types -from operator import itemgetter -from functools import wraps -from threading import RLock -from pathlib import Path - -from .util import ( - _FifoCache, - _UnboundedCache, - __config_flags, - _collapse_string_to_ranges, - _escape_regex_range_chars, - _bslash, - _flatten, - LRUMemo as _LRUMemo, - UnboundedMemo as _UnboundedMemo, - replaced_by_pep8, -) -from .exceptions import * -from .actions import * -from .results import ParseResults, _ParseResultsWithOffset -from .unicode import pyparsing_unicode - -_MAX_INT = sys.maxsize -str_type: Tuple[type, ...] = (str, bytes) - -# -# Copyright (c) 2003-2022 Paul T. 
McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - - -if sys.version_info >= (3, 8): - from functools import cached_property -else: - - class cached_property: - def __init__(self, func): - self._func = func - - def __get__(self, instance, owner=None): - ret = instance.__dict__[self._func.__name__] = self._func(instance) - return ret - - -class __compat__(__config_flags): - """ - A cross-version compatibility configuration for pyparsing features that will be - released in a future version. By setting values in this configuration to True, - those features can be enabled in prior versions for compatibility development - and testing. - - - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping - of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; - maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 - behavior - """ - - _type_desc = "compatibility" - - collect_all_And_tokens = True - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _fixed_names = """ - collect_all_And_tokens - """.split() - - -class __diag__(__config_flags): - _type_desc = "diagnostic" - - warn_multiple_tokens_in_named_alternation = False - warn_ungrouped_named_tokens_in_collection = False - warn_name_set_on_empty_Forward = False - warn_on_parse_using_empty_Forward = False - warn_on_assignment_to_Forward = False - warn_on_multiple_string_args_to_oneof = False - warn_on_match_first_with_lshift_operator = False - enable_debug_on_named_expressions = False - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _warning_names = [name for name in _all_names if name.startswith("warn")] - _debug_names = [name for name in _all_names if name.startswith("enable_debug")] - - @classmethod - def enable_all_warnings(cls) -> None: - for name in cls._warning_names: - cls.enable(name) - - -class Diagnostics(Enum): - """ - Diagnostic configuration (all default to disabled) - - - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results - name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions - - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results - name is defined on a containing expression with ungrouped subexpressions that also - have results names - - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined - with 
a results name, but has no contents defined - - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is - defined in a grammar but has never had an expression attached to it - - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined - but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` - - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is - incorrectly called with multiple str arguments - - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent - calls to :class:`ParserElement.set_name` - - Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. - All warnings can be enabled by calling :class:`enable_all_warnings`. - """ - - warn_multiple_tokens_in_named_alternation = 0 - warn_ungrouped_named_tokens_in_collection = 1 - warn_name_set_on_empty_Forward = 2 - warn_on_parse_using_empty_Forward = 3 - warn_on_assignment_to_Forward = 4 - warn_on_multiple_string_args_to_oneof = 5 - warn_on_match_first_with_lshift_operator = 6 - enable_debug_on_named_expressions = 7 - - -def enable_diag(diag_enum: Diagnostics) -> None: - """ - Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.enable(diag_enum.name) - - -def disable_diag(diag_enum: Diagnostics) -> None: - """ - Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.disable(diag_enum.name) - - -def enable_all_warnings() -> None: - """ - Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). - """ - __diag__.enable_all_warnings() - - -# hide abstract class -del __config_flags - - -def _should_enable_warnings( - cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] -) -> bool: - enable = bool(warn_env_var) - for warn_opt in cmd_line_warn_options: - w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( - ":" - )[:5] - if not w_action.lower().startswith("i") and ( - not (w_message or w_category or w_module) or w_module == "pyparsing" - ): - enable = True - elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): - enable = False - return enable - - -if _should_enable_warnings( - sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") -): - enable_all_warnings() - - -# build list of single arg builtins, that can be used as parse actions -_single_arg_builtins = { - sum, - len, - sorted, - reversed, - list, - tuple, - set, - any, - all, - min, - max, -} - -_generatorType = types.GeneratorType -ParseImplReturnType = Tuple[int, Any] -PostParseReturnType = Union[ParseResults, Sequence[ParseResults]] -ParseAction = Union[ - Callable[[], Any], - Callable[[ParseResults], Any], - Callable[[int, ParseResults], Any], - Callable[[str, int, ParseResults], Any], -] -ParseCondition = Union[ - Callable[[], bool], - Callable[[ParseResults], bool], - Callable[[int, ParseResults], bool], - Callable[[str, int, ParseResults], bool], -] -ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] -DebugStartAction = Callable[[str, int, "ParserElement", bool], None] -DebugSuccessAction = Callable[ - [str, int, int, "ParserElement", ParseResults, bool], None -] -DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] - - -alphas = string.ascii_uppercase + string.ascii_lowercase -identchars = pyparsing_unicode.Latin1.identchars -identbodychars = 
pyparsing_unicode.Latin1.identbodychars -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -printables = "".join([c for c in string.printable if c not in string.whitespace]) - -_trim_arity_call_line: traceback.StackSummary = None # type: ignore[assignment] - - -def _trim_arity(func, max_limit=3): - """decorator to trim function calls to match the arity of the target""" - global _trim_arity_call_line - - if func in _single_arg_builtins: - return lambda s, l, t: func(t) - - limit = 0 - found_arity = False - - # synthesize what would be returned by traceback.extract_stack at the call to - # user's parse action 'func', so that we don't incur call penalty at parse time - - # fmt: off - LINE_DIFF = 7 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! - _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1]) - pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) - - def wrapper(*args): - nonlocal found_arity, limit - while 1: - try: - ret = func(*args[limit:]) - found_arity = True - return ret - except TypeError as te: - # re-raise TypeErrors if they did not come from our arity testing - if found_arity: - raise - else: - tb = te.__traceback__ - frames = traceback.extract_tb(tb, limit=2) - frame_summary = frames[-1] - trim_arity_type_error = ( - [frame_summary[:2]][-1][:2] == pa_call_line_synth - ) - del tb - - if trim_arity_type_error: - if limit < max_limit: - limit += 1 - continue - - raise - # fmt: on - - # copy func name to wrapper for sensible debug output - # (can't use functools.wraps, since that messes with function signature) - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - wrapper.__name__ = func_name - wrapper.__doc__ = func.__doc__ - - return wrapper - - -def condition_as_parse_action( - fn: ParseCondition, message: typing.Optional[str] = None, fatal: bool = False -) -> ParseAction: - """ - Function to convert a simple predicate function that returns ``True`` or ``False`` - into a parse action. Can be used in places when a parse action is required - and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition - to an operator level in :class:`infix_notation`). 
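-
- For example, a minimal sketch of wrapping a plain predicate so that the
- resulting parse action can be attached anywhere a parse action is accepted
- (names here are illustrative)::
-
- in_range = condition_as_parse_action(lambda t: int(t[0]) < 100, message="value too large")
- small_int = Word(nums).add_parse_action(in_range)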
- - Optional keyword arguments: - - - ``message`` - define a custom message to be used in the raised exception - - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; - otherwise will raise :class:`ParseException` - - """ - msg = message if message is not None else "failed user-defined condition" - exc_type = ParseFatalException if fatal else ParseException - fn = _trim_arity(fn) - - @wraps(fn) - def pa(s, l, t): - if not bool(fn(s, l, t)): - raise exc_type(s, l, msg) - - return pa - - -def _default_start_debug_action( - instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False -): - cache_hit_str = "*" if cache_hit else "" - print( - ( - f"{cache_hit_str}Match {expr} at loc {loc}({lineno(loc, instring)},{col(loc, instring)})\n" - f" {line(loc, instring)}\n" - f" {' ' * (col(loc, instring) - 1)}^" - ) - ) - - -def _default_success_debug_action( - instring: str, - startloc: int, - endloc: int, - expr: "ParserElement", - toks: ParseResults, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print(f"{cache_hit_str}Matched {expr} -> {toks.as_list()}") - - -def _default_exception_debug_action( - instring: str, - loc: int, - expr: "ParserElement", - exc: Exception, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print(f"{cache_hit_str}Match {expr} failed, {type(exc).__name__} raised: {exc}") - - -def null_debug_action(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - - -class ParserElement(ABC): - """Abstract base level parser element class.""" - - DEFAULT_WHITE_CHARS: str = " \n\t\r" - verbose_stacktrace: bool = False - _literalStringClass: type = None # type: ignore[assignment] - - @staticmethod - def set_default_whitespace_chars(chars: str) -> None: - r""" - Overrides the default whitespace chars - - Example:: - - # default whitespace chars are space, and newline - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.set_default_whitespace_chars(" \t") - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - # update whitespace all parse expressions defined in this module - for expr in _builtin_exprs: - if expr.copyDefaultWhiteChars: - expr.whiteChars = set(chars) - - @staticmethod - def inline_literals_using(cls: type) -> None: - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inline_literals_using(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - @classmethod - def using_each(cls, seq, **class_kwargs): - """ - Yields a sequence of class(obj, **class_kwargs) for obj in seq. 
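-
- Any ``class_kwargs`` are passed through to each constructed instance; a short
- sketch (the ``caseless`` argument is just one plausible use)::
-
- AND_, OR_, NOT_ = Keyword.using_each("and or not".split(), caseless=True)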
- - Example:: - - LPAR, RPAR, LBRACE, RBRACE, SEMI = Suppress.using_each("(){};") - - """ - yield from (cls(obj, **class_kwargs) for obj in seq) - - class DebugActions(NamedTuple): - debug_try: typing.Optional[DebugStartAction] - debug_match: typing.Optional[DebugSuccessAction] - debug_fail: typing.Optional[DebugExceptionAction] - - def __init__(self, savelist: bool = False): - self.parseAction: List[ParseAction] = list() - self.failAction: typing.Optional[ParseFailAction] = None - self.customName: str = None # type: ignore[assignment] - self._defaultName: typing.Optional[str] = None - self.resultsName: str = None # type: ignore[assignment] - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - # used when checking for left-recursion - self.mayReturnEmpty = False - self.keepTabs = False - self.ignoreExprs: List["ParserElement"] = list() - self.debug = False - self.streamlined = False - # optimize exception handling for subclasses that don't advance parse index - self.mayIndexError = True - self.errmsg = "" - # mark results names as modal (report only last) or cumulative (list all) - self.modalResults = True - # custom debug actions - self.debugActions = self.DebugActions(None, None, None) - # avoid redundant calls to preParse - self.callPreparse = True - self.callDuringTry = False - self.suppress_warnings_: List[Diagnostics] = [] - - def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement": - """ - Suppress warnings emitted for a particular diagnostic on this expression. - - Example:: - - base = pp.Forward() - base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) - - # statement would normally raise a warning, but is now suppressed - print(base.parse_string("x")) - - """ - self.suppress_warnings_.append(warning_type) - return self - - def visit_all(self): - """General-purpose method to yield all expressions and sub-expressions - in a grammar. Typically just for internal use. - """ - to_visit = deque([self]) - seen = set() - while to_visit: - cur = to_visit.popleft() - - # guard against looping forever through recursive grammars - if cur in seen: - continue - seen.add(cur) - - to_visit.extend(cur.recurse()) - yield cur - - def copy(self) -> "ParserElement": - """ - Make a copy of this :class:`ParserElement`. Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. - - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") - integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - - print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - """ - cpy = copy.copy(self) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - return cpy - - def set_results_name( - self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False - ) -> "ParserElement": - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. 
- - Normally, results names are assigned as you would assign keys in a dict: - any existing value is overwritten by later values. If it is necessary to - keep all values captured for a particular results name, call ``set_results_name`` - with ``list_all_matches`` = True. - - NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.set_results_name("name")`` - - see :class:`__call__`. If ``list_all_matches`` is required, use - ``expr("name*")``. - - Example:: - - integer = Word(nums) - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - listAllMatches = listAllMatches or list_all_matches - return self._setResultsName(name, listAllMatches) - - def _setResultsName(self, name, listAllMatches=False): - if name is None: - return self - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches = True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def set_break(self, break_flag: bool = True) -> "ParserElement": - """ - Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to - disable. - """ - if break_flag: - _parseMethod = self._parse - - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - - # this call to pdb.set_trace() is intentional, not a checkin error - pdb.set_trace() - return _parseMethod(instring, loc, doActions, callPreParse) - - breaker._originalParseMethod = _parseMethod # type: ignore [attr-defined] - self._parse = breaker # type: ignore [assignment] - elif hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod # type: ignore [attr-defined, assignment] - return self - - def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Define one or more actions to perform when successfully matching parse element definition. - - Parse actions can be called to perform data conversions, do extra validation, - update external data structures, or enhance or replace the parsed tokens. - Each parse action ``fn`` is a callable method with 0-3 arguments, called as - ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - - - ``s`` = the original string being parsed (see note below) - - ``loc`` = the location of the matching substring - - ``toks`` = a list of the matched tokens, packaged as a :class:`ParseResults` object - - The parsed tokens are passed to the parse action as ParseResults. They can be - modified in place using list-style append, extend, and pop operations to update - the parsed list elements; and with dictionary-style item set and del operations - to add, update, or remove any named results. If the tokens are modified in place, - it is not necessary to return them with a return statement. - - Parse actions can also completely replace the given tokens, with another ``ParseResults`` - object, or with some entirely different object (common for parse actions that perform data - conversions). 
A convenient way to build a new parse result is to define the values
- using a dict, and then create the return value using :class:`ParseResults.from_dict`.
-
- If None is passed as the ``fn`` parse action, all previously added parse actions for this
- expression are cleared.
-
- Optional keyword arguments:
-
- - ``call_during_try`` = (default= ``False``) indicate if parse action should be run during
- lookaheads and alternate testing. For parse actions that have side effects, it is
- important to only call the parse action once it is determined that it is being
- called as part of a successful parse. For parse actions that perform additional
- validation, ``call_during_try`` should be passed as ``True``, so that the validation
- code is included in the preliminary "try" parses.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See :class:`parse_string` for more
- information on parsing strings containing ``<TAB>`` characters, and suggested
- methods to maintain a consistent view of the parsed string, the parse
- location, and line and column positions within the parsed string.
-
- Example::
-
- # parse dates in the form YYYY/MM/DD
-
- # use parse action to convert toks from str to int at parse time
- def convert_to_int(toks):
- return int(toks[0])
-
- # use a parse action to verify that the date is a valid date
- def is_valid_date(instring, loc, toks):
- from datetime import date
- year, month, day = toks[::2]
- try:
- date(year, month, day)
- except ValueError:
- raise ParseException(instring, loc, "invalid date given")
-
- integer = Word(nums)
- date_str = integer + '/' + integer + '/' + integer
-
- # add parse actions
- integer.set_parse_action(convert_to_int)
- date_str.set_parse_action(is_valid_date)
-
- # note that integer fields are now ints, not strings
- date_str.run_tests('''
- # successful parse - note that integer fields were converted to ints
- 1999/12/31
-
- # fail - invalid date
- 1999/13/31
- ''')
- """
- if list(fns) == [None]:
- self.parseAction = []
- return self
-
- if not all(callable(fn) for fn in fns):
- raise TypeError("parse actions must be callable")
- self.parseAction = [_trim_arity(fn) for fn in fns]
- self.callDuringTry = kwargs.get(
- "call_during_try", kwargs.get("callDuringTry", False)
- )
-
- return self
-
- def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
- """
- Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
-
- See examples in :class:`copy`.
- """
- self.parseAction += [_trim_arity(fn) for fn in fns]
- self.callDuringTry = self.callDuringTry or kwargs.get(
- "call_during_try", kwargs.get("callDuringTry", False)
- )
- return self
-
- def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement":
- """Add a boolean predicate function to expression's list of parse actions. See
- :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
- functions passed to ``add_condition`` need to return boolean success/fail of the condition.
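-
- A one-line sketch of the predicate form (illustrative)::
-
- year = Word(nums).add_condition(lambda toks: len(toks[0]) == 4)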
- - Optional keyword arguments: - - - ``message`` = define a custom message to be used in the raised exception - - ``fatal`` = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise - ParseException - - ``call_during_try`` = boolean to indicate if this method should be called during internal tryParse calls, - default=False - - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), - (line:1, col:1) - """ - for fn in fns: - self.parseAction.append( - condition_as_parse_action( - fn, - message=str(kwargs.get("message")), - fatal=bool(kwargs.get("fatal", False)), - ) - ) - - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def set_fail_action(self, fn: ParseFailAction) -> "ParserElement": - """ - Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - ``fn(s, loc, expr, err)`` where: - - - ``s`` = string being parsed - - ``loc`` = location where expression match was attempted and failed - - ``expr`` = the parse expression that failed - - ``err`` = the exception thrown - - The function returns no value. It may throw :class:`ParseFatalException` - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables(self, instring: str, loc: int) -> int: - if not self.ignoreExprs: - return loc - exprsFound = True - ignore_expr_fns = [e._parse for e in self.ignoreExprs] - last_loc = loc - while exprsFound: - exprsFound = False - for ignore_fn in ignore_expr_fns: - try: - while 1: - loc, dummy = ignore_fn(instring, loc) - exprsFound = True - except ParseException: - pass - # check if all ignore exprs matched but didn't actually advance the parse location - if loc == last_loc: - break - last_loc = loc - return loc - - def preParse(self, instring: str, loc: int) -> int: - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - - if self.skipWhitespace: - instrlen = len(instring) - white_chars = self.whiteChars - while loc < instrlen and instring[loc] in white_chars: - loc += 1 - - return loc - - def parseImpl(self, instring, loc, doActions=True): - return loc, [] - - def postParse(self, instring, loc, tokenlist): - return tokenlist - - # @profile - def _parseNoCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - TRY, MATCH, FAIL = 0, 1, 2 - debugging = self.debug # and doActions) - len_instring = len(instring) - - if debugging or self.failAction: - # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) - try: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.debugActions.debug_try: - self.debugActions.debug_try(instring, tokens_start, self, False) - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except Exception as err: - # print("Exception 
raised:", err) - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - if self.failAction: - self.failAction(instring, tokens_start, self, err) - raise - else: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - - tokens = self.postParse(instring, loc, tokens) - - ret_tokens = ParseResults( - tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults - ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) # type: ignore [call-arg, arg-type] - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - except Exception as err: - # print "Exception raised in user parse action:", err - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - raise - else: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) # type: ignore [call-arg, arg-type] - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - if debugging: - # print("Matched", self, "->", ret_tokens.as_list()) - if self.debugActions.debug_match: - self.debugActions.debug_match( - instring, tokens_start, loc, self, ret_tokens, False - ) - - return loc, ret_tokens - - def try_parse( - self, - instring: str, - loc: int, - *, - raise_fatal: bool = False, - do_actions: bool = False, - ) -> int: - try: - return self._parse(instring, loc, doActions=do_actions)[0] - except ParseFatalException: - if raise_fatal: - raise - raise ParseException(instring, loc, self.errmsg, self) - - def can_parse_next(self, instring: str, loc: int, do_actions: bool = False) -> bool: - try: - self.try_parse(instring, loc, do_actions=do_actions) - except (ParseException, IndexError): - return False - else: - return True - - # cache for left-recursion in Forward references - recursion_lock = RLock() - recursion_memos: typing.Dict[ - Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] - ] = {} - - class _CacheType(dict): - """ - class to help type checking - """ - - not_in_cache: bool - - def get(self, *args): ... - - def set(self, *args): ... 
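-
- # A usage sketch (illustrative, not original source): enable packrat parsing
- # and read the hit/miss counters kept in packrat_cache_stats:
- #
- # import pyparsing as pp
- # pp.ParserElement.enable_packrat()
- # pp.Word(pp.nums).parse_string("123")
- # hits, misses = pp.ParserElement.packrat_cache_stats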
- - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = ( - _CacheType() - ) # set later by enable_packrat(); this is here so that reset_cache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - HIT, MISS = 0, 1 - TRY, MATCH, FAIL = 0, 1, 2 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy(), loc)) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if self.debug and self.debugActions.debug_try: - try: - self.debugActions.debug_try(instring, loc, self, cache_hit=True) # type: ignore [call-arg] - except TypeError: - pass - if isinstance(value, Exception): - if self.debug and self.debugActions.debug_fail: - try: - self.debugActions.debug_fail( - instring, loc, self, value, cache_hit=True # type: ignore [call-arg] - ) - except TypeError: - pass - raise value - - value = cast(Tuple[int, ParseResults, int], value) - loc_, result, endloc = value[0], value[1].copy(), value[2] - if self.debug and self.debugActions.debug_match: - try: - self.debugActions.debug_match( - instring, loc_, endloc, self, result, cache_hit=True # type: ignore [call-arg] - ) - except TypeError: - pass - - return loc_, result - - _parse = _parseNoCache - - @staticmethod - def reset_cache() -> None: - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len( - ParserElement.packrat_cache_stats - ) - ParserElement.recursion_memos.clear() - - _packratEnabled = False - _left_recursion_enabled = False - - @staticmethod - def disable_memoization() -> None: - """ - Disables active Packrat or Left Recursion parsing and their memoization - - This method also works if neither Packrat nor Left Recursion are enabled. - This makes it safe to call before activating Packrat nor Left Recursion - to clear any previous settings. - """ - ParserElement.reset_cache() - ParserElement._left_recursion_enabled = False - ParserElement._packratEnabled = False - ParserElement._parse = ParserElement._parseNoCache - - @staticmethod - def enable_left_recursion( - cache_size_limit: typing.Optional[int] = None, *, force=False - ) -> None: - """ - Enables "bounded recursion" parsing, which allows for both direct and indirect - left-recursion. During parsing, left-recursive :class:`Forward` elements are - repeatedly matched with a fixed recursion depth that is gradually increased - until finding the longest match. - - Example:: - - from pip._vendor import pyparsing as pp - pp.ParserElement.enable_left_recursion() - - E = pp.Forward("E") - num = pp.Word(pp.nums) - # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... 
- E <<= E + '+' - num | num - - print(E.parse_string("1+2+3")) - - Recursion search naturally memoizes matches of ``Forward`` elements and may - thus skip reevaluation of parse actions during backtracking. This may break - programs with parse actions which rely on strict ordering of side-effects. - - Parameters: - - - ``cache_size_limit`` - (default=``None``) - memoize at most this many - ``Forward`` elements during matching; if ``None`` (the default), - memoize all ``Forward`` elements. - - Bounded Recursion parsing works similar but not identical to Packrat parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. - """ - if force: - ParserElement.disable_memoization() - elif ParserElement._packratEnabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if cache_size_limit is None: - ParserElement.recursion_memos = _UnboundedMemo() # type: ignore[assignment] - elif cache_size_limit > 0: - ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) # type: ignore[assignment] - else: - raise NotImplementedError(f"Memo size of {cache_size_limit}") - ParserElement._left_recursion_enabled = True - - @staticmethod - def enable_packrat( - cache_size_limit: Union[int, None] = 128, *, force: bool = False - ) -> None: - """ - Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - Parameters: - - - ``cache_size_limit`` - (default= ``128``) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method :class:`ParserElement.enable_packrat`. - For best results, call ``enable_packrat()`` immediately after - importing pyparsing. - - Example:: - - from pip._vendor import pyparsing - pyparsing.ParserElement.enable_packrat() - - Packrat parsing works similar but not identical to Bounded Recursion parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. - """ - if force: - ParserElement.disable_memoization() - elif ParserElement._left_recursion_enabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - - if ParserElement._packratEnabled: - return - - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = _UnboundedCache() - else: - ParserElement.packrat_cache = _FifoCache(cache_size_limit) # type: ignore[assignment] - ParserElement._parse = ParserElement._parseCache - - def parse_string( - self, instring: str, parse_all: bool = False, *, parseAll: bool = False - ) -> ParseResults: - """ - Parse a string with respect to the parser definition. This function is intended as the primary interface to the - client code. - - :param instring: The input string to be parsed. - :param parse_all: If set, the entire input string must match the grammar. - :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. 
- :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. - :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or - an object with attributes if the given parser includes results names. - - If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This - is also equivalent to ending the grammar with :class:`StringEnd`\\ (). - - To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are - converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string - contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string - being parsed, one can ensure a consistent view of the input string by doing one of the following: - - - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), - - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the - parse action's ``s`` argument, or - - explicitly expand the tabs in your input string before calling ``parse_string``. - - Examples: - - By default, partial matches are OK. - - >>> res = Word('a').parse_string('aaaaabaaa') - >>> print(res) - ['aaaaa'] - - The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children - directly to see more examples. - - It raises an exception if parse_all flag is set and instring does not match the whole grammar. - - >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) - Traceback (most recent call last): - ... - pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) - """ - parseAll = parse_all or parseAll - - ParserElement.reset_cache() - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse(instring, 0) - if parseAll: - loc = self.preParse(instring, loc) - se = Empty() + StringEnd() - se._parse(instring, loc) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - raise exc.with_traceback(None) - else: - return tokens - - def scan_string( - self, - instring: str, - max_matches: int = _MAX_INT, - overlap: bool = False, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> Generator[Tuple[ParseResults, int, int], None, None]: - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - ``max_matches`` argument, to clip scanning after 'n' matches are found. If - ``overlap`` is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See :class:`parse_string` for more information on parsing - strings with embedded tabs. 
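-
- With ``overlap=True``, matches that share characters are also reported; a
- short sketch::
-
- for t, s, e in Word(alphas, exact=3).scan_string("abcd", overlap=True):
- print(t, s, e) # -> ['abc'] 0 3, then ['bcd'] 1 4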
- - Example:: - - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens, start, end in Word(alphas).scan_string(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - maxMatches = min(maxMatches, max_matches) - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = str(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc: int = preparseFn(instring, loc) - nextLoc: int - tokens: ParseResults - nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) - except ParseException: - loc = preloc + 1 - else: - if nextLoc > loc: - matches += 1 - if debug: - print( - { - "tokens": tokens.asList(), - "start": preloc, - "end": nextLoc, - } - ) - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn(instring, loc) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc + 1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def transform_string(self, instring: str, *, debug: bool = False) -> str: - """ - Extension to :class:`scan_string`, to modify matching text with modified tokens that may - be returned from a parse action. To use ``transform_string``, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking ``transform_string()`` on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. ``transform_string()`` returns the resulting transformed string. - - Example:: - - wd = Word(alphas) - wd.set_parse_action(lambda toks: toks[0].title()) - - print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) - - prints:: - - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out: List[str] = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transform_string and scan_string - self.keepTabs = True - try: - for t, s, e in self.scan_string(instring, debug=debug): - out.append(instring[lastE:s]) - lastE = e - - if not t: - continue - - if isinstance(t, ParseResults): - out += t.as_list() - elif isinstance(t, Iterable) and not isinstance(t, str_type): - out.extend(t) - else: - out.append(t) - - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join([str(s) for s in _flatten(out)]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def search_string( - self, - instring: str, - max_matches: int = _MAX_INT, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> ParseResults: - """ - Another extension to :class:`scan_string`, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - ``max_matches`` argument, to clip searching after 'n' matches are found. 
- - Example:: - - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) - - # the sum() builtin can be used to merge results into a single ParseResults object - print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) - - prints:: - - [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] - ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] - """ - maxMatches = min(maxMatches, max_matches) - try: - return ParseResults( - [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] - ) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def split( - self, - instring: str, - maxsplit: int = _MAX_INT, - include_separators: bool = False, - *, - includeSeparators=False, - ) -> Generator[str, None, None]: - """ - Generator method to split a string using the given expression as a separator. - May be called with optional ``maxsplit`` argument, to limit the number of splits; - and the optional ``include_separators`` argument (default= ``False``), if the separating - matching text should be included in the split results. - - Example:: - - punc = one_of(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - - prints:: - - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - includeSeparators = includeSeparators or include_separators - last = 0 - for t, s, e in self.scan_string(instring, max_matches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement` - converts them to :class:`Literal`\\ s by default. - - Example:: - - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print(hello, "->", greet.parse_string(hello)) - - prints:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - - ``...`` may be used as a parse expression as a short form of :class:`SkipTo`:: - - Literal('start') + ... + Literal('end') - - is equivalent to:: - - Literal('start') + SkipTo('end')("_skipped*") + Literal('end') - - Note that the skipped text is returned with '_skipped' as a results name, - and to support having multiple skips in the same parser, the value returned is - a list of all skipped text. 
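-
- A sketch of reading the skipped text back out of the results::
-
- expr = Literal('start') + ... + Literal('end')
- result = expr.parse_string("start 123 end")
- print(result["_skipped"]) # e.g. -> ['123 ']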
- """ - if other is Ellipsis: - return _PendingSkip(self) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return And([self, other]) - - def __radd__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator when left operand is not a :class:`ParserElement` - """ - if other is Ellipsis: - return SkipTo(self)("_skipped*") + self - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return other + self - - def __sub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator, returns :class:`And` with error stop - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return self + And._ErrorStop() + other - - def __rsub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return other - self - - def __mul__(self, other) -> "ParserElement": - """ - Implementation of ``*`` operator, allows use of ``expr * 3`` in place of - ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer - tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples - may also include ``None`` as in: - - - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None, n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None, n)`` does not enforce a maximum number of expr - occurrences. 
If this behavior is desired, then write - ``expr*(None, n) + ~expr`` - """ - if other is Ellipsis: - other = (0, None) - elif isinstance(other, tuple) and other[:1] == (Ellipsis,): - other = ((0,) + other[1:] + (None,))[:2] - - if not isinstance(other, (int, tuple)): - return NotImplemented - - if isinstance(other, int): - minElements, optElements = other, 0 - else: - other = tuple(o if o is not Ellipsis else None for o in other) - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0], int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self * other[0] + ZeroOrMore(self) - elif isinstance(other[0], int) and isinstance(other[1], int): - minElements, optElements = other - optElements -= minElements - else: - return NotImplemented - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError( - "second tuple value must be greater or equal to first tuple value" - ) - if minElements == optElements == 0: - return And([]) - - if optElements: - - def makeOptionalList(n): - if n > 1: - return Opt(self + makeOptionalList(n - 1)) - else: - return Opt(self) - - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self] * minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self] * minElements) - return ret - - def __rmul__(self, other) -> "ParserElement": - return self.__mul__(other) - - def __or__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator - returns :class:`MatchFirst` - """ - if other is Ellipsis: - return _PendingSkip(self, must_skip=True) - - if isinstance(other, str_type): - # `expr | ""` is equivalent to `Opt(expr)` - if other == "": - return Opt(self) - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return MatchFirst([self, other]) - - def __ror__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return other | self - - def __xor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator - returns :class:`Or` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return Or([self, other]) - - def __rxor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return other ^ self - - def __and__(self, other) -> "ParserElement": - """ - Implementation of ``&`` operator - returns :class:`Each` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return Each([self, other]) - - def __rand__(self, other) -> "ParserElement": - """ - Implementation of ``&`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not 
isinstance(other, ParserElement): - return NotImplemented - return other & self - - def __invert__(self) -> "ParserElement": - """ - Implementation of ``~`` operator - returns :class:`NotAny` - """ - return NotAny(self) - - # disable __iter__ to override legacy use of sequential access to __getitem__ to - # iterate over a sequence - __iter__ = None - - def __getitem__(self, key): - """ - use ``[]`` indexing notation as a short form for expression repetition: - - - ``expr[n]`` is equivalent to ``expr*n`` - - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` - - ``expr[n, ...]`` or ``expr[n,]`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` - - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` - - ``None`` may be used in place of ``...``. - - Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception - if more than ``n`` ``expr``\\ s exist in the input stream. If this behavior is - desired, then write ``expr[..., n] + ~expr``. - - For repetition with a stop_on expression, use slice notation: - - - ``expr[...: end_expr]`` and ``expr[0, ...: end_expr]`` are equivalent to ``ZeroOrMore(expr, stop_on=end_expr)`` - - ``expr[1, ...: end_expr]`` is equivalent to ``OneOrMore(expr, stop_on=end_expr)`` - - """ - - stop_on_defined = False - stop_on = NoMatch() - if isinstance(key, slice): - key, stop_on = key.start, key.stop - if key is None: - key = ... - stop_on_defined = True - elif isinstance(key, tuple) and isinstance(key[-1], slice): - key, stop_on = (key[0], key[1].start), key[1].stop - stop_on_defined = True - - # convert single arg keys to tuples - if isinstance(key, str_type): - key = (key,) - try: - iter(key) - except TypeError: - key = (key, key) - - if len(key) > 2: - raise TypeError( - f"only 1 or 2 index arguments supported ({key[:5]}{f'... [{len(key)}]' if len(key) > 5 else ''})" - ) - - # clip to 2 elements - ret = self * tuple(key[:2]) - ret = typing.cast(_MultipleMatch, ret) - - if stop_on_defined: - ret.stopOn(stop_on) - - return ret - - def __call__(self, name: typing.Optional[str] = None) -> "ParserElement": - """ - Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be - passed as ``True``. - - If ``name`` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") - userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") - """ - if name is not None: - return self._setResultsName(name) - - return self.copy() - - def suppress(self) -> "ParserElement": - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress(self) - - def ignore_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Enables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. 
-
- :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
- """
- self.skipWhitespace = True
- return self
-
- def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
- """
- Disables the skipping of whitespace before matching the characters in the
- :class:`ParserElement`'s defined pattern. This is normally only used internally by
- the pyparsing module, but may be needed in some whitespace-sensitive grammars.
-
- :param recursive: If ``True`` (the default), also disable whitespace skipping in child elements (if any)
- """
- self.skipWhitespace = False
- return self
-
- def set_whitespace_chars(
- self, chars: Union[Set[str], str], copy_defaults: bool = False
- ) -> "ParserElement":
- """
- Overrides the default whitespace chars
- """
- self.skipWhitespace = True
- self.whiteChars = set(chars)
- self.copyDefaultWhiteChars = copy_defaults
- return self
-
- def parse_with_tabs(self) -> "ParserElement":
- """
- Overrides default behavior to expand ``<TAB>`` characters to spaces before parsing the input string.
- Must be called before ``parse_string`` when the input grammar contains elements that
- match ``<TAB>`` characters.
- """
- self.keepTabs = True
- return self
-
- def ignore(self, other: "ParserElement") -> "ParserElement":
- """
- Define expression to be ignored (e.g., comments) while doing pattern
- matching; may be called repeatedly, to define multiple comment or other
- ignorable patterns.
-
- Example::
-
- patt = Word(alphas)[...]
- patt.parse_string('ablaj /* comment */ lskjd')
- # -> ['ablaj']
-
- patt.ignore(c_style_comment)
- patt.parse_string('ablaj /* comment */ lskjd')
- # -> ['ablaj', 'lskjd']
- """
- if isinstance(other, str_type):
- other = Suppress(other)
-
- if isinstance(other, Suppress):
- if other not in self.ignoreExprs:
- self.ignoreExprs.append(other)
- else:
- self.ignoreExprs.append(Suppress(other.copy()))
- return self
-
- def set_debug_actions(
- self,
- start_action: DebugStartAction,
- success_action: DebugSuccessAction,
- exception_action: DebugExceptionAction,
- ) -> "ParserElement":
- """
- Customize display of debugging messages while doing pattern matching:
-
- - ``start_action`` - method to be called when an expression is about to be parsed;
- should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
-
- - ``success_action`` - method to be called when an expression has successfully parsed;
- should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
-
- - ``exception_action`` - method to be called when expression fails to parse;
- should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
- """
- self.debugActions = self.DebugActions(
- start_action or _default_start_debug_action, # type: ignore[truthy-function]
- success_action or _default_success_debug_action, # type: ignore[truthy-function]
- exception_action or _default_exception_debug_action, # type: ignore[truthy-function]
- )
- self.debug = True
- return self
-
- def set_debug(self, flag: bool = True, recurse: bool = False) -> "ParserElement":
- """
- Enable display of debugging messages while doing pattern matching.
- Set ``flag`` to ``True`` to enable, ``False`` to disable.
- Set ``recurse`` to ``True`` to set the debug flag on this expression and all sub-expressions.
- - Example:: - - wd = Word(alphas).set_name("alphaword") - integer = Word(nums).set_name("numword") - term = wd | integer - - # turn on debugging for wd - wd.set_debug() - - term[1, ...].parse_string("abc 123 xyz 890") - - prints:: - - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using :class:`set_debug_actions`. Prior to attempting - to match the ``wd`` expression, the debugging message ``"Match at loc (,)"`` - is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` - message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``. - """ - if recurse: - for expr in self.visit_all(): - expr.set_debug(flag, recurse=False) - return self - - if flag: - self.set_debug_actions( - _default_start_debug_action, - _default_success_debug_action, - _default_exception_debug_action, - ) - else: - self.debug = False - return self - - @property - def default_name(self) -> str: - if self._defaultName is None: - self._defaultName = self._generateDefaultName() - return self._defaultName - - @abstractmethod - def _generateDefaultName(self) -> str: - """ - Child classes must define this method, which defines how the ``default_name`` is set. - """ - - def set_name(self, name: str) -> "ParserElement": - """ - Define name for this expression, makes debugging and exception messages clearer. - - Example:: - - integer = Word(nums) - integer.parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) - - integer.set_name("integer") - integer.parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.customName = name - self.errmsg = f"Expected {self.name}" - if __diag__.enable_debug_on_named_expressions: - self.set_debug() - return self - - @property - def name(self) -> str: - # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name - return self.customName if self.customName is not None else self.default_name - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return str(self) - - def streamline(self) -> "ParserElement": - self.streamlined = True - self._defaultName = None - return self - - def recurse(self) -> List["ParserElement"]: - return [] - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.recurse(): - e._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - """ - Check defined expressions for valid structure, check for infinite recursive definitions. 
- """ - warnings.warn( - "ParserElement.validate() is deprecated, and should not be used to check for left recursion", - DeprecationWarning, - stacklevel=2, - ) - self._checkRecursion([]) - - def parse_file( - self, - file_or_filename: Union[str, Path, TextIO], - encoding: str = "utf-8", - parse_all: bool = False, - *, - parseAll: bool = False, - ) -> ParseResults: - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - parseAll = parseAll or parse_all - try: - file_or_filename = typing.cast(TextIO, file_or_filename) - file_contents = file_or_filename.read() - except AttributeError: - file_or_filename = typing.cast(str, file_or_filename) - with open(file_or_filename, "r", encoding=encoding) as f: - file_contents = f.read() - try: - return self.parse_string(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def __eq__(self, other): - if self is other: - return True - elif isinstance(other, str_type): - return self.matches(other, parse_all=True) - elif isinstance(other, ParserElement): - return vars(self) == vars(other) - return False - - def __hash__(self): - return id(self) - - def matches( - self, test_string: str, parse_all: bool = True, *, parseAll: bool = True - ) -> bool: - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - - ``test_string`` - to test against this expression for a match - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - parseAll = parseAll and parse_all - try: - self.parse_string(str(test_string), parse_all=parseAll) - return True - except ParseBaseException: - return False - - def run_tests( - self, - tests: Union[str, List[str]], - parse_all: bool = True, - comment: typing.Optional[Union["ParserElement", str]] = "#", - full_dump: bool = True, - print_results: bool = True, - failure_tests: bool = False, - post_parse: typing.Optional[Callable[[str, ParseResults], str]] = None, - file: typing.Optional[TextIO] = None, - with_line_numbers: bool = False, - *, - parseAll: bool = True, - fullDump: bool = True, - printResults: bool = True, - failureTests: bool = False, - postParse: typing.Optional[Callable[[str, ParseResults], str]] = None, - ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]: - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. 
- - Parameters: - - - ``tests`` - a list of separate test strings, or a multiline string of test strings - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - ``print_results`` - (default= ``True``) prints test output to stdout - - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing - - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - ``file`` - (default= ``None``) optional file-like object to which test output will be written; - if None, will default to ``sys.stdout`` - - ``with_line_numbers`` - default= ``False``) show test strings with line and column numbers - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failure_tests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.run_tests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.run_tests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failure_tests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading ``'r'``.) 
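-
-        A ``post_parse`` callback can be used to customize what is reported for each
-        successful test. A minimal sketch (the ``show_count`` name here is only
-        illustrative, not part of the API)::
-
-            def show_count(test_string, results):
-                return f"parsed {len(results)} tokens"
-
-            Word(nums)[1, ...].run_tests("100 200 300", post_parse=show_count)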
- """ - from .testing import pyparsing_test - - parseAll = parseAll and parse_all - fullDump = fullDump and full_dump - printResults = printResults and print_results - failureTests = failureTests or failure_tests - postParse = postParse or post_parse - if isinstance(tests, str_type): - tests = typing.cast(str, tests) - line_strip = type(tests).strip - tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()] - comment_specified = comment is not None - if comment_specified: - if isinstance(comment, str_type): - comment = typing.cast(str, comment) - comment = Literal(comment) - comment = typing.cast(ParserElement, comment) - if file is None: - file = sys.stdout - print_ = file.write - - result: Union[ParseResults, Exception] - allResults: List[Tuple[str, Union[ParseResults, Exception]]] = [] - comments: List[str] = [] - success = True - NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) - BOM = "\ufeff" - nlstr = "\n" - for t in tests: - if comment_specified and comment.matches(t, False) or comments and not t: - comments.append( - pyparsing_test.with_line_numbers(t) if with_line_numbers else t - ) - continue - if not t: - continue - out = [ - f"{nlstr}{nlstr.join(comments) if comments else ''}", - pyparsing_test.with_line_numbers(t) if with_line_numbers else t, - ] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = NL.transform_string(t.lstrip(BOM)) - result = self.parse_string(t, parse_all=parseAll) - except ParseBaseException as pe: - fatal = "(FATAL) " if isinstance(pe, ParseFatalException) else "" - out.append(pe.explain()) - out.append(f"FAIL: {fatal}{pe}") - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(pe.__traceback__)) - success = success and failureTests - result = pe - except Exception as exc: - out.append(f"FAIL-EXCEPTION: {type(exc).__name__}: {exc}") - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(exc.__traceback__)) - success = success and failureTests - result = exc - else: - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - if isinstance(pp_value, ParseResults): - out.append(pp_value.dump()) - else: - out.append(str(pp_value)) - else: - out.append(result.dump()) - except Exception as e: - out.append(result.dump(full=fullDump)) - out.append( - f"{postParse.__name__} failed: {type(e).__name__}: {e}" - ) - else: - out.append(result.dump(full=fullDump)) - out.append("") - - if printResults: - print_("\n".join(out)) - - allResults.append((t, result)) - - return success, allResults - - def create_diagram( - self, - output_html: Union[TextIO, Path, str], - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, - embed: bool = False, - **kwargs, - ) -> None: - """ - Create a railroad diagram for the parser. 
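-
-        Example (a minimal usage sketch; generating diagrams requires the optional
-        dependencies installed via ``pip install pyparsing[diagrams]``, and the
-        output filename shown here is arbitrary)::
-
-            number = Word(nums).set_name("number")
-            parser = (number + Opt("." + number)).set_name("decimal")
-            parser.create_diagram("decimal_diagram.html")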
-
-        Parameters:
-
-        - ``output_html`` (str or file-like object) - output target for generated
-          diagram HTML
-        - ``vertical`` (int) - threshold for formatting multiple alternatives vertically
-          instead of horizontally (default=3)
-        - ``show_results_names`` - bool flag whether diagram should show annotations for
-          defined results names
-        - ``show_groups`` - bool flag whether groups should be highlighted with an unlabeled surrounding box
-        - ``embed`` - bool flag whether generated HTML should omit <HTML>, <HEAD>, and <BODY> tags to embed
-          the resulting HTML in an enclosing HTML source
-        - ``head`` - str containing additional HTML to insert into the <HEAD> section of the generated code;
-          can be used to insert custom CSS styling
-        - ``body`` - str containing additional HTML to insert at the beginning of the <BODY> section of the
-          generated code
-
-        Additional diagram-formatting keyword arguments can also be included;
-        see railroad.Diagram class.
-        """
-
-        try:
-            from .diagram import to_railroad, railroad_to_html
-        except ImportError as ie:
-            raise Exception(
-                "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
-            ) from ie
-
-        self.streamline()
-
-        railroad = to_railroad(
-            self,
-            vertical=vertical,
-            show_results_names=show_results_names,
-            show_groups=show_groups,
-            diagram_kwargs=kwargs,
-        )
-        if not isinstance(output_html, (str, Path)):
-            # we were passed a file-like object, just write to it
-            output_html.write(railroad_to_html(railroad, embed=embed, **kwargs))
-            return
-
-        with open(output_html, "w", encoding="utf-8") as diag_file:
-            diag_file.write(railroad_to_html(railroad, embed=embed, **kwargs))
-
-    # Compatibility synonyms
-    # fmt: off
-    inlineLiteralsUsing = replaced_by_pep8("inlineLiteralsUsing", inline_literals_using)
-    setDefaultWhitespaceChars = replaced_by_pep8(
-        "setDefaultWhitespaceChars", set_default_whitespace_chars
-    )
-    setResultsName = replaced_by_pep8("setResultsName", set_results_name)
-    setBreak = replaced_by_pep8("setBreak", set_break)
-    setParseAction = replaced_by_pep8("setParseAction", set_parse_action)
-    addParseAction = replaced_by_pep8("addParseAction", add_parse_action)
-    addCondition = replaced_by_pep8("addCondition", add_condition)
-    setFailAction = replaced_by_pep8("setFailAction", set_fail_action)
-    tryParse = replaced_by_pep8("tryParse", try_parse)
-    enableLeftRecursion = replaced_by_pep8("enableLeftRecursion", enable_left_recursion)
-    enablePackrat = replaced_by_pep8("enablePackrat", enable_packrat)
-    parseString = replaced_by_pep8("parseString", parse_string)
-    scanString = replaced_by_pep8("scanString", scan_string)
-    transformString = replaced_by_pep8("transformString", transform_string)
-    searchString = replaced_by_pep8("searchString", search_string)
-    ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace)
-    leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace)
-    setWhitespaceChars = replaced_by_pep8("setWhitespaceChars", set_whitespace_chars)
-    parseWithTabs = replaced_by_pep8("parseWithTabs", parse_with_tabs)
-    setDebugActions = replaced_by_pep8("setDebugActions", set_debug_actions)
-    setDebug = replaced_by_pep8("setDebug", set_debug)
-    setName = replaced_by_pep8("setName", set_name)
-    parseFile = replaced_by_pep8("parseFile", parse_file)
-    runTests = replaced_by_pep8("runTests", run_tests)
-    canParseNext = can_parse_next
-    resetCache = reset_cache
-    defaultName = default_name
-    # fmt: on
-
-
-class _PendingSkip(ParserElement):
-    # internal placeholder class to hold a place where '...'
is added to a parser element, - # once another ParserElement is added, this placeholder will be replaced with a SkipTo - def __init__(self, expr: ParserElement, must_skip: bool = False): - super().__init__() - self.anchor = expr - self.must_skip = must_skip - - def _generateDefaultName(self) -> str: - return str(self.anchor + Empty()).replace("Empty", "...") - - def __add__(self, other) -> "ParserElement": - skipper = SkipTo(other).set_name("...")("_skipped*") - if self.must_skip: - - def must_skip(t): - if not t._skipped or t._skipped.as_list() == [""]: - del t[0] - t.pop("_skipped", None) - - def show_skip(t): - if t._skipped.as_list()[-1:] == [""]: - t.pop("_skipped") - t["_skipped"] = f"missing <{self.anchor!r}>" - - return ( - self.anchor + skipper().add_parse_action(must_skip) - | skipper().add_parse_action(show_skip) - ) + other - - return self.anchor + skipper + other - - def __repr__(self): - return self.defaultName - - def parseImpl(self, *args): - raise Exception( - "use of `...` expression without following SkipTo target expression" - ) - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - - def __init__(self): - super().__init__(savelist=False) - - def _generateDefaultName(self) -> str: - return type(self).__name__ - - -class NoMatch(Token): - """ - A token that will never match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl(self, instring, loc, doActions=True): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """ - Token to exactly match a specified string. - - Example:: - - Literal('abc').parse_string('abc') # -> ['abc'] - Literal('abc').parse_string('abcdef') # -> ['abc'] - Literal('abc').parse_string('ab') # -> Exception: Expected "abc" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. - """ - - def __new__(cls, match_string: str = "", *, matchString: str = ""): - # Performance tuning: select a subclass with optimized parseImpl - if cls is Literal: - match_string = matchString or match_string - if not match_string: - return super().__new__(Empty) - if len(match_string) == 1: - return super().__new__(_SingleCharLiteral) - - # Default behavior - return super().__new__(cls) - - # Needed to make copy.copy() work correctly if we customize __new__ - def __getnewargs__(self): - return (self.match,) - - def __init__(self, match_string: str = "", *, matchString: str = ""): - super().__init__() - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - self.firstMatchChar = match_string[:1] - self.errmsg = f"Expected {self.name}" - self.mayReturnEmpty = False - self.mayIndexError = False - - def _generateDefaultName(self) -> str: - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar and instring.startswith( - self.match, loc - ): - return loc + self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -class Empty(Literal): - """ - An empty token, will always match. 
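-
-    Example (illustrative; ``Empty`` matches at any position without
-    consuming any input)::
-
-        Empty().parse_string("abc")  # -> [] (matches, consumes nothing)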
- """ - - def __init__(self, match_string="", *, matchString=""): - super().__init__("") - self.mayReturnEmpty = True - self.mayIndexError = False - - def _generateDefaultName(self) -> str: - return "Empty" - - def parseImpl(self, instring, loc, doActions=True): - return loc, [] - - -class _SingleCharLiteral(Literal): - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar: - return loc + 1, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -ParserElement._literalStringClass = Literal - - -class Keyword(Token): - """ - Token to exactly match a specified string as a keyword, that is, - it must be immediately preceded and followed by whitespace or - non-keyword characters. Compare with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``ident_chars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parse_string("start") # -> ['start'] - Keyword("start").parse_string("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. - """ - - DEFAULT_KEYWORD_CHARS = alphanums + "_$" - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - caseless: bool = False, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - super().__init__() - identChars = identChars or ident_chars - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Keyword; use Empty() instead") - self.errmsg = f"Expected {type(self).__name__} {self.name}" - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = match_string.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def _generateDefaultName(self) -> str: - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - errmsg = self.errmsg - errloc = loc - if self.caseless: - if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: - if loc == 0 or instring[loc - 1].upper() not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen].upper() not in self.identChars - ): - return loc + self.matchLen, self.match - - # followed by keyword char - errmsg += ", was immediately followed by keyword character" - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - elif ( - instring[loc] == self.firstMatchChar - and self.matchLen == 1 - or instring.startswith(self.match, loc) - ): - if loc == 0 or instring[loc - 1] not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars - ): - return loc + self.matchLen, self.match - - # followed by keyword char - errmsg += ", keyword was immediately followed by 
keyword character" - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - raise ParseException(instring, errloc, errmsg, self) - - @staticmethod - def set_default_keyword_chars(chars) -> None: - """ - Overrides the default characters used by :class:`Keyword` expressions. - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - - setDefaultKeywordChars = set_default_keyword_chars - - -class CaselessLiteral(Literal): - """ - Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - - CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - match_string = matchString or match_string - super().__init__(match_string.upper()) - # Preserve the defining literal. - self.returnString = match_string - self.errmsg = f"Expected {self.name}" - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc : loc + self.matchLen].upper() == self.match: - return loc + self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - identChars = identChars or ident_chars - match_string = matchString or match_string - super().__init__(match_string, identChars, caseless=True) - - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. - :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters - - ``max_mismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. 
- - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) - patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - - def __init__( - self, - match_string: str, - max_mismatches: typing.Optional[int] = None, - *, - maxMismatches: int = 1, - caseless=False, - ): - maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches - super().__init__() - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = f"Expected {self.match_string!r} (with up to {self.maxMismatches} mismatches)" - self.caseless = caseless - self.mayIndexError = False - self.mayReturnEmpty = False - - def _generateDefaultName(self) -> str: - return f"{type(self).__name__}:{self.match_string!r}" - - def parseImpl(self, instring, loc, doActions=True): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc, s_m in enumerate( - zip(instring[loc:maxloc], match_string) - ): - src, mat = s_m - if self.caseless: - src, mat = src.lower(), mat.lower() - - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = start + match_stringloc + 1 - results = ParseResults([instring[start:loc]]) - results["original"] = match_string - results["mismatches"] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """Token for matching words composed of allowed character sets. - - Parameters: - - - ``init_chars`` - string of all characters that should be used to - match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; - if ``body_chars`` is also specified, then this is the string of - initial characters - - ``body_chars`` - string of characters that - can be used for matching after a matched initial character as - given in ``init_chars``; if omitted, same as the initial characters - (default=``None``) - - ``min`` - minimum number of characters to match (default=1) - - ``max`` - maximum number of characters to match (default=0) - - ``exact`` - exact number of characters to match (default=0) - - ``as_keyword`` - match as a keyword (default=``False``) - - ``exclude_chars`` - characters that might be - found in the input ``body_chars`` string but which should not be - accepted for matching ;useful to define a word of all - printables except for one or two characters, for instance - (default=``None``) - - :class:`srange` is useful for defining custom character set strings - for defining :class:`Word` expressions, using range notation from - regular expression character sets. - - A common mistake is to use :class:`Word` to match a specific literal - string, as in ``Word("Address")``. Remember that :class:`Word` - uses the string argument to define *sets* of matchable characters. 
- This expression would match "Add", "AAA", "dAred", or any other word - made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an - exact literal string, use :class:`Literal` or :class:`Keyword`. - - pyparsing includes helper strings for building Words: - - - :class:`alphas` - - :class:`nums` - - :class:`alphanums` - - :class:`hexnums` - - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 - - accented, tilded, umlauted, etc.) - - :class:`punc8bit` (non-alphabetic characters in ASCII range - 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - :class:`printables` (any non-whitespace character) - - ``alphas``, ``nums``, and ``printables`` are also defined in several - Unicode sets - see :class:`pyparsing_unicode``. - - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capitalized_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums + '-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, exclude_chars=",") - """ - - def __init__( - self, - init_chars: str = "", - body_chars: typing.Optional[str] = None, - min: int = 1, - max: int = 0, - exact: int = 0, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - initChars: typing.Optional[str] = None, - bodyChars: typing.Optional[str] = None, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - initChars = initChars or init_chars - bodyChars = bodyChars or body_chars - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__() - if not initChars: - raise ValueError( - f"invalid {type(self).__name__}, initChars cannot be empty string" - ) - - initChars_set = set(initChars) - if excludeChars: - excludeChars_set = set(excludeChars) - initChars_set -= excludeChars_set - if bodyChars: - bodyChars = "".join(set(bodyChars) - excludeChars_set) - self.initChars = initChars_set - self.initCharsOrig = "".join(sorted(initChars_set)) - - if bodyChars: - self.bodyChars = set(bodyChars) - self.bodyCharsOrig = "".join(sorted(bodyChars)) - else: - self.bodyChars = initChars_set - self.bodyCharsOrig = self.initCharsOrig - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" - ) - - if self.maxSpecified and min > max: - raise ValueError( - f"invalid args, if min and max both specified min must be <= max (min={min}, max={max})" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - min = max = exact - self.maxLen = exact - self.minLen = exact - - self.errmsg = f"Expected {self.name}" - self.mayIndexError = False - self.asKeyword = asKeyword - if self.asKeyword: - self.errmsg += " as a keyword" - - # see if we can make a regex for this Word - if " " not in (self.initChars | self.bodyChars): - if len(self.initChars) == 1: - re_leading_fragment = re.escape(self.initCharsOrig) - else: - re_leading_fragment = f"[{_collapse_string_to_ranges(self.initChars)}]" - - if self.bodyChars == self.initChars: - if max == 0 and self.minLen == 1: - repeat = "+" - elif max == 1: - repeat = "" - else: - if self.minLen != 
self.maxLen: - repeat = f"{{{self.minLen},{'' if self.maxLen == _MAX_INT else self.maxLen}}}" - else: - repeat = f"{{{self.minLen}}}" - self.reString = f"{re_leading_fragment}{repeat}" - else: - if max == 1: - re_body_fragment = "" - repeat = "" - else: - re_body_fragment = f"[{_collapse_string_to_ranges(self.bodyChars)}]" - if max == 0 and self.minLen == 1: - repeat = "*" - elif max == 2: - repeat = "?" if min <= 1 else "" - else: - if min != max: - repeat = f"{{{min - 1 if min > 0 else ''},{max - 1 if max > 0 else ''}}}" - else: - repeat = f"{{{min - 1 if min > 0 else ''}}}" - - self.reString = f"{re_leading_fragment}{re_body_fragment}{repeat}" - - if self.asKeyword: - self.reString = rf"\b{self.reString}\b" - - try: - self.re = re.compile(self.reString) - except re.error: - self.re = None # type: ignore[assignment] - else: - self.re_match = self.re.match - self.parseImpl = self.parseImpl_regex # type: ignore[assignment] - - def _generateDefaultName(self) -> str: - def charsAsStr(s): - max_repr_len = 16 - s = _collapse_string_to_ranges(s, re_escape=False) - - if len(s) > max_repr_len: - return s[: max_repr_len - 3] + "..." - - return s - - if self.initChars != self.bodyChars: - base = f"W:({charsAsStr(self.initChars)}, {charsAsStr(self.bodyChars)})" - else: - base = f"W:({charsAsStr(self.initChars)})" - - # add length specification - if self.minLen > 1 or self.maxLen != _MAX_INT: - if self.minLen == self.maxLen: - if self.minLen == 1: - return base[2:] - else: - return base + f"{{{self.minLen}}}" - elif self.maxLen == _MAX_INT: - return base + f"{{{self.minLen},...}}" - else: - return base + f"{{{self.minLen},{self.maxLen}}}" - return base - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.initChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min(maxloc, instrlen) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - elif self.asKeyword and ( - (start > 0 and instring[start - 1] in bodychars) - or (loc < instrlen and instring[loc] in bodychars) - ): - throwException = True - - if throwException: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def parseImpl_regex(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - -class Char(Word): - """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, - when defining a match of any single character in a string of - characters. - """ - - def __init__( - self, - charset: str, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__( - charset, exact=1, as_keyword=asKeyword, exclude_chars=excludeChars - ) - - -class Regex(Token): - r"""Token for matching strings that match a given regular - expression. Defined with string specifying the regular expression in - a form recognized by the stdlib Python `re module `_. 
-    If the given regex contains named groups (defined using ``(?P<name>...)``),
-    these will be preserved as named :class:`ParseResults`.
-
-    If instead of the Python stdlib ``re`` module you wish to use a different RE module
-    (such as the ``regex`` module), you can do so by building your ``Regex`` object with
-    a compiled RE that was compiled using ``regex``.
-
-    Example::
-
-        realnum = Regex(r"[+-]?\d+\.\d*")
-        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
-        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
-
-        # named fields in a regex will be returned as named results
-        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
-
-        # the Regex class will accept re's compiled using the regex module
-        import regex
-        parser = pp.Regex(regex.compile(r'[0-9]'))
-    """
-
-    def __init__(
-        self,
-        pattern: Any,
-        flags: Union[re.RegexFlag, int] = 0,
-        as_group_list: bool = False,
-        as_match: bool = False,
-        *,
-        asGroupList: bool = False,
-        asMatch: bool = False,
-    ):
-        """The parameters ``pattern`` and ``flags`` are passed
-        to the ``re.compile()`` function as-is. See the Python
-        `re module <https://docs.python.org/3/library/re.html>`_ for an
-        explanation of the acceptable patterns and flags.
-        """
-        super().__init__()
-        asGroupList = asGroupList or as_group_list
-        asMatch = asMatch or as_match
-
-        if isinstance(pattern, str_type):
-            if not pattern:
-                raise ValueError("null string passed to Regex; use Empty() instead")
-
-            self._re = None
-            self.reString = self.pattern = pattern
-            self.flags = flags
-
-        elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
-            self._re = pattern
-            self.pattern = self.reString = pattern.pattern
-            self.flags = flags
-
-        else:
-            raise TypeError(
-                "Regex may only be constructed with a string or a compiled RE object"
-            )
-
-        self.errmsg = f"Expected {self.name}"
-        self.mayIndexError = False
-        self.asGroupList = asGroupList
-        self.asMatch = asMatch
-        if self.asGroupList:
-            self.parseImpl = self.parseImplAsGroupList  # type: ignore [assignment]
-        if self.asMatch:
-            self.parseImpl = self.parseImplAsMatch  # type: ignore [assignment]
-
-    @cached_property
-    def re(self):
-        if self._re:
-            return self._re
-
-        try:
-            return re.compile(self.pattern, self.flags)
-        except re.error:
-            raise ValueError(f"invalid pattern ({self.pattern!r}) passed to Regex")
-
-    @cached_property
-    def re_match(self):
-        return self.re.match
-
-    @cached_property
-    def mayReturnEmpty(self):
-        return self.re_match("") is not None
-
-    def _generateDefaultName(self) -> str:
-        return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
-
-    def parseImpl(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = ParseResults(result.group())
-        d = result.groupdict()
-
-        for k, v in d.items():
-            ret[k] = v
-
-        return loc, ret
-
-    def parseImplAsGroupList(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result.groups()
-        return loc, ret
-
-    def parseImplAsMatch(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result
-        return loc, ret
-
-    def sub(self, repl: str) -> ParserElement:
-        r"""
-        Return :class:`Regex` with an attached parse action to transform the parsed
result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
-
-        Example::
-
-            make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
-            print(make_html.transform_string("h1:main title:"))
-            # prints "<h1>main title</h1>
" - """ - if self.asGroupList: - raise TypeError("cannot use sub() with Regex(as_group_list=True)") - - if self.asMatch and callable(repl): - raise TypeError( - "cannot use sub() with a callable with Regex(as_match=True)" - ) - - if self.asMatch: - - def pa(tokens): - return tokens[0].expand(repl) - - else: - - def pa(tokens): - return self.re.sub(repl, tokens[0]) - - return self.add_parse_action(pa) - - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. - - Defined with the following parameters: - - - ``quote_char`` - string of one or more characters defining the - quote delimiting string - - ``esc_char`` - character to re_escape quotes, typically backslash - (default= ``None``) - - ``esc_quote`` - special quote sequence to re_escape an embedded quote - string (such as SQL's ``""`` to re_escape an embedded ``"``) - (default= ``None``) - - ``multiline`` - boolean indicating whether quotes can span - multiple lines (default= ``False``) - - ``unquote_results`` - boolean indicating whether the matched text - should be unquoted (default= ``True``) - - ``end_quote_char`` - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quote_char) - - ``convert_whitespace_escapes`` - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True``) - - Example:: - - qs = QuotedString('"') - print(qs.search_string('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', end_quote_char='}}') - print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', esc_quote='""') - print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - - ws_map = dict(((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))) - - def __init__( - self, - quote_char: str = "", - esc_char: typing.Optional[str] = None, - esc_quote: typing.Optional[str] = None, - multiline: bool = False, - unquote_results: bool = True, - end_quote_char: typing.Optional[str] = None, - convert_whitespace_escapes: bool = True, - *, - quoteChar: str = "", - escChar: typing.Optional[str] = None, - escQuote: typing.Optional[str] = None, - unquoteResults: bool = True, - endQuoteChar: typing.Optional[str] = None, - convertWhitespaceEscapes: bool = True, - ): - super().__init__() - esc_char = escChar or esc_char - esc_quote = escQuote or esc_quote - unquote_results = unquoteResults and unquote_results - end_quote_char = endQuoteChar or end_quote_char - convert_whitespace_escapes = ( - convertWhitespaceEscapes and convert_whitespace_escapes - ) - quote_char = quoteChar or quote_char - - # remove white space from quote chars - quote_char = quote_char.strip() - if not quote_char: - raise ValueError("quote_char cannot be the empty string") - - if end_quote_char is None: - end_quote_char = quote_char - else: - end_quote_char = end_quote_char.strip() - if not end_quote_char: - raise ValueError("end_quote_char cannot be the empty string") - - self.quote_char: str = quote_char - self.quote_char_len: int = len(quote_char) - self.first_quote_char: str = quote_char[0] - self.end_quote_char: str = end_quote_char - self.end_quote_char_len: int = len(end_quote_char) - self.esc_char: str = esc_char or "" - self.has_esc_char: bool = esc_char is not None - self.esc_quote: str = esc_quote or "" - self.unquote_results: bool = 
unquote_results - self.convert_whitespace_escapes: bool = convert_whitespace_escapes - self.multiline = multiline - self.re_flags = re.RegexFlag(0) - - # fmt: off - # build up re pattern for the content between the quote delimiters - inner_pattern = [] - - if esc_quote: - inner_pattern.append(rf"(?:{re.escape(esc_quote)})") - - if esc_char: - inner_pattern.append(rf"(?:{re.escape(esc_char)}.)") - - if len(self.end_quote_char) > 1: - inner_pattern.append( - "(?:" - + "|".join( - f"(?:{re.escape(self.end_quote_char[:i])}(?!{re.escape(self.end_quote_char[i:])}))" - for i in range(len(self.end_quote_char) - 1, 0, -1) - ) - + ")" - ) - - if self.multiline: - self.re_flags |= re.MULTILINE | re.DOTALL - inner_pattern.append( - rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}" - rf"{(_escape_regex_range_chars(esc_char) if self.has_esc_char else '')}])" - ) - else: - inner_pattern.append( - rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}\n\r" - rf"{(_escape_regex_range_chars(esc_char) if self.has_esc_char else '')}])" - ) - - self.pattern = "".join( - [ - re.escape(self.quote_char), - "(?:", - '|'.join(inner_pattern), - ")*", - re.escape(self.end_quote_char), - ] - ) - - if self.unquote_results: - if self.convert_whitespace_escapes: - self.unquote_scan_re = re.compile( - rf"({'|'.join(re.escape(k) for k in self.ws_map)})" - rf"|({re.escape(self.esc_char)}.)" - rf"|(\n|.)", - flags=self.re_flags, - ) - else: - self.unquote_scan_re = re.compile( - rf"({re.escape(self.esc_char)}.)" - rf"|(\n|.)", - flags=self.re_flags - ) - # fmt: on - - try: - self.re = re.compile(self.pattern, self.re_flags) - self.reString = self.pattern - self.re_match = self.re.match - except re.error: - raise ValueError(f"invalid pattern {self.pattern!r} passed to Regex") - - self.errmsg = f"Expected {self.name}" - self.mayIndexError = False - self.mayReturnEmpty = True - - def _generateDefaultName(self) -> str: - if self.quote_char == self.end_quote_char and isinstance( - self.quote_char, str_type - ): - return f"string enclosed in {self.quote_char!r}" - - return f"quoted string, starting with {self.quote_char} ending with {self.end_quote_char}" - - def parseImpl(self, instring, loc, doActions=True): - # check first character of opening quote to see if that is a match - # before doing the more complicated regex match - result = ( - instring[loc] == self.first_quote_char - and self.re_match(instring, loc) - or None - ) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - # get ending loc and matched string from regex matching result - loc = result.end() - ret = result.group() - - if self.unquote_results: - # strip off quotes - ret = ret[self.quote_char_len : -self.end_quote_char_len] - - if isinstance(ret, str_type): - # fmt: off - if self.convert_whitespace_escapes: - # as we iterate over matches in the input string, - # collect from whichever match group of the unquote_scan_re - # regex matches (only 1 group will match at any given time) - ret = "".join( - # match group 1 matches \t, \n, etc. 
- self.ws_map[match.group(1)] if match.group(1) - # match group 2 matches escaped characters - else match.group(2)[-1] if match.group(2) - # match group 3 matches any character - else match.group(3) - for match in self.unquote_scan_re.finditer(ret) - ) - else: - ret = "".join( - # match group 1 matches escaped characters - match.group(1)[-1] if match.group(1) - # match group 2 matches any character - else match.group(2) - for match in self.unquote_scan_re.finditer(ret) - ) - # fmt: on - - # replace escaped quotes - if self.esc_quote: - ret = ret.replace(self.esc_quote, self.end_quote_char) - - return loc, ret - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. - - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(DelimitedList(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - - def __init__( - self, - not_chars: str = "", - min: int = 1, - max: int = 0, - exact: int = 0, - *, - notChars: str = "", - ): - super().__init__() - self.skipWhitespace = False - self.notChars = not_chars or notChars - self.notCharsSet = set(self.notChars) - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use" - " Opt(CharsNotIn()) if zero-length char group is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = f"Expected {self.name}" - self.mayReturnEmpty = self.minLen == 0 - self.mayIndexError = False - - def _generateDefaultName(self) -> str: - not_chars_str = _collapse_string_to_ranges(self.notChars) - if len(not_chars_str) > 16: - return f"!W:({self.notChars[: 16 - 3]}...)" - else: - return f"!W:({self.notChars})" - - def parseImpl(self, instring, loc, doActions=True): - notchars = self.notCharsSet - if instring[loc] in notchars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - maxlen = min(start + self.maxLen, len(instring)) - while loc < maxlen and instring[loc] not in notchars: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. 
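-
-    Example (a minimal sketch matching runs of tab characters as significant
-    whitespace; the ``tab`` name is only illustrative)::
-
-        tab = White("\\t")
-        tab.parse_string("\\t\\tdata")  # -> ['\\t\\t']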
-    """
-
-    whiteStrs = {
-        " ": "<SP>",
-        "\t": "<TAB>",
-        "\n": "<LF>",
-        "\r": "<CR>",
-        "\f": "<FF>",
-        "\u00A0": "<NBSP>",
-        "\u1680": "<OGHAM_SPACE_MARK>",
-        "\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
-        "\u2000": "<EN_QUAD>",
-        "\u2001": "<EM_QUAD>",
-        "\u2002": "<EN_SPACE>",
-        "\u2003": "<EM_SPACE>",
-        "\u2004": "<THREE-PER-EM_SPACE>",
-        "\u2005": "<FOUR-PER-EM_SPACE>",
-        "\u2006": "<SIX-PER-EM_SPACE>",
-        "\u2007": "<FIGURE_SPACE>",
-        "\u2008": "<PUNCTUATION_SPACE>",
-        "\u2009": "<THIN_SPACE>",
-        "\u200A": "<HAIR_SPACE>",
-        "\u200B": "<ZERO_WIDTH_SPACE>",
-        "\u202F": "<NNBSP>",
-        "\u205F": "<MMSP>",
-        "\u3000": "<IDEOGRAPHIC_SPACE>",
-    }
-
-    def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0):
-        super().__init__()
-        self.matchWhite = ws
-        self.set_whitespace_chars(
-            "".join(c for c in self.whiteStrs if c not in self.matchWhite),
-            copy_defaults=True,
-        )
-        # self.leave_whitespace()
-        self.mayReturnEmpty = True
-        self.errmsg = f"Expected {self.name}"
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-    def _generateDefaultName(self) -> str:
-        return "".join(White.whiteStrs[c] for c in self.matchWhite)
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] not in self.matchWhite:
-            raise ParseException(instring, loc, self.errmsg, self)
-        start = loc
-        loc += 1
-        maxloc = start + self.maxLen
-        maxloc = min(maxloc, len(instring))
-        while loc < maxloc and instring[loc] in self.matchWhite:
-            loc += 1
-
-        if loc - start < self.minLen:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-
-class PositionToken(Token):
-    def __init__(self):
-        super().__init__()
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-
-
-class GoToColumn(PositionToken):
-    """Token to advance to a specific column of input text; useful for
-    tabular report scraping.
-    """
-
-    def __init__(self, colno: int):
-        super().__init__()
-        self.col = colno
-
-    def preParse(self, instring: str, loc: int) -> int:
-        if col(loc, instring) == self.col:
-            return loc
-
-        instrlen = len(instring)
-        if self.ignoreExprs:
-            loc = self._skipIgnorables(instring, loc)
-        while (
-            loc < instrlen
-            and instring[loc].isspace()
-            and col(loc, instring) != self.col
-        ):
-            loc += 1
-
-        return loc
-
-    def parseImpl(self, instring, loc, doActions=True):
-        thiscol = col(loc, instring)
-        if thiscol > self.col:
-            raise ParseException(instring, loc, "Text not in expected column", self)
-        newloc = loc + self.col - thiscol
-        ret = instring[loc:newloc]
-        return newloc, ret
-
-
-class LineStart(PositionToken):
-    r"""Matches if current position is at the beginning of a line within
-    the parse string
-
-    Example::
-
-        test = '''\
-        AAA this line
-        AAA and this line
-          AAA but not this one
-        B AAA and definitely not this one
-        '''
-
-        for t in (LineStart() + 'AAA' + rest_of_line).search_string(test):
-            print(t)
-
-    prints::
-
-        ['AAA', ' this line']
-        ['AAA', ' and this line']
-
-    """
-
-    def __init__(self):
-        super().__init__()
-        self.leave_whitespace()
-        self.orig_whiteChars = set() | self.whiteChars
-        self.whiteChars.discard("\n")
-        self.skipper = Empty().set_whitespace_chars(self.whiteChars)
-        self.errmsg = "Expected start of line"
-
-    def preParse(self, instring: str, loc: int) -> int:
-        if loc == 0:
-            return loc
-
-        ret = self.skipper.preParse(instring, loc)
-
-        if "\n" in self.orig_whiteChars:
-            while instring[ret : ret + 1] == "\n":
-                ret = self.skipper.preParse(instring, ret + 1)
-
-        return ret
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if col(loc, instring) == 1:
-            return loc, []
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class LineEnd(PositionToken):
-    """Matches if current position
is at the end of a line within the - parse string - """ - - def __init__(self): - super().__init__() - self.whiteChars.discard("\n") - self.set_whitespace_chars(self.whiteChars, copy_defaults=False) - self.errmsg = "Expected end of line" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - if instring[loc] == "\n": - return loc + 1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class StringStart(PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected start of text" - - def parseImpl(self, instring, loc, doActions=True): - # see if entire string up to here is just whitespace and ignoreables - if loc != 0 and loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) - - return loc, [] - - -class StringEnd(PositionToken): - """ - Matches if current position is at the end of the parse string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected end of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - if loc == len(instring): - return loc + 1, [] - if loc > len(instring): - return loc, [] - - raise ParseException(instring, loc, self.errmsg, self) - - -class WordStart(PositionToken): - """Matches if the current position is at the beginning of a - :class:`Word`, and is not preceded by any character in a given - set of ``word_chars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - if ( - instring[loc - 1] in self.wordChars - or instring[loc] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class WordEnd(PositionToken): - """Matches if the current position is at the end of a :class:`Word`, - and is not followed by any character in a given set of ``word_chars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True): - instrlen = len(instring) - if instrlen > 0 and loc < instrlen: - if ( - instring[loc] in self.wordChars - or instring[loc - 1] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. 
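-
-    Example (illustrative; concrete subclasses are usually built with the
-    overloaded operators rather than constructed directly)::
-
-        expr = Word(alphas) + Word(nums)   # -> And
-        expr = Word(alphas) | Word(nums)   # -> MatchFirst
-        expr = Word(alphas) ^ Word(nums)   # -> Or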
- """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(savelist) - self.exprs: List[ParserElement] - if isinstance(exprs, _generatorType): - exprs = list(exprs) - - if isinstance(exprs, str_type): - self.exprs = [self._literalStringClass(exprs)] - elif isinstance(exprs, ParserElement): - self.exprs = [exprs] - elif isinstance(exprs, Iterable): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if any(isinstance(expr, str_type) for expr in exprs): - exprs = ( - self._literalStringClass(e) if isinstance(e, str_type) else e - for e in exprs - ) - self.exprs = list(exprs) - else: - try: - self.exprs = list(exprs) - except TypeError: - self.exprs = [exprs] - self.callPreparse = False - - def recurse(self) -> List[ParserElement]: - return self.exprs[:] - - def append(self, other) -> ParserElement: - self.exprs.append(other) - self._defaultName = None - return self - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().leave_whitespace(recursive) - - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().ignore_whitespace(recursive) - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - return self - - def _generateDefaultName(self) -> str: - return f"{type(self).__name__}:({self.exprs})" - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) - if len(self.exprs) == 2: - other = self.exprs[0] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = other.exprs[:] + [self.exprs[1]] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = self.exprs[:-1] + other.exprs[:] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = f"Expected {self}" - - return self - - def validate(self, validateTrace=None) -> None: - warnings.warn( - "ParserElement.validate() is deprecated, and should not be used to check for left recursion", - DeprecationWarning, - stacklevel=2, - ) - tmp = (validateTrace if validateTrace is not None else [])[:] + [self] - for e in self.exprs: - e.validate(tmp) - 
self._checkRecursion([]) - - def copy(self) -> ParserElement: - ret = super().copy() - ret = typing.cast(ParseExpression, ret) - ret.exprs = [e.copy() for e in self.exprs] - return ret - - def _setResultsName(self, name, listAllMatches=False): - if not ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - return super()._setResultsName(name, listAllMatches) - - for e in self.exprs: - if ( - isinstance(e, ParserElement) - and e.resultsName - and ( - Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ) - ): - warning = ( - "warn_ungrouped_named_tokens_in_collection:" - f" setting results name {name!r} on {type(self).__name__} expression" - f" collides with {e.resultsName!r} on contained expression" - ) - warnings.warn(warning, stacklevel=3) - break - - return super()._setResultsName(name, listAllMatches) - - # Compatibility synonyms - # fmt: off - leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) - ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) - # fmt: on - - -class And(ParseExpression): - """ - Requires all given :class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = Word(alphas)[1, ...] - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.leave_whitespace() - - def _generateDefaultName(self) -> str: - return "-" - - def __init__( - self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True - ): - exprs: List[ParserElement] = list(exprs_arg) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is not Ellipsis: - tmp.append(expr) - continue - - if i < len(exprs) - 1: - skipto_arg: ParserElement = typing.cast( - ParseExpression, (Empty() + exprs[i + 1]) - ).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - continue - - raise Exception("cannot construct And with sequence ending in ...") - exprs[:] = tmp - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - if not isinstance(self.exprs[0], White): - self.set_whitespace_chars( - self.exprs[0].whiteChars, - copy_defaults=self.exprs[0].copyDefaultWhiteChars, - ) - self.skipWhitespace = self.exprs[0].skipWhitespace - else: - self.skipWhitespace = False - else: - self.mayReturnEmpty = True - self.callPreparse = True - - def streamline(self) -> ParserElement: - # collapse any _PendingSkip's - if self.exprs and any( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1] - ): - deleted_expr_marker = NoMatch() - for i, e in enumerate(self.exprs[:-1]): - if e is deleted_expr_marker: - continue - if ( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - ): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = deleted_expr_marker - self.exprs = [e for e in self.exprs if e is not deleted_expr_marker] - - super().streamline() - - # link any IndentedBlocks to the prior expression - 
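-        # (an IndentedBlock needs to know the column of the expression that
-        # precedes it; the loop below walks each adjacent pair of expressions
-        # and attaches a parse action that records that column as the
-        # block's parent_anchor)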
prev: ParserElement - cur: ParserElement - for prev, cur in zip(self.exprs, self.exprs[1:]): - # traverse cur or any first embedded expr of cur looking for an IndentedBlock - # (but watch out for recursive grammar) - seen = set() - while True: - if id(cur) in seen: - break - seen.add(id(cur)) - if isinstance(cur, IndentedBlock): - prev.add_parse_action( - lambda s, l, t, cur_=cur: setattr( - cur_, "parent_anchor", col(l, s) - ) - ) - break - subs = cur.recurse() - next_first = next(iter(subs), None) - if next_first is None: - break - cur = typing.cast(ParserElement, next_first) - - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as callPreParse arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( - instring, loc, doActions, callPreParse=False - ) - errorStop = False - for e in self.exprs[1:]: - # if isinstance(e, And._ErrorStop): - if type(e) is And._ErrorStop: - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException( - instring, len(instring), self.errmsg, self - ) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return self.append(other) # And([self, other]) - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e._checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def _generateDefaultName(self) -> str: - inner = " ".join(str(e) for e in self.exprs) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return f"{{{inner}}}" - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. - - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) - print(number.search_string("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - fatals = [] - if all(e.callPreparse for e in self.exprs): - loc = self.preParse(instring, loc) - for e in self.exprs: - try: - loc2 = e.try_parse(instring, loc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parser_element = e - fatals.append(pfe) - maxException = None - maxExcLoc = -1 - except ParseException as err: - if not fatals: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. 
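-            # (a parse action or condition may reject or shorten a match on
-            # the second pass, so candidates are retried longest-first and
-            # the longest surviving match wins)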
- matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element)))) - max_fatal = fatals[0] - raise max_fatal - - if maxException is not None: - # infer from this check that all alternatives failed at the current position - # so emit this collective error message instead of any single error message - if maxExcLoc == loc: - maxException.msg = self.errmsg - raise maxException - - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ixor__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return self.append(other) # Or([self, other]) - - def _generateDefaultName(self) -> str: - return f"{{{' ^ '.join(str(e) for e in self.exprs)}}}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warning = ( - "warn_multiple_tokens_in_named_alternation:" - f" setting results name {name!r} on {type(self).__name__} expression" - " will return a list of all parsed tokens in an And alternative," - " in prior versions only the first token was returned; enclose" - " contained argument in Group" - ) - warnings.warn(warning, stacklevel=3) - - return super()._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - more than one expression matches, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) - print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - if self.exprs: - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - - for e in self.exprs: - try: - return e._parse(instring, loc, doActions) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parser_element = e - raise - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - - if maxException is not None: - # infer from this check that all alternatives failed at the current position - # so emit this collective error message instead of any individual error message - if maxExcLoc == loc: - maxException.msg = self.errmsg - raise maxException - - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return self.append(other) # MatchFirst([self, other]) - - def _generateDefaultName(self) -> str: - return f"{{{' | '.join(str(e) for e in self.exprs)}}}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warning = ( - "warn_multiple_tokens_in_named_alternation:" - f" setting results name {name!r} on {type(self).__name__} expression" - " will return a list of all parsed tokens in an And alternative," - " in prior versions only the first token was returned; enclose" - " contained argument in Group" - ) - warnings.warn(warning, stacklevel=3) - - return super()._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. 
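-
-    A minimal sketch of the idea (the keywords here are illustrative only)::
-
-        # 'KEY' and 'VALUE' may appear in either order, but both must appear
-        pair = Keyword("KEY") & Keyword("VALUE")
-        pair.parse_string("VALUE KEY")  # -> ['VALUE', 'KEY']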
- - Example:: - - color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) - - shape_spec.run_tests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def __iand__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - return NotImplemented - return self.append(other) # Each([self, other]) - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict( - (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) - ) - opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] - opt2 = [ - e - for e in self.exprs - if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) - ] - self.optionals = opt1 + opt2 - self.multioptionals = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, _MultipleMatch) - ] - self.multirequired = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, OneOrMore) - ] - self.required = [ - e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) - ] - self.required += self.multirequired - self.initExprGroups = False - - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - multis = self.multioptionals[:] - matchOrder = [] - - keepMatching = True - failed = [] - fatals = [] - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + multis - failed.clear() - fatals.clear() - for e in tmpExprs: - try: - tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parser_element = e - fatals.append(pfe) - failed.append(e) - except 
ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - # look for any ParseFatalExceptions - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element)))) - max_fatal = fatals[0] - raise max_fatal - - if tmpReqd: - missing = ", ".join([str(e) for e in tmpReqd]) - raise ParseException( - instring, - loc, - f"Missing one or more required elements ({missing})", - ) - - # add any unmatched Opts, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] - - total_results = ParseResults([]) - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - total_results += results - - return loc, total_results - - def _generateDefaultName(self) -> str: - return f"{{{' & '.join(str(e) for e in self.exprs)}}}" - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): - super().__init__(savelist) - if isinstance(expr, str_type): - expr_str = typing.cast(str, expr) - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr_str) # type: ignore[call-arg] - elif issubclass(type(self), self._literalStringClass): - expr = Literal(expr_str) - else: - expr = self._literalStringClass(Literal(expr_str)) # type: ignore[assignment, call-arg] - expr = typing.cast(ParserElement, expr) - self.expr = expr - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.set_whitespace_chars( - expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars - ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def recurse(self) -> List[ParserElement]: - return [self.expr] if self.expr is not None else [] - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is None: - raise ParseException(instring, loc, "No expression defined", self) - - try: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - except ParseBaseException as pbe: - if not isinstance(self, Forward) or self.customName is not None: - if self.errmsg: - pbe.msg = self.errmsg - raise - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - super().leave_whitespace(recursive) - - if recursive: - if self.expr is not None: - self.expr = self.expr.copy() - self.expr.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - super().ignore_whitespace(recursive) - - if recursive: - if self.expr is not None: - self.expr = self.expr.copy() - self.expr.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if not isinstance(other, Suppress) or other not in self.ignoreExprs: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - - return self - - def streamline(self) -> ParserElement: - super().streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def _checkRecursion(self, parseElementList): - if self in parseElementList: - raise 
RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - warnings.warn( - "ParserElement.validate() is deprecated, and should not be used to check for left recursion", - DeprecationWarning, - stacklevel=2, - ) - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def _generateDefaultName(self) -> str: - return f"{type(self).__name__}:({self.expr})" - - # Compatibility synonyms - # fmt: off - leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) - ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) - # fmt: on - - -class IndentedBlock(ParseElementEnhance): - """ - Expression to match one or more expressions at a given indentation level. - Useful for parsing text where structure is implied by indentation (like Python source code). - """ - - class _Indent(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = f"expected indent at column {ref_col}" - self.add_condition(lambda s, l, t: col(l, s) == ref_col) - - class _IndentGreater(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = f"expected indent at column greater than {ref_col}" - self.add_condition(lambda s, l, t: col(l, s) > ref_col) - - def __init__( - self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True - ): - super().__init__(expr, savelist=True) - # if recursive: - # raise NotImplementedError("IndentedBlock with recursive is not implemented") - self._recursive = recursive - self._grouped = grouped - self.parent_anchor = 1 - - def parseImpl(self, instring, loc, doActions=True): - # advance parse position to non-whitespace by using an Empty() - # this should be the column to be used for all subsequent indented lines - anchor_loc = Empty().preParse(instring, loc) - - # see if self.expr matches at the current location - if not it will raise an exception - # and no further work is necessary - self.expr.try_parse(instring, anchor_loc, do_actions=doActions) - - indent_col = col(anchor_loc, instring) - peer_detect_expr = self._Indent(indent_col) - - inner_expr = Empty() + peer_detect_expr + self.expr - if self._recursive: - sub_indent = self._IndentGreater(indent_col) - nested_block = IndentedBlock( - self.expr, recursive=self._recursive, grouped=self._grouped - ) - nested_block.set_debug(self.debug) - nested_block.parent_anchor = indent_col - inner_expr += Opt(sub_indent + nested_block) - - inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") - block = OneOrMore(inner_expr) - - trailing_undent = self._Indent(self.parent_anchor) | StringEnd() - - if self._grouped: - wrapper = Group - else: - wrapper = lambda expr: expr - return (wrapper(block) + Optional(trailing_undent)).parseImpl( - instring, anchor_loc, doActions - ) - - -class AtStringStart(ParseElementEnhance): - """Matches if expression matches at the beginning of the parse - string:: - - AtStringStart(Word(nums)).parse_string("123") - # prints ["123"] - - AtStringStart(Word(nums)).parse_string(" 123") - # raises ParseException - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - raise ParseException(instring, loc, "not found at 
string start") - return super().parseImpl(instring, loc, doActions) - - -class AtLineStart(ParseElementEnhance): - r"""Matches if an expression matches at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (AtLineStart('AAA') + rest_of_line).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) != 1: - raise ParseException(instring, loc, "not found at line start") - return super().parseImpl(instring, loc, doActions) - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. - - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - ``expr`` - expression that must match prior to the current parse - location - - ``retreat`` - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, :class:`Literal`, - :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` - with a specified exact or maximum length, then the retreat - parameter is not required. Otherwise, retreat must be specified to - give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
-
-    Example::
-
-        # VB-style variable names with type prefixes
-        int_var = PrecededBy("#") + pyparsing_common.identifier
-        str_var = PrecededBy("$") + pyparsing_common.identifier
-
-    """
-
-    def __init__(
-        self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None
-    ):
-        super().__init__(expr)
-        self.expr = self.expr().leave_whitespace()
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.exact = False
-        if isinstance(expr, str_type):
-            expr = typing.cast(str, expr)
-            retreat = len(expr)
-            self.exact = True
-        elif isinstance(expr, (Literal, Keyword)):
-            retreat = expr.matchLen
-            self.exact = True
-        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
-            retreat = expr.maxLen
-            self.exact = True
-        elif isinstance(expr, PositionToken):
-            retreat = 0
-            self.exact = True
-        self.retreat = retreat
-        self.errmsg = f"not preceded by {expr}"
-        self.skipWhitespace = False
-        self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
-
-    def parseImpl(self, instring, loc=0, doActions=True):
-        if self.exact:
-            if loc < self.retreat:
-                raise ParseException(instring, loc, self.errmsg)
-            start = loc - self.retreat
-            _, ret = self.expr._parse(instring, start)
-            return loc, ret
-
-        # retreat specified a maximum lookbehind window, iterate
-        test_expr = self.expr + StringEnd()
-        instring_slice = instring[max(0, loc - self.retreat) : loc]
-        last_expr = ParseException(instring, loc, self.errmsg)
-
-        for offset in range(1, min(loc, self.retreat + 1) + 1):
-            try:
-                # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
-                _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)
-            except ParseBaseException as pbe:
-                last_expr = pbe
-            else:
-                break
-        else:
-            raise last_expr
-
-        return loc, ret
-
-
-class Located(ParseElementEnhance):
-    """
-    Decorates a returned token with its starting and ending
-    locations in the input string.
-
-    This helper adds the following results names:
-
-    - ``locn_start`` - location where matched expression begins
-    - ``locn_end`` - location where matched expression ends
-    - ``value`` - the actual parsed results
-
-    Be careful if the input text contains ``<TAB>`` characters, you
-    may want to call :class:`ParserElement.parse_with_tabs`
-
-    Example::
-
-        wd = Word(alphas)
-        for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
-            print(match)
-
-    prints::
-
-        [0, ['ljsdf'], 5]
-        [8, ['lksdjjf'], 15]
-        [18, ['lkkjj'], 23]
-
-    """
-
-    def parseImpl(self, instring, loc, doActions=True):
-        start = loc
-        loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
-        ret_tokens = ParseResults([start, tokens, loc])
-        ret_tokens["locn_start"] = start
-        ret_tokens["value"] = tokens
-        ret_tokens["locn_end"] = loc
-        if self.resultsName:
-            # must return as a list, so that the name will be attached to the complete group
-            return loc, [ret_tokens]
-        else:
-            return loc, ret_tokens
-
-
-class NotAny(ParseElementEnhance):
-    """
-    Lookahead to disallow matching with the given parse expression.
-    ``NotAny`` does *not* advance the parsing position within the
-    input string, it only verifies that the specified parse expression
-    does *not* match at the current position. Also, ``NotAny`` does
-    *not* skip over leading whitespace. ``NotAny`` always returns
-    a null token list. May be constructed using the ``'~'`` operator.
- - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Opt(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infix_notation - boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] - - # integers that are followed by "." are actually floats - integer = Word(nums) + ~Char(".") - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - # do NOT use self.leave_whitespace(), don't want to propagate to exprs - # self.leave_whitespace() - self.skipWhitespace = False - - self.mayReturnEmpty = True - self.errmsg = f"Found unwanted token, {self.expr}" - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.can_parse_next(instring, loc, do_actions=doActions): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def _generateDefaultName(self) -> str: - return f"~{{{self.expr}}}" - - -class _MultipleMatch(ParseElementEnhance): - def __init__( - self, - expr: Union[str, ParserElement], - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr) - stopOn = stopOn or stop_on - self.saveAsList = True - ender = stopOn - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender) -> ParserElement: - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.try_parse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions) - try: - hasIgnoreExprs = not not self.ignoreExprs - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in [self.expr] + self.expr.recurse(): - if ( - isinstance(e, ParserElement) - and e.resultsName - and ( - Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ) - ): - warning = ( - "warn_ungrouped_named_tokens_in_collection:" - f" setting results name {name!r} on {type(self).__name__} expression" - f" collides with {e.resultsName!r} on contained expression" - ) - warnings.warn(warning, stacklevel=3) - break - - return super()._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """ - Repetition of one or more of the given expression. 
- - Parameters: - - - ``expr`` - expression that must match one or more times - - ``stop_on`` - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stop_on attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parse_string(text).pprint() - """ - - def _generateDefaultName(self) -> str: - return f"{{{self.expr}}}..." - - -class ZeroOrMore(_MultipleMatch): - """ - Optional repetition of zero or more of the given expression. - - Parameters: - - - ``expr`` - expression that must match zero or more times - - ``stop_on`` - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - (default= ``None``) - - Example: similar to :class:`OneOrMore` - """ - - def __init__( - self, - expr: Union[str, ParserElement], - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr, stopOn=stopOn or stop_on) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super().parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, ParseResults([], name=self.resultsName) - - def _generateDefaultName(self) -> str: - return f"[{self.expr}]..." - - -class DelimitedList(ParseElementEnhance): - def __init__( - self, - expr: Union[str, ParserElement], - delim: Union[str, ParserElement] = ",", - combine: bool = False, - min: typing.Optional[int] = None, - max: typing.Optional[int] = None, - *, - allow_trailing_delim: bool = False, - ): - """Helper to define a delimited list of expressions - the delimiter - defaults to ','. By default, the list elements and delimiters can - have intervening whitespace, and comments, but this can be - overridden by passing ``combine=True`` in the constructor. If - ``combine`` is set to ``True``, the matching tokens are - returned as a single token string, with the delimiters included; - otherwise, the matching tokens are returned as a list of tokens, - with the delimiters suppressed. - - If ``allow_trailing_delim`` is set to True, then the list may end with - a delimiter. 
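-
-        The ``min``/``max`` bounds and trailing-delimiter handling can be
-        sketched as follows (the values are illustrative)::
-
-            # between 2 and 4 comma-separated words, allowing 'aa,bb,'
-            DelimitedList(Word(alphas), min=2, max=4, allow_trailing_delim=True)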
- - Example:: - - DelimitedList(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] - DelimitedList(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] - """ - if isinstance(expr, str_type): - expr = ParserElement._literalStringClass(expr) - expr = typing.cast(ParserElement, expr) - - if min is not None and min < 1: - raise ValueError("min must be greater than 0") - - if max is not None and min is not None and max < min: - raise ValueError("max must be greater than, or equal to min") - - self.content = expr - self.raw_delim = str(delim) - self.delim = delim - self.combine = combine - if not combine: - self.delim = Suppress(delim) - self.min = min or 1 - self.max = max - self.allow_trailing_delim = allow_trailing_delim - - delim_list_expr = self.content + (self.delim + self.content) * ( - self.min - 1, - None if self.max is None else self.max - 1, - ) - if self.allow_trailing_delim: - delim_list_expr += Opt(self.delim) - - if self.combine: - delim_list_expr = Combine(delim_list_expr) - - super().__init__(delim_list_expr, savelist=True) - - def _generateDefaultName(self) -> str: - content_expr = self.content.streamline() - return f"{content_expr} [{self.raw_delim} {content_expr}]..." - - -class _NullToken: - def __bool__(self): - return False - - def __str__(self): - return "" - - -class Opt(ParseElementEnhance): - """ - Optional matching of the given expression. - - Parameters: - - - ``expr`` - expression that must match zero or more times - - ``default`` (optional) - value to be returned if the optional expression is not found. - - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) - zip.run_tests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - - __optionalNotMatched = _NullToken() - - def __init__( - self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched - ): - super().__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - self_expr = self.expr - try: - loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - default_value = self.defaultValue - if default_value is not self.__optionalNotMatched: - if self_expr.resultsName: - tokens = ParseResults([default_value]) - tokens[self_expr.resultsName] = default_value - else: - tokens = [default_value] - else: - tokens = [] - return loc, tokens - - def _generateDefaultName(self) -> str: - inner = str(self.expr) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return f"[{inner}]" - - -Optional = Opt - - -class SkipTo(ParseElementEnhance): - """ - Token for skipping over all undefined text until the matched - expression is found. - - Parameters: - - - ``expr`` - target expression marking the end of the data to be skipped - - ``include`` - if ``True``, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element - list) (default= ``False``). 
-    - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
-      comments) that might contain false matches to the target expression
-    - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
-      included in the skipped text; if found before the target expression is found,
-      the :class:`SkipTo` is not a match
-
-    Example::
-
-        report = '''
-            Outstanding Issues Report - 1 Jan 2000
-
-               # | Severity | Description                               |  Days Open
-            -----+----------+-------------------------------------------+-----------
-             101 | Critical | Intermittent system crash                 |          6
-              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
-              79 | Minor    | System slow when running too many reports |         47
-            '''
-        integer = Word(nums)
-        SEP = Suppress('|')
-        # use SkipTo to simply match everything up until the next SEP
-        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
-        # - parse action will call token.strip() for each matched token, i.e., the description body
-        string_data = SkipTo(SEP, ignore=quoted_string)
-        string_data.set_parse_action(token_map(str.strip))
-        ticket_expr = (integer("issue_num") + SEP
-                       + string_data("sev") + SEP
-                       + string_data("desc") + SEP
-                       + integer("days_open"))
-
-        for tkt in ticket_expr.search_string(report):
-            print(tkt.dump())
-
-    prints::
-
-        ['101', 'Critical', 'Intermittent system crash', '6']
-        - days_open: '6'
-        - desc: 'Intermittent system crash'
-        - issue_num: '101'
-        - sev: 'Critical'
-        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
-        - days_open: '14'
-        - desc: "Spelling error on Login ('log|n')"
-        - issue_num: '94'
-        - sev: 'Cosmetic'
-        ['79', 'Minor', 'System slow when running too many reports', '47']
-        - days_open: '47'
-        - desc: 'System slow when running too many reports'
-        - issue_num: '79'
-        - sev: 'Minor'
-    """
-
-    def __init__(
-        self,
-        other: Union[ParserElement, str],
-        include: bool = False,
-        ignore: typing.Optional[Union[ParserElement, str]] = None,
-        fail_on: typing.Optional[Union[ParserElement, str]] = None,
-        *,
-        failOn: typing.Optional[Union[ParserElement, str]] = None,
-    ):
-        super().__init__(other)
-        failOn = failOn or fail_on
-        self.ignoreExpr = ignore
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.includeMatch = include
-        self.saveAsList = False
-        if isinstance(failOn, str_type):
-            self.failOn = self._literalStringClass(failOn)
-        else:
-            self.failOn = failOn
-        self.errmsg = "No match found for " + str(self.expr)
-        self.ignorer = Empty().leave_whitespace()
-        self._update_ignorer()
-
-    def _update_ignorer(self):
-        # rebuild internal ignore expr from current ignore exprs and assigned ignoreExpr
-        self.ignorer.ignoreExprs.clear()
-        for e in self.expr.ignoreExprs:
-            self.ignorer.ignore(e)
-        if self.ignoreExpr:
-            self.ignorer.ignore(self.ignoreExpr)
-
-    def ignore(self, expr):
-        super().ignore(expr)
-        self._update_ignorer()
-
-    def parseImpl(self, instring, loc, doActions=True):
-        startloc = loc
-        instrlen = len(instring)
-        self_expr_parse = self.expr._parse
-        self_failOn_canParseNext = (
-            self.failOn.canParseNext if self.failOn is not None else None
-        )
-        ignorer_try_parse = self.ignorer.try_parse if self.ignorer.ignoreExprs else None
-
-        tmploc = loc
-        while tmploc <= instrlen:
-            if self_failOn_canParseNext is not None:
-                # break if failOn expression matches
-                if self_failOn_canParseNext(instring, tmploc):
-                    break
-
-            if ignorer_try_parse is not None:
-                # advance past ignore expressions
-                prev_tmploc = tmploc
-                while 1:
-                    try:
-                        tmploc =
ignorer_try_parse(instring, tmploc) - except ParseBaseException: - break - # see if all ignorers matched, but didn't actually ignore anything - if tmploc == prev_tmploc: - break - prev_tmploc = tmploc - - try: - self_expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return values - loc = tmploc - skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - - -class Forward(ParseElementEnhance): - """ - Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the ``'<<'`` operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: - - fwd_expr << a | b | c - - will actually be evaluated as:: - - (fwd_expr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwd_expr << (a | b | c) - - Converting to use the ``'<<='`` operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - - def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None): - self.caller_frame = traceback.extract_stack(limit=2)[0] - super().__init__(other, savelist=False) # type: ignore[arg-type] - self.lshift_line = None - - def __lshift__(self, other) -> "Forward": - if hasattr(self, "caller_frame"): - del self.caller_frame - if isinstance(other, str_type): - other = self._literalStringClass(other) - - if not isinstance(other, ParserElement): - return NotImplemented - - self.expr = other - self.streamlined = other.streamlined - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.set_whitespace_chars( - self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars - ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - self.lshift_line = traceback.extract_stack(limit=2)[-2] # type: ignore[assignment] - return self - - def __ilshift__(self, other) -> "Forward": - if not isinstance(other, ParserElement): - return NotImplemented - - return self << other - - def __or__(self, other) -> "ParserElement": - caller_line = traceback.extract_stack(limit=2)[-2] - if ( - __diag__.warn_on_match_first_with_lshift_operator - and caller_line == self.lshift_line - and Diagnostics.warn_on_match_first_with_lshift_operator - not in self.suppress_warnings_ - ): - warnings.warn( - "using '<<' operator with '|' is probably an error, use '<<='", - stacklevel=2, - ) - ret = super().__or__(other) - return ret - - def __del__(self): - # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' - if ( - self.expr is None - and __diag__.warn_on_assignment_to_Forward - and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ 
- ): - warnings.warn_explicit( - "Forward defined here but no expression attached later using '<<=' or '<<'", - UserWarning, - filename=self.caller_frame.filename, - lineno=self.caller_frame.lineno, - ) - - def parseImpl(self, instring, loc, doActions=True): - if ( - self.expr is None - and __diag__.warn_on_parse_using_empty_Forward - and Diagnostics.warn_on_parse_using_empty_Forward - not in self.suppress_warnings_ - ): - # walk stack until parse_string, scan_string, search_string, or transform_string is found - parse_fns = ( - "parse_string", - "scan_string", - "search_string", - "transform_string", - ) - tb = traceback.extract_stack(limit=200) - for i, frm in enumerate(reversed(tb), start=1): - if frm.name in parse_fns: - stacklevel = i + 1 - break - else: - stacklevel = 2 - warnings.warn( - "Forward expression was never assigned a value, will not parse any input", - stacklevel=stacklevel, - ) - if not ParserElement._left_recursion_enabled: - return super().parseImpl(instring, loc, doActions) - # ## Bounded Recursion algorithm ## - # Recursion only needs to be processed at ``Forward`` elements, since they are - # the only ones that can actually refer to themselves. The general idea is - # to handle recursion stepwise: We start at no recursion, then recurse once, - # recurse twice, ..., until more recursion offers no benefit (we hit the bound). - # - # The "trick" here is that each ``Forward`` gets evaluated in two contexts - # - to *match* a specific recursion level, and - # - to *search* the bounded recursion level - # and the two run concurrently. The *search* must *match* each recursion level - # to find the best possible match. This is handled by a memo table, which - # provides the previous match to the next level match attempt. - # - # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. - # - # There is a complication since we not only *parse* but also *transform* via - # actions: We do not want to run the actions too often while expanding. Thus, - # we expand using `doActions=False` and only run `doActions=True` if the next - # recursion level is acceptable. - with ParserElement.recursion_lock: - memo = ParserElement.recursion_memos - try: - # we are parsing at a specific recursion expansion - use it as-is - prev_loc, prev_result = memo[loc, self, doActions] - if isinstance(prev_result, Exception): - raise prev_result - return prev_loc, prev_result.copy() - except KeyError: - act_key = (loc, self, True) - peek_key = (loc, self, False) - # we are searching for the best recursion expansion - keep on improving - # both `doActions` cases must be tracked separately here! 
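-                # seed the memo with a guaranteed failure one position back,
-                # so the first expansion attempt has a baseline to improve on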
- prev_loc, prev_peek = memo[peek_key] = ( - loc - 1, - ParseException( - instring, loc, "Forward recursion without base case", self - ), - ) - if doActions: - memo[act_key] = memo[peek_key] - while True: - try: - new_loc, new_peek = super().parseImpl(instring, loc, False) - except ParseException: - # we failed before getting any match – do not hide the error - if isinstance(prev_peek, Exception): - raise - new_loc, new_peek = prev_loc, prev_peek - # the match did not get better: we are done - if new_loc <= prev_loc: - if doActions: - # replace the match for doActions=False as well, - # in case the action did backtrack - prev_loc, prev_result = memo[peek_key] = memo[act_key] - del memo[peek_key], memo[act_key] - return prev_loc, prev_result.copy() - del memo[peek_key] - return prev_loc, prev_peek.copy() - # the match did get better: see if we can improve further - if doActions: - try: - memo[act_key] = super().parseImpl(instring, loc, True) - except ParseException as e: - memo[peek_key] = memo[act_key] = (new_loc, e) - raise - prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = False - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = True - return self - - def streamline(self) -> ParserElement: - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None) -> None: - warnings.warn( - "ParserElement.validate() is deprecated, and should not be used to check for left recursion", - DeprecationWarning, - stacklevel=2, - ) - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def _generateDefaultName(self) -> str: - # Avoid infinite recursion by setting a temporary _defaultName - self._defaultName = ": ..." - - # Use the string representation of main expression. - retString = "..." - try: - if self.expr is not None: - retString = str(self.expr)[:1000] - else: - retString = "None" - finally: - return f"{type(self).__name__}: {retString}" - - def copy(self) -> ParserElement: - if self.expr is not None: - return super().copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, list_all_matches=False): - # fmt: off - if ( - __diag__.warn_name_set_on_empty_Forward - and Diagnostics.warn_name_set_on_empty_Forward not in self.suppress_warnings_ - and self.expr is None - ): - warning = ( - "warn_name_set_on_empty_Forward:" - f" setting results name {name!r} on {type(self).__name__} expression" - " that has no contained expression" - ) - warnings.warn(warning, stacklevel=3) - # fmt: on - - return super()._setResultsName(name, list_all_matches) - - # Compatibility synonyms - # fmt: off - leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) - ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) - # fmt: on - - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist=False): - super().__init__(expr) # , savelist) - self.saveAsList = False - - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. 
- By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parse_string('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) - """ - - def __init__( - self, - expr: ParserElement, - join_string: str = "", - adjacent: bool = True, - *, - joinString: typing.Optional[str] = None, - ): - super().__init__(expr) - joinString = joinString if joinString is not None else join_string - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leave_whitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other) -> ParserElement: - if self.adjacent: - ParserElement.ignore(self, other) - else: - super().ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults( - ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults - ) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - The optional ``aslist`` argument when set to True will return the - parsed tokens as a Python list instead of a pyparsing ParseResults. - - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Opt(DelimitedList(term)) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Opt(DelimitedList(term))) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', ['a', 'b', '100']] - """ - - def __init__(self, expr: ParserElement, aslist: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonList = aslist - - def postParse(self, instring, loc, tokenlist): - if self._asPythonList: - return ParseResults.List( - tokenlist.asList() - if isinstance(tokenlist, ParseResults) - else list(tokenlist) - ) - - return [tokenlist] - - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as a item key. - - The optional ``asdict`` argument when set to True will return the - parsed tokens as a Python dict instead of a pyparsing ParseResults. 
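-
-    A sketch of keyed access (illustrative; ``attr_expr`` and ``text`` are
-    defined in the full example below)::
-
-        result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
-        result['shape']   # -> 'SQUARE'
-        result.as_dict()  # -> plain dict of all key/value pairs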
-
-    Example::
-
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
-
-        # print attributes as plain groups
-        print(attr_expr[1, ...].parse_string(text).dump())
-
-        # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names
-        result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
-        print(result.dump())
-
-        # access named fields as dict entries, or output as dict
-        print(result['shape'])
-        print(result.as_dict())
-
-    prints::
-
-        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: 'light blue'
-        - posn: 'upper left'
-        - shape: 'SQUARE'
-        - texture: 'burlap'
-        SQUARE
-        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
-
-    See more examples at :class:`ParseResults` of accessing fields by results name.
-    """
-
-    def __init__(self, expr: ParserElement, asdict: bool = False):
-        super().__init__(expr)
-        self.saveAsList = True
-        self._asPythonDict = asdict
-
-    def postParse(self, instring, loc, tokenlist):
-        for i, tok in enumerate(tokenlist):
-            if len(tok) == 0:
-                continue
-
-            ikey = tok[0]
-            if isinstance(ikey, int):
-                ikey = str(ikey).strip()
-
-            if len(tok) == 1:
-                tokenlist[ikey] = _ParseResultsWithOffset("", i)
-
-            elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
-                tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
-
-            else:
-                try:
-                    dictvalue = tok.copy()  # ParseResults(i)
-                except Exception:
-                    exc = TypeError(
-                        "could not extract dict values from parsed results"
-                        " - Dict expression must contain Grouped expressions"
-                    )
-                    raise exc from None
-
-                del dictvalue[0]
-
-                if len(dictvalue) != 1 or (
-                    isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
-                ):
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
-                else:
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
-
-        if self._asPythonDict:
-            return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()
-
-        return [tokenlist] if self.resultsName else tokenlist
-
-
-class Suppress(TokenConverter):
-    """Converter for ignoring the results of a parsed expression.
-
-    Example::
-
-        source = "a, b, c,d"
-        wd = Word(alphas)
-        wd_list1 = wd + (',' + wd)[...]
-        print(wd_list1.parse_string(source))
-
-        # often, delimiters that are useful during parsing are just in the
-        # way afterward - use Suppress to keep them out of the parsed output
-        wd_list2 = wd + (Suppress(',') + wd)[...]
-        print(wd_list2.parse_string(source))
-
-        # Skipped text (using '...') can be suppressed as well
-        source = "lead in START relevant text END trailing text"
-        start_marker = Keyword("START")
-        end_marker = Keyword("END")
-        find_body = Suppress(...) + start_marker + ... + end_marker
-        print(find_body.parse_string(source))
-
-    prints::
-
-        ['a', ',', 'b', ',', 'c', ',', 'd']
-        ['a', 'b', 'c', 'd']
-        ['START', 'relevant text ', 'END']
-
-    (See also :class:`DelimitedList`.)
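-
-    An equivalent inline spelling uses :class:`ParserElement.suppress`::
-
-        # same as wd_list2 above, suppressing the delimiter in place
-        wd_list3 = wd + (Literal(',').suppress() + wd)[...]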
-    """
-
-    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
-        if expr is ...:
-            expr = _PendingSkip(NoMatch())
-        super().__init__(expr)
-
-    def __add__(self, other) -> "ParserElement":
-        if isinstance(self.expr, _PendingSkip):
-            return Suppress(SkipTo(other)) + other
-
-        return super().__add__(other)
-
-    def __sub__(self, other) -> "ParserElement":
-        if isinstance(self.expr, _PendingSkip):
-            return Suppress(SkipTo(other)) - other
-
-        return super().__sub__(other)
-
-    def postParse(self, instring, loc, tokenlist):
-        return []
-
-    def suppress(self) -> ParserElement:
-        return self
-
-
-def trace_parse_action(f: ParseAction) -> ParseAction:
-    """Decorator for debugging parse actions.
-
-    When the parse action is called, this decorator will print
-    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
-    When the parse action completes, the decorator will print
-    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
-
-    Example::
-
-        wd = Word(alphas)
-
-        @trace_parse_action
-        def remove_duplicate_chars(tokens):
-            return ''.join(sorted(set(''.join(tokens))))
-
-        wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
-        print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
-
-    prints::
-
-        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
-        <<leaving remove_duplicate_chars (ret: 'dfjkls')
-        ['dfjkls']
-    """
-    f = _trim_arity(f)
-
-    def z(*paArgs):
-        thisFunc = f.__name__
-        s, l, t = paArgs[-3:]
-        if len(paArgs) > 3:
-            thisFunc = f"{type(paArgs[0]).__name__}.{thisFunc}"
-        sys.stderr.write(f">>entering {thisFunc}(line: {line(l, s)!r}, {l}, {t!r})\n")
-        try:
-            ret = f(*paArgs)
-        except Exception as exc:
-            sys.stderr.write(f"<<leaving {thisFunc} (exception: {exc})\n")
-            raise
-        sys.stderr.write(f"<<leaving {thisFunc} (ret: {ret!r})\n")
-        return ret
-
-    z.__name__ = f.__name__
-    return z
-
-
-def srange(s: str) -> str:
-    r"""Helper to easily define string ranges for use in :class:`Word`
-    construction. Borrows syntax from regexp ``'[]'`` string range
-    definitions::
-
-        srange("[0-9]") -> "0123456789"
-        srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
-        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
-
-    The input string must be enclosed in []'s, and the returned string
-    is the expanded character set joined into a single string. The
-    values enclosed in the []'s may be:
-
-    - a single character
-    - an escaped character with a leading backslash (such as ``\-``
-      or ``\]``)
-    - an escaped hex character with a leading ``'\x'``
-      (``\x21``, which is a ``'!'`` character) (``\0x##``
-      is also supported for backwards compatibility)
-    - an escaped octal character with a leading ``'\0'``
-      (``\041``, which is a ``'!'`` character)
-    - a range of any of the above, separated by a dash (``'a-z'``,
-      etc.)
-    - any combination of the above (``'aeiouy'``,
-      ``'a-zA-Z0-9_$'``, etc.)
-    """
-    _expanded = lambda p: (
-        p
-        if not isinstance(p, ParseResults)
-        else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
-    )
-    try:
-        return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body)
-    except Exception:
-        return ""
-
-
-def token_map(func, *args) -> ParseAction:
-    """Helper to define a parse action by mapping a function to all
-    elements of a :class:`ParseResults` list. If any additional args are passed,
-    they are forwarded to the given function as additional arguments
-    after the token, as in
-    ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``,
-    which will convert the parsed data to an integer using base 16.
- - Example (compare the last example to the one in :class:`ParserElement.transform_string`):: - - hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) - hex_ints.run_tests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).set_parse_action(token_map(str.upper)) - upperword[1, ...].run_tests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).set_parse_action(token_map(str.title)) - wd[1, ...].set_parse_action(' '.join).run_tests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - - prints:: - - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - - def pa(s, l, t): - return [func(tokn, *args) for tokn in t] - - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - pa.__name__ = func_name - - return pa - - -def autoname_elements() -> None: - """ - Utility to simplify mass-naming of parser elements, for - generating railroad diagram with named subdiagrams. - """ - calling_frame = sys._getframe().f_back - if calling_frame is None: - return - calling_frame = typing.cast(types.FrameType, calling_frame) - for name, var in calling_frame.f_locals.items(): - if isinstance(var, ParserElement) and not var.customName: - var.set_name(name) - - -dbl_quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' -).set_name("string enclosed in double quotes") - -sgl_quoted_string = Combine( - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("string enclosed in single quotes") - -quoted_string = Combine( - (Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name( - "double quoted string" - ) - | (Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name( - "single quoted string" - ) -).set_name("quoted string using single or double quotes") - -python_quoted_string = Combine( - (Regex(r'"""(?:[^"\\]|""(?!")|"(?!"")|\\.)*', flags=re.MULTILINE) + '"""').set_name( - "multiline double quoted string" - ) - ^ ( - Regex(r"'''(?:[^'\\]|''(?!')|'(?!'')|\\.)*", flags=re.MULTILINE) + "'''" - ).set_name("multiline single quoted string") - ^ (Regex(r'"(?:[^"\n\r\\]|(?:\\")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name( - "double quoted string" - ) - ^ (Regex(r"'(?:[^'\n\r\\]|(?:\\')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name( - "single quoted string" - ) -).set_name("Python quoted string") - -unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") - - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - -# backward compatibility names -# fmt: off -sglQuotedString = sgl_quoted_string -dblQuotedString = dbl_quoted_string -quotedString = quoted_string -unicodeString = unicode_string -lineStart = line_start -lineEnd = line_end -stringStart = string_start -stringEnd = string_end -nullDebugAction = replaced_by_pep8("nullDebugAction", null_debug_action) -traceParseAction = replaced_by_pep8("traceParseAction", trace_parse_action) -conditionAsParseAction =
replaced_by_pep8("conditionAsParseAction", condition_as_parse_action) -tokenMap = replaced_by_pep8("tokenMap", token_map) -# fmt: on diff --git a/src/pip/_vendor/pyparsing/diagram/__init__.py b/src/pip/_vendor/pyparsing/diagram/__init__.py deleted file mode 100644 index 6074d2bfd0f..00000000000 --- a/src/pip/_vendor/pyparsing/diagram/__init__.py +++ /dev/null @@ -1,656 +0,0 @@ -# mypy: ignore-errors -import railroad -from pip._vendor import pyparsing -import typing -from typing import ( - List, - NamedTuple, - Generic, - TypeVar, - Dict, - Callable, - Set, - Iterable, -) -from jinja2 import Template -from io import StringIO -import inspect - - -jinja2_template_source = """\ -{% if not embed %} - - - -{% endif %} - {% if not head %} - - {% else %} - {{ head | safe }} - {% endif %} -{% if not embed %} - - -{% endif %} -{{ body | safe }} -{% for diagram in diagrams %} -
-

{{ diagram.title }}

-
{{ diagram.text }}
-
- {{ diagram.svg }} -
-
-{% endfor %} -{% if not embed %} - - -{% endif %} -""" - -template = Template(jinja2_template_source) - -# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet -NamedDiagram = NamedTuple( - "NamedDiagram", - [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)], -) -""" -A simple structure for associating a name with a railroad diagram -""" - -T = TypeVar("T") - - -class EachItem(railroad.Group): - """ - Custom railroad item to compose a: - - Group containing a - - OneOrMore containing a - - Choice of the elements in the Each - with the group label indicating that all must be matched - """ - - all_label = "[ALL]" - - def __init__(self, *items): - choice_item = railroad.Choice(len(items) - 1, *items) - one_or_more_item = railroad.OneOrMore(item=choice_item) - super().__init__(one_or_more_item, label=self.all_label) - - -class AnnotatedItem(railroad.Group): - """ - Simple subclass of Group that creates an annotation label - """ - - def __init__(self, label: str, item): - super().__init__(item=item, label="[{}]".format(label) if label else label) - - -class EditablePartial(Generic[T]): - """ - Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been - constructed. - """ - - # We need this here because the railroad constructors actually transform the data, so can't be called until the - # entire tree is assembled - - def __init__(self, func: Callable[..., T], args: list, kwargs: dict): - self.func = func - self.args = args - self.kwargs = kwargs - - @classmethod - def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": - """ - If you call this function in the same way that you would call the constructor, it will store the arguments - as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) - """ - return EditablePartial(func=func, args=list(args), kwargs=kwargs) - - @property - def name(self): - return self.kwargs["name"] - - def __call__(self) -> T: - """ - Evaluate the partial and return the result - """ - args = self.args.copy() - kwargs = self.kwargs.copy() - - # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. 
- # args=['list', 'of', 'things']) - arg_spec = inspect.getfullargspec(self.func) - if arg_spec.varargs in self.kwargs: - args += kwargs.pop(arg_spec.varargs) - - return self.func(*args, **kwargs) - - -def railroad_to_html(diagrams: List[NamedDiagram], embed=False, **kwargs) -> str: - """ - Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams - :param kwargs: kwargs to be passed in to the template - """ - data = [] - for diagram in diagrams: - if diagram.diagram is None: - continue - io = StringIO() - try: - css = kwargs.get('css') - diagram.diagram.writeStandalone(io.write, css=css) - except AttributeError: - diagram.diagram.writeSvg(io.write) - title = diagram.name - if diagram.index == 0: - title += " (root)" - data.append({"title": title, "text": "", "svg": io.getvalue()}) - - return template.render(diagrams=data, embed=embed, **kwargs) - - -def resolve_partial(partial: "EditablePartial[T]") -> T: - """ - Recursively resolves a collection of Partials into whatever type they are - """ - if isinstance(partial, EditablePartial): - partial.args = resolve_partial(partial.args) - partial.kwargs = resolve_partial(partial.kwargs) - return partial() - elif isinstance(partial, list): - return [resolve_partial(x) for x in partial] - elif isinstance(partial, dict): - return {key: resolve_partial(x) for key, x in partial.items()} - else: - return partial - - -def to_railroad( - element: pyparsing.ParserElement, - diagram_kwargs: typing.Optional[dict] = None, - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, -) -> List[NamedDiagram]: - """ - Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram - creation if you want to access the Railroad tree before it is converted to HTML - :param element: base element of the parser being diagrammed - :param diagram_kwargs: kwargs to pass to the Diagram() constructor - :param vertical: (optional) - int - limit at which number of alternatives should be - shown vertically instead of horizontally - :param show_results_names: bool to indicate whether results name annotations should be - included in the diagram - :param show_groups: bool to indicate whether groups should be highlighted with an unlabeled - surrounding box - """ - # Convert the whole tree underneath the root - lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) - _to_diagram_element( - element, - lookup=lookup, - parent=None, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - root_id = id(element) - # Convert the root if it hasn't been already - if root_id in lookup: - if not element.customName: - lookup[root_id].name = "" - lookup[root_id].mark_for_extraction(root_id, lookup, force=True) - - # Now that we're finished, we can convert from intermediate structures into Railroad elements - diags = list(lookup.diagrams.values()) - if len(diags) > 1: - # collapse out duplicate diags with the same name - seen = set() - deduped_diags = [] - for d in diags: - # don't extract SkipTo elements, they are uninformative as subdiagrams - if d.name == "...": - continue - if d.name is not None and d.name not in seen: - seen.add(d.name) - deduped_diags.append(d) - resolved = [resolve_partial(partial) for partial in deduped_diags] - else: - # special case - if just one diagram, always display it, even if - # it has no name - resolved = [resolve_partial(partial) for partial in diags] - return sorted(resolved, key=lambda diag: diag.index) - -
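For reference, the two public entry points above compose as shown in the following minimal sketch. This is not part of the patch itself: it illustrates the API that this commit deletes, assumes the third-party ``railroad`` and ``jinja2`` packages are importable (pip does not vendor them, which is why the module is being removed), and uses an invented ``assignment`` grammar purely for illustration::

    from pip._vendor import pyparsing as pp
    from pip._vendor.pyparsing.diagram import to_railroad, railroad_to_html

    # a tiny stand-in grammar to diagram (illustrative only)
    number = pp.Word(pp.nums).set_name("number")
    ident = pp.Word(pp.alphas).set_name("identifier")
    assignment = (ident + pp.Suppress("=") + (number | ident)).set_name("assignment")

    # to_railroad walks the element tree into NamedDiagram entries;
    # railroad_to_html renders them through the jinja2 template shown earlier
    diagrams = to_railroad(assignment, vertical=3, show_results_names=True)
    with open("assignment_diagram.html", "w", encoding="utf-8") as f:
        f.write(railroad_to_html(diagrams))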
-def _should_vertical( - specification: int, exprs: Iterable[pyparsing.ParserElement] -) -> bool: - """ - Returns true if we should return a vertical list of elements - """ - if specification is None: - return False - else: - return len(_visible_exprs(exprs)) >= specification - - -class ElementState: - """ - State recorded for an individual pyparsing Element - """ - - # Note: this should be a dataclass, but we have to support Python 3.5 - def __init__( - self, - element: pyparsing.ParserElement, - converted: EditablePartial, - parent: EditablePartial, - number: int, - name: str = None, - parent_index: typing.Optional[int] = None, - ): - #: The pyparsing element that this represents - self.element: pyparsing.ParserElement = element - #: The name of the element - self.name: typing.Optional[str] = name - #: The output Railroad element in an unconverted state - self.converted: EditablePartial = converted - #: The parent Railroad element, which we store so that we can extract this if it's duplicated - self.parent: EditablePartial = parent - #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram - self.number: int = number - #: The index of this inside its parent - self.parent_index: typing.Optional[int] = parent_index - #: If true, we should extract this out into a subdiagram - self.extract: bool = False - #: If true, all of this element's children have been filled out - self.complete: bool = False - - def mark_for_extraction( - self, el_id: int, state: "ConverterState", name: str = None, force: bool = False - ): - """ - Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram - :param el_id: id of the element - :param state: element/diagram state tracker - :param name: name to use for this element's text - :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the - root element when we know we're finished - """ - self.extract = True - - # Set the name - if not self.name: - if name: - # Allow forcing a custom name - self.name = name - elif self.element.customName: - self.name = self.element.customName - else: - self.name = "" - - # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children - # to be added - # Also, if this is just a string literal etc, don't bother extracting it - if force or (self.complete and _worth_extracting(self.element)): - state.extract_into_diagram(el_id) - - -class ConverterState: - """ - Stores some state that persists between recursions into the element tree - """ - - def __init__(self, diagram_kwargs: typing.Optional[dict] = None): - #: A dictionary mapping ParserElements to state relating to them - self._element_diagram_states: Dict[int, ElementState] = {} - #: A dictionary mapping ParserElement IDs to subdiagrams generated from them - self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} - #: The index of the next unnamed element - self.unnamed_index: int = 1 - #: The index of the next element. 
This is used for sorting - self.index: int = 0 - #: Shared kwargs that are used to customize the construction of diagrams - self.diagram_kwargs: dict = diagram_kwargs or {} - self.extracted_diagram_names: Set[str] = set() - - def __setitem__(self, key: int, value: ElementState): - self._element_diagram_states[key] = value - - def __getitem__(self, key: int) -> ElementState: - return self._element_diagram_states[key] - - def __delitem__(self, key: int): - del self._element_diagram_states[key] - - def __contains__(self, key: int): - return key in self._element_diagram_states - - def generate_unnamed(self) -> int: - """ - Generate a number used in the name of an otherwise unnamed diagram - """ - self.unnamed_index += 1 - return self.unnamed_index - - def generate_index(self) -> int: - """ - Generate a number used to index a diagram - """ - self.index += 1 - return self.index - - def extract_into_diagram(self, el_id: int): - """ - Used when we encounter the same token twice in the same tree. When this - happens, we replace all instances of that token with a terminal, and - create a new subdiagram for the token - """ - position = self[el_id] - - # Replace the original definition of this element with a regular block - if position.parent: - ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) - if "item" in position.parent.kwargs: - position.parent.kwargs["item"] = ret - elif "items" in position.parent.kwargs: - position.parent.kwargs["items"][position.parent_index] = ret - - # If the element we're extracting is a group, skip to its content but keep the title - if position.converted.func == railroad.Group: - content = position.converted.kwargs["item"] - else: - content = position.converted - - self.diagrams[el_id] = EditablePartial.from_call( - NamedDiagram, - name=position.name, - diagram=EditablePartial.from_call( - railroad.Diagram, content, **self.diagram_kwargs - ), - index=position.number, - ) - - del self[el_id] - - -def _worth_extracting(element: pyparsing.ParserElement) -> bool: - """ - Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children - themselves have children, then it's complex enough to extract - """ - children = element.recurse() - return any(child.recurse() for child in children) - - -def _apply_diagram_item_enhancements(fn): - """ - decorator to ensure enhancements to a diagram item (such as results name annotations) - get applied on return from _to_diagram_element (we do this since there are several - returns in _to_diagram_element) - """ - - def _inner( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, - ) -> typing.Optional[EditablePartial]: - ret = fn( - element, - parent, - lookup, - vertical, - index, - name_hint, - show_results_names, - show_groups, - ) - - # apply annotation for results name, if present - if show_results_names and ret is not None: - element_results_name = element.resultsName - if element_results_name: - # add "*" to indicate if this is a "list all results" name - element_results_name += "" if element.modalResults else "*" - ret = EditablePartial.from_call( - railroad.Group, item=ret, label=element_results_name - ) - - return ret - - return _inner - - -def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): - non_diagramming_exprs = ( - pyparsing.ParseElementEnhance, - pyparsing.PositionToken, - pyparsing.And._ErrorStop, - ) - return [ - e - for e in exprs - if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) - ] - - -@_apply_diagram_item_enhancements -def _to_diagram_element( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, -) -> typing.Optional[EditablePartial]: - """ - Recursively converts a PyParsing Element to a railroad Element - :param lookup: The shared converter state that keeps track of useful things - :param index: The index of this element within the parent - :param parent: The parent of this element in the output tree - :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), - it sets the threshold of the number of items before we go vertical.
If True, always go vertical, if False, never - do so - :param name_hint: If provided, this will override the generated name - :param show_results_names: bool flag indicating whether to add annotations for results names - :param show_groups: bool flag indicating whether to show groups using bounding box - :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed - """ - exprs = element.recurse() - name = name_hint or element.customName or type(element).__name__ - - # Python's id() is used to provide a unique identifier for elements - el_id = id(element) - - element_results_name = element.resultsName - - # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram - if not element.customName: - if isinstance( - element, - ( - # pyparsing.TokenConverter, - # pyparsing.Forward, - pyparsing.Located, - ), - ): - # However, if this element has a useful custom name, and its child does not, we can pass it on to the child - if exprs: - if not exprs[0].customName: - propagated_name = name - else: - propagated_name = None - - return _to_diagram_element( - element.expr, - parent=parent, - lookup=lookup, - vertical=vertical, - index=index, - name_hint=propagated_name, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # If the element isn't worth extracting, we always treat it as the first time we see it - if _worth_extracting(element): - if el_id in lookup: - # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, - # so we have to extract it into a new diagram. - looked_up = lookup[el_id] - looked_up.mark_for_extraction(el_id, lookup, name=name_hint) - ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) - return ret - - elif el_id in lookup.diagrams: - # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we - # just put in a marker element that refers to the sub-diagram - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - return ret - - # Recursively convert child elements - # Here we find the most relevant Railroad element for matching pyparsing Element - # We use ``items=[]`` here to hold the place for where the child elements will go once created - if isinstance(element, pyparsing.And): - # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat - # (all will have the same name, and resultsName) - if not exprs: - return None - if len(set((e.name, e.resultsName) for e in exprs)) == 1: - ret = EditablePartial.from_call( - railroad.OneOrMore, item="", repeat=str(len(exprs)) - ) - elif _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Stack, items=[]) - else: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): - if not exprs: - return None - if _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) - else: - ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) - elif isinstance(element, pyparsing.Each): - if not exprs: - return None - ret = EditablePartial.from_call(EachItem, items=[]) - elif isinstance(element, pyparsing.NotAny): - ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") - elif isinstance(element, pyparsing.FollowedBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD",
item="") - elif isinstance(element, pyparsing.PrecededBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="") - elif isinstance(element, pyparsing.Group): - if show_groups: - ret = EditablePartial.from_call(AnnotatedItem, label="", item="") - else: - ret = EditablePartial.from_call(railroad.Group, label="", item="") - elif isinstance(element, pyparsing.TokenConverter): - label = type(element).__name__.lower() - if label == "tokenconverter": - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - else: - ret = EditablePartial.from_call(AnnotatedItem, label=label, item="") - elif isinstance(element, pyparsing.Opt): - ret = EditablePartial.from_call(railroad.Optional, item="") - elif isinstance(element, pyparsing.OneOrMore): - ret = EditablePartial.from_call(railroad.OneOrMore, item="") - elif isinstance(element, pyparsing.ZeroOrMore): - ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") - elif isinstance(element, pyparsing.Group): - ret = EditablePartial.from_call( - railroad.Group, item=None, label=element_results_name - ) - elif isinstance(element, pyparsing.Empty) and not element.customName: - # Skip unnamed "Empty" elements - ret = None - elif isinstance(element, pyparsing.ParseElementEnhance): - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif len(exprs) > 0 and not element_results_name: - ret = EditablePartial.from_call(railroad.Group, item="", label=name) - elif len(exprs) > 0: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - else: - terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) - ret = terminal - - if ret is None: - return - - # Indicate this element's position in the tree so we can extract it if necessary - lookup[el_id] = ElementState( - element=element, - converted=ret, - parent=parent, - parent_index=index, - number=lookup.generate_index(), - ) - if element.customName: - lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) - - i = 0 - for expr in exprs: - # Add a placeholder index in case we have to extract the child before we even add it to the parent - if "items" in ret.kwargs: - ret.kwargs["items"].insert(i, None) - - item = _to_diagram_element( - expr, - parent=ret, - lookup=lookup, - vertical=vertical, - index=i, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # Some elements don't need to be shown in the diagram - if item is not None: - if "item" in ret.kwargs: - ret.kwargs["item"] = item - elif "items" in ret.kwargs: - # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal - ret.kwargs["items"][i] = item - i += 1 - elif "items" in ret.kwargs: - # If we're supposed to skip this element, remove it from the parent - del ret.kwargs["items"][i] - - # If all this items children are none, skip this item - if ret and ( - ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) - or ("item" in ret.kwargs and ret.kwargs["item"] is None) - ): - ret = EditablePartial.from_call(railroad.Terminal, name) - - # Mark this element as "complete", ie it has all of its children - if el_id in lookup: - lookup[el_id].complete = True - - if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: - lookup.extract_into_diagram(el_id) - if ret is not None: - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - - return ret diff --git a/src/pip/_vendor/pyparsing/exceptions.py b/src/pip/_vendor/pyparsing/exceptions.py deleted file mode 
100644 index 1aaea56f54d..00000000000 --- a/src/pip/_vendor/pyparsing/exceptions.py +++ /dev/null @@ -1,300 +0,0 @@ -# exceptions.py - -import re -import sys -import typing - -from .util import ( - col, - line, - lineno, - _collapse_string_to_ranges, - replaced_by_pep8, -) -from .unicode import pyparsing_unicode as ppu - - -class _ExceptionWordUnicodeSet( - ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic -): - pass - - -_extract_alphanums = _collapse_string_to_ranges(_ExceptionWordUnicodeSet.alphanums) -_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") - - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - - loc: int - msg: str - pstr: str - parser_element: typing.Any # "ParserElement" - args: typing.Tuple[str, int, typing.Optional[str]] - - __slots__ = ( - "loc", - "msg", - "pstr", - "parser_element", - "args", - ) - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, - pstr: str, - loc: int = 0, - msg: typing.Optional[str] = None, - elem=None, - ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parser_element = elem - self.args = (pstr, loc, msg) - - @staticmethod - def explain_exception(exc, depth=16): - """ - Method to take an exception and translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - exc - exception raised during parsing (need not be a ParseException, in support - of Python exceptions that might be raised in a parse action) - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - """ - import inspect - from .core import ParserElement - - if depth is None: - depth = sys.getrecursionlimit() - ret = [] - if isinstance(exc, ParseBaseException): - ret.append(exc.line) - ret.append(" " * (exc.column - 1) + "^") - ret.append(f"{type(exc).__name__}: {exc}") - - if depth <= 0: - return "\n".join(ret) - - callers = inspect.getinnerframes(exc.__traceback__, context=depth) - seen = set() - for ff in callers[-depth:]: - frm = ff[0] - - f_self = frm.f_locals.get("self", None) - if isinstance(f_self, ParserElement): - if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")): - continue - if id(f_self) in seen: - continue - seen.add(id(f_self)) - - self_type = type(f_self) - ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}") - - elif f_self is not None: - self_type = type(f_self) - ret.append(f"{self_type.__module__}.{self_type.__name__}") - - else: - code = frm.f_code - if code.co_name in ("wrapper", "<module>"): - continue - - ret.append(code.co_name) - - depth -= 1 - if not depth: - break - - return "\n".join(ret) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element) - - @property - def line(self) -> str: - """ - Return the line of text where the exception occurred.
- """ - return line(self.loc, self.pstr) - - @property - def lineno(self) -> int: - """ - Return the 1-based line number of text where the exception occurred. - """ - return lineno(self.loc, self.pstr) - - @property - def col(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. - """ - return col(self.loc, self.pstr) - - @property - def column(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. - """ - return col(self.loc, self.pstr) - - # pre-PEP8 compatibility - @property - def parserElement(self): - return self.parser_element - - @parserElement.setter - def parserElement(self, elem): - self.parser_element = elem - - def __str__(self) -> str: - if self.pstr: - if self.loc >= len(self.pstr): - foundstr = ", found end of text" - else: - # pull out next word at error location - found_match = _exception_word_extractor.match(self.pstr, self.loc) - if found_match is not None: - found = found_match.group(0) - else: - found = self.pstr[self.loc : self.loc + 1] - foundstr = (", found %r" % found).replace(r"\\", "\\") - else: - foundstr = "" - return f"{self.msg}{foundstr} (at char {self.loc}), (line:{self.lineno}, col:{self.column})" - - def __repr__(self): - return str(self) - - def mark_input_line( - self, marker_string: typing.Optional[str] = None, *, markerString: str = ">!<" - ) -> str: - """ - Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - markerString = marker_string if marker_string is not None else markerString - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( - (line_str[:line_column], markerString, line_str[line_column:]) - ) - return line_str.strip() - - def explain(self, depth=16) -> str: - """ - Method to translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - - Example:: - - # an expression to parse 3 integers - expr = pp.Word(pp.nums) * 3 - try: - # a failing parse - the third integer is prefixed with "A" - expr.parse_string("123 456 A789") - except pp.ParseException as pe: - print(pe.explain(depth=0)) - - prints:: - - 123 456 A789 - ^ - ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) - - Note: the diagnostic output will include string representations of the expressions - that failed to parse. These representations will be more helpful if you use `set_name` to - give identifiable names to your expressions. Otherwise they will use the default string - forms, which may be cryptic to read. - - Note: pyparsing's default truncation of exception tracebacks may also truncate the - stack of expressions that are displayed in the ``explain`` output. 
To get the full listing - of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` - """ - return self.explain_exception(self, depth) - - # fmt: off - markInputline = replaced_by_pep8("markInputline", mark_input_line) - # fmt: on - - -class ParseException(ParseBaseException): - """ - Exception thrown when a parse expression doesn't match the input string - - Example:: - - integer = Word(nums).set_name("integer") - try: - integer.parse_string("ABC") - except ParseException as pe: - print(pe) - print(f"column: {pe.column}") - - prints:: - - Expected integer (at char 0), (line:1, col:1) column: 1 - - """ - - -class ParseFatalException(ParseBaseException): - """ - User-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately - """ - - -class ParseSyntaxException(ParseFatalException): - """ - Just like :class:`ParseFatalException`, but thrown internally - when an :class:`ErrorStop` ('-' operator) indicates - that parsing is to stop immediately because an unbacktrackable - syntax error has been found. - """ - - -class RecursiveGrammarException(Exception): - """ - Exception thrown by :class:`ParserElement.validate` if the - grammar could be left-recursive; parser may need to enable - left recursion using :class:`ParserElement.enable_left_recursion` - """ - - def __init__(self, parseElementList): - self.parseElementTrace = parseElementList - - def __str__(self) -> str: - return f"RecursiveGrammarException: {self.parseElementTrace}" diff --git a/src/pip/_vendor/pyparsing/helpers.py b/src/pip/_vendor/pyparsing/helpers.py deleted file mode 100644 index dcfdb8fe4bf..00000000000 --- a/src/pip/_vendor/pyparsing/helpers.py +++ /dev/null @@ -1,1078 +0,0 @@ -# helpers.py -import html.entities -import re -import sys -import typing - -from . import __diag__ -from .core import * -from .util import ( - _bslash, - _flatten, - _escape_regex_range_chars, - replaced_by_pep8, -) - - -# -# global helpers -# -def counted_array( - expr: ParserElement, - int_expr: typing.Optional[ParserElement] = None, - *, - intExpr: typing.Optional[ParserElement] = None, -) -> ParserElement: - """Helper to define a counted list of expressions. - - This helper defines a pattern of the form:: - - integer expr expr expr... - - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the - leading count token is suppressed. - - If ``int_expr`` is specified, it should be a pyparsing expression - that produces an integer value. 
- - Example:: - - counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) - counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] - - # if other fields must be parsed after the count but before the - # list items, give the fields results names and they will - # be preserved in the returned ParseResults: - count_with_metadata = integer + Word(alphas)("type") - typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") - result = typed_array.parse_string("3 bool True True False") - print(result.dump()) - - # prints - # ['True', 'True', 'False'] - # - items: ['True', 'True', 'False'] - # - type: 'bool' - """ - intExpr = intExpr or int_expr - array_expr = Forward() - - def count_field_parse_action(s, l, t): - nonlocal array_expr - n = t[0] - array_expr <<= (expr * n) if n else Empty() - # clear list contents, but keep any named results - del t[:] - - if intExpr is None: - intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.set_name("arrayLen") - intExpr.add_parse_action(count_field_parse_action, call_during_try=True) - return (intExpr + array_expr).set_name(f"(len) {expr}...") - - -def match_previous_literal(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_literal(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches a previous literal, it will also match the leading - ``"1:1"`` in ``"1:10"``. If this is not desired, use - :class:`match_previous_expr`. Do *not* use with packrat parsing - enabled. - """ - rep = Forward() - - def copy_token_to_repeater(s, l, t): - if not t: - rep << Empty() - return - - if len(t) == 1: - rep << t[0] - return - - # flatten t tokens - tflat = _flatten(t.as_list()) - rep << And(Literal(tt) for tt in tflat) - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def match_previous_expr(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_expr(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, it will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled.
- """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - - def copy_token_to_repeater(s, l, t): - matchTokens = _flatten(t.as_list()) - - def must_match_these_tokens(s, l, t): - theseTokens = _flatten(t.as_list()) - if theseTokens != matchTokens: - raise ParseException( - s, l, f"Expected {matchTokens}, found{theseTokens}" - ) - - rep.set_parse_action(must_match_these_tokens, callDuringTry=True) - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def one_of( - strs: Union[typing.Iterable[str], str], - caseless: bool = False, - use_regex: bool = True, - as_keyword: bool = False, - *, - useRegex: bool = True, - asKeyword: bool = False, -) -> ParserElement: - """Helper to quickly define a set of alternative :class:`Literal` s, - and makes sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. - - Parameters: - - - ``strs`` - a string of space-delimited literals, or a collection of - string literals - - ``caseless`` - treat all literals as caseless - (default= ``False``) - - ``use_regex`` - as an optimization, will - generate a :class:`Regex` object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``as_keyword=True``, or if - creating a :class:`Regex` raises an exception) - (default= ``True``) - - ``as_keyword`` - enforce :class:`Keyword`-style matching on the - generated expressions - (default= ``False``) - - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, - but will be removed in a future release - - Example:: - - comp_oper = one_of("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - asKeyword = asKeyword or as_keyword - useRegex = useRegex and use_regex - - if ( - isinstance(caseless, str_type) - and __diag__.warn_on_multiple_string_args_to_oneof - ): - warnings.warn( - "More than one string argument passed to one_of, pass" - " choices as a list or space-delimited string", - stacklevel=2, - ) - - if caseless: - isequal = lambda a, b: a.upper() == b.upper() - masks = lambda a, b: b.upper().startswith(a.upper()) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = lambda a, b: a == b - masks = lambda a, b: b.startswith(a) - parseElementClass = Keyword if asKeyword else Literal - - symbols: List[str] = [] - if isinstance(strs, str_type): - strs = typing.cast(str, strs) - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - raise TypeError("Invalid argument to one_of, expected string or iterable") - if not symbols: - return NoMatch() - - # reorder given symbols to take care to avoid masking longer choices with shorter ones - # (but only if the given symbols are not just single characters) - if any(len(sym) > 1 for sym in symbols): - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1 :]): - if isequal(other, cur): - del symbols[i + j + 1] - break - if masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if useRegex: - re_flags: int = re.IGNORECASE if caseless else 0 - - try: - if all(len(sym) == 1 for sym in symbols): - # symbols are just single characters, create 
range regex pattern - patt = f"[{''.join(_escape_regex_range_chars(sym) for sym in symbols)}]" - else: - patt = "|".join(re.escape(sym) for sym in symbols) - - # wrap with \b word break markers if defining as keywords - if asKeyword: - patt = rf"\b(?:{patt})\b" - - ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) - - if caseless: - # add parse action to return symbols as specified, not in random - # casing as found in input string - symbol_map = {sym.lower(): sym for sym in symbols} - ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) - - return ret - - except re.error: - warnings.warn( - "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 - ) - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( - " | ".join(symbols) - ) - - -def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: - """Helper to easily and clearly define a dictionary by specifying - the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. - - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - print(attr_expr[1, ...].parse_string(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) - - # similar to Dict, but simpler call format - result = dict_of(attr_label, attr_value).parse_string(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.as_dict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - - -def original_text_for( - expr: ParserElement, as_string: bool = True, *, asString: bool = True -) -> ParserElement: - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns a string containing the original parsed text. - - If the optional ``as_string`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`original_text_for` contains expressions with defined - results names, you must set ``as_string`` to ``False`` if you - want to preserve those results name values. - - The ``asString`` pre-PEP8 argument is retained for compatibility, - but will be removed in a future release.
- - Example:: - - src = "this is test <b> bold <i>text</i> </b> normal text " - for tag in ("b", "i"): - opener, closer = make_html_tags(tag) - patt = original_text_for(opener + ... + closer) - print(patt.search_string(src)[0]) - - prints:: - - ['<b> bold <i>text</i> </b>'] - ['<i>text</i>'] - """ - asString = asString and as_string - - locMarker = Empty().set_parse_action(lambda s, loc, t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s, l, t: s[t._original_start : t._original_end] - else: - - def extractText(s, l, t): - t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] - - matchExpr.set_parse_action(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) - return matchExpr - - -def ungroup(expr: ParserElement) -> ParserElement: - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty. - """ - return TokenConverter(expr).add_parse_action(lambda t: t[0]) - - -def locatedExpr(expr: ParserElement) -> ParserElement: - """ - (DEPRECATED - future code should use the :class:`Located` class) - Helper to decorate a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ``<TAB>`` characters, you - may want to call :class:`ParserElement.parse_with_tabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().set_parse_action(lambda ss, ll, tt: ll) - return Group( - locator("locn_start") - + expr("value") - + locator.copy().leaveWhitespace()("locn_end") - ) - - -def nested_expr( - opener: Union[str, ParserElement] = "(", - closer: Union[str, ParserElement] = ")", - content: typing.Optional[ParserElement] = None, - ignore_expr: ParserElement = quoted_string(), - *, - ignoreExpr: ParserElement = quoted_string(), -) -> ParserElement: - """Helper method for defining nested lists enclosed in opening and - closing delimiters (``"("`` and ``")"`` are the default). - - Parameters: - - - ``opener`` - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - ``closer`` - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - ``content`` - expression for items within the nested lists - (default= ``None``) - - ``ignore_expr`` - expression for ignoring opening and closing delimiters - (default= :class:`quoted_string`) - - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility - but will be removed in a future release - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignore_expr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quoted_string or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`.
The default is - :class:`quoted_string`, but if no expressions are to be ignored, then - pass ``None`` for this argument. - - Example:: - - data_type = one_of("void int short long char float double") - decl_data_type = Combine(data_type + Opt(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Opt(DelimitedList(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(c_style_comment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.search_string(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if ignoreExpr != ignore_expr: - ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, str_type) and isinstance(closer, str_type): - opener = typing.cast(str, opener) - closer = typing.cast(str, closer) - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS, - exact=1, - ) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = empty.copy() + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS - ).set_parse_action(lambda t: t[0].strip()) - else: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = Combine( - OneOrMore( - ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - raise ValueError( - "opening and closing arguments must be strings if no content expression is given" - ) - ret = Forward() - if ignoreExpr is not None: - ret <<= Group( - Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) - ) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.set_name(f"nested {opener}{closer} expression") - # don't override error message from content expressions - ret.errmsg = None - return ret - - -def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr, str_type): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas, alphanums + "_-:") - if xml: - tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) - + Opt("/", default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - else: - tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( - 
printables, exclude_chars=">" - ) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict( - ZeroOrMore( - Group( - tagAttrName.set_parse_action(lambda t: t[0].lower()) - + Opt(Suppress("=") + tagAttrValue) - ) - ) - ) - + Opt("/", default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False) - - openTag.set_name(f"<{resname}>") - # add start results name in parse action now that ungrouped names are not reported at two levels - openTag.add_parse_action( - lambda t: t.__setitem__( - "start" + "".join(resname.replace(":", " ").title().split()), t.copy() - ) - ) - closeTag = closeTag( - "end" + "".join(resname.replace(":", " ").title().split()) - ).set_name(f"</{resname}>") - openTag.tag = resname - closeTag.tag = resname - openTag.tag_body = SkipTo(closeTag()) - return openTag, closeTag - - -def make_html_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for HTML, - given a tag name. Matches tags in either upper or lower case, - attributes with namespaces and with quoted or unquoted values. - - Example:: - - text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' - # make_html_tags returns pyparsing expressions for the opening and - # closing tags as a 2-tuple - a, a_end = make_html_tags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.search_string(text): - # attributes in the <A> tag (like "href" shown here) are - # also accessible as named results - print(link.link_text, '->', link.href) - - prints:: - - pyparsing -> https://github.com/pyparsing/pyparsing/wiki - """ - return _makeTags(tag_str, False) - - -def make_xml_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for XML, - given a tag name. Matches tags only in the given upper/lower case. - - Example: similar to :class:`make_html_tags` - """ - return _makeTags(tag_str, True) - - -any_open_tag: ParserElement -any_close_tag: ParserElement -any_open_tag, any_close_tag = make_html_tags( - Word(alphas, alphanums + "_:").set_name("any tag") -) - -_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} -common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name( - "common HTML entity" -) - - -def replace_html_entity(s, l, t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - - -class OpAssoc(Enum): - """Enumeration of operator associativity - - used in constructing InfixNotationOperatorSpec for :class:`infix_notation`""" - - LEFT = 1 - RIGHT = 2 - - -InfixNotationOperatorArgType = Union[ - ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]] -] -InfixNotationOperatorSpec = Union[ - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - typing.Optional[ParseAction], - ], - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - ], -] - - -def infix_notation( - base_expr: ParserElement, - op_list: List[InfixNotationOperatorSpec], - lpar: Union[str, ParserElement] = Suppress("("), - rpar: Union[str, ParserElement] = Suppress(")"), -) -> ParserElement: - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary - or binary, left- or right-associative. Parse actions can also be - attached to operator expressions.
The generated parser will also - recognize the use of parentheses to override operator precedences - (see example below). - - Note: if you define a deep operator list, you may see performance - issues when using infix_notation. See - :class:`ParserElement.enable_packrat` for a mechanism to potentially - improve your parser performance. - - Parameters: - - - ``base_expr`` - expression representing the most basic operand to - be used in the expression - - ``op_list`` - list of tuples, one for each operator precedence level - in the expression grammar; each tuple is of the form ``(op_expr, - num_operands, right_left_assoc, (optional)parse_action)``, where: - - - ``op_expr`` is the pyparsing expression for the operator; may also - be a string, which will be converted to a Literal; if ``num_operands`` - is 3, ``op_expr`` is a tuple of two expressions, for the two - operators separating the 3 terms - - ``num_operands`` is the number of terms for this operator (must be 1, - 2, or 3) - - ``right_left_assoc`` is the indicator whether the operator is right - or left associative, using the pyparsing-defined constants - ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. - - ``parse_action`` is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``set_parse_action(*fn)`` - (:class:`ParserElement.set_parse_action`) - - ``lpar`` - expression for matching left-parentheses; if passed as a - str, then will be parsed as ``Suppress(lpar)``. If lpar is passed as - an expression (such as ``Literal('(')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress('(')``) - - ``rpar`` - expression for matching right-parentheses; if passed as a - str, then will be parsed as ``Suppress(rpar)``. If rpar is passed as - an expression (such as ``Literal(')')``), then it will be kept in - the parsed results, and grouped with them. 
(default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infix_notation(integer | varname, - [ - ('-', 1, OpAssoc.RIGHT), - (one_of('* /'), 2, OpAssoc.LEFT), - (one_of('+ -'), 2, OpAssoc.LEFT), - ]) - - arith_expr.run_tests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', full_dump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - (5+x)*y - [[[5, '+', 'x'], '*', 'y']] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.try_parse(instring, loc) - return loc, [] - - _FB.__name__ = "FollowedBy>" - - ret = Forward() - if isinstance(lpar, str): - lpar = Suppress(lpar) - if isinstance(rpar, str): - rpar = Suppress(rpar) - - # if lpar and rpar are not suppressed, wrap in group - if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)): - lastExpr = base_expr | Group(lpar + ret + rpar) - else: - lastExpr = base_expr | (lpar + ret + rpar) - - arity: int - rightLeftAssoc: opAssoc - pa: typing.Optional[ParseAction] - opExpr1: ParserElement - opExpr2: ParserElement - for operDef in op_list: - opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] # type: ignore[assignment] - if isinstance(opExpr, str_type): - opExpr = ParserElement._literalStringClass(opExpr) - opExpr = typing.cast(ParserElement, opExpr) - if arity == 3: - if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions" - ) - opExpr1, opExpr2 = opExpr - term_name = f"{opExpr1}{opExpr2} term" - else: - term_name = f"{opExpr} term" - - if not 1 <= arity <= 3: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - - if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): - raise ValueError("operator must indicate right or left associativity") - - thisExpr: ParserElement = Forward().set_name(term_name) - thisExpr = typing.cast(Forward, thisExpr) - if rightLeftAssoc is OpAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( - lastExpr + (opExpr + lastExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr - ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) - elif rightLeftAssoc is OpAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Opt): - opExpr = Opt(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( - lastExpr + (opExpr + thisExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group( - lastExpr + thisExpr[1, ...] 
- ) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr - ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.set_parse_action(*pa) - else: - matchExpr.set_parse_action(pa) - thisExpr <<= (matchExpr | lastExpr).setName(term_name) - lastExpr = thisExpr - ret <<= lastExpr - return ret - - -def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): - """ - (DEPRECATED - use :class:`IndentedBlock` class instead) - Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - ``blockStatementExpr`` - expression defining syntax of statement that - is repeated within the indented block - - ``indentStack`` - list created by caller to manage indentation stack - (multiple ``statementWithIndentedBlock`` expressions within a single - grammar should share a common ``indentStack``) - - ``indent`` - boolean indicating whether block must be indented beyond - the current level; set to ``False`` for block of left-most statements - (default= ``True``) - - A valid block must contain at least one ``blockStatement``. - - (Note that indentedBlock uses internal parse actions which make it - incompatible with packrat parsing.) - - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = stmt[1, ...] 
-
-        parseTree = module_body.parseString(data)
-        parseTree.pprint()
-
-    prints::
-
-        [['def',
-          'A',
-          ['(', 'z', ')'],
-          ':',
-          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
-         'B',
-         ['def',
-          'BB',
-          ['(', 'a', 'b', 'c', ')'],
-          ':',
-          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
-         'C',
-         'D',
-         ['def',
-          'spam',
-          ['(', 'x', 'y', ')'],
-          ':',
-          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
-    """
-    backup_stacks.append(indentStack[:])
-
-    def reset_stack():
-        indentStack[:] = backup_stacks[-1]
-
-    def checkPeerIndent(s, l, t):
-        if l >= len(s):
-            return
-        curCol = col(l, s)
-        if curCol != indentStack[-1]:
-            if curCol > indentStack[-1]:
-                raise ParseException(s, l, "illegal nesting")
-            raise ParseException(s, l, "not a peer entry")
-
-    def checkSubIndent(s, l, t):
-        curCol = col(l, s)
-        if curCol > indentStack[-1]:
-            indentStack.append(curCol)
-        else:
-            raise ParseException(s, l, "not a subentry")
-
-    def checkUnindent(s, l, t):
-        if l >= len(s):
-            return
-        curCol = col(l, s)
-        if not (indentStack and curCol in indentStack):
-            raise ParseException(s, l, "not an unindent")
-        if curCol < indentStack[-1]:
-            indentStack.pop()
-
-    NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
-    INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
-    PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
-    UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
-    if indent:
-        smExpr = Group(
-            Opt(NL)
-            + INDENT
-            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
-            + UNDENT
-        )
-    else:
-        smExpr = Group(
-            Opt(NL)
-            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
-            + Opt(UNDENT)
-        )
-
-    # add a parse action to remove backup_stack from list of backups
-    smExpr.add_parse_action(
-        lambda: backup_stacks.pop(-1) and None if backup_stacks else None
-    )
-    smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
-    blockStatementExpr.ignore(_bslash + LineEnd())
-    return smExpr.set_name("indented block")
-
-
-# it's easy to get these comment structures wrong - they're very common, so may as well make them available
-c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
-    "C style comment"
-)
-"Comment of the form ``/* ... */``"
-
-html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
-"Comment of the form ``<!-- ... -->``"
-
-rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
-dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
-"Comment of the form ``// ... (to end of line)``"
-
-cpp_style_comment = Combine(
-    Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
-).set_name("C++ style comment")
-"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
-
-java_style_comment = cpp_style_comment
-"Same as :class:`cpp_style_comment`"
-
-python_style_comment = Regex(r"#.*").set_name("Python style comment")
-"Comment of the form ``# ...
(to end of line)``" - - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - - -# compatibility function, superseded by DelimitedList class -def delimited_list( - expr: Union[str, ParserElement], - delim: Union[str, ParserElement] = ",", - combine: bool = False, - min: typing.Optional[int] = None, - max: typing.Optional[int] = None, - *, - allow_trailing_delim: bool = False, -) -> ParserElement: - """(DEPRECATED - use :class:`DelimitedList` class)""" - return DelimitedList( - expr, delim, combine, min, max, allow_trailing_delim=allow_trailing_delim - ) - - -# pre-PEP8 compatible names -# fmt: off -opAssoc = OpAssoc -anyOpenTag = any_open_tag -anyCloseTag = any_close_tag -commonHTMLEntity = common_html_entity -cStyleComment = c_style_comment -htmlComment = html_comment -restOfLine = rest_of_line -dblSlashComment = dbl_slash_comment -cppStyleComment = cpp_style_comment -javaStyleComment = java_style_comment -pythonStyleComment = python_style_comment -delimitedList = replaced_by_pep8("delimitedList", DelimitedList) -delimited_list = replaced_by_pep8("delimited_list", DelimitedList) -countedArray = replaced_by_pep8("countedArray", counted_array) -matchPreviousLiteral = replaced_by_pep8("matchPreviousLiteral", match_previous_literal) -matchPreviousExpr = replaced_by_pep8("matchPreviousExpr", match_previous_expr) -oneOf = replaced_by_pep8("oneOf", one_of) -dictOf = replaced_by_pep8("dictOf", dict_of) -originalTextFor = replaced_by_pep8("originalTextFor", original_text_for) -nestedExpr = replaced_by_pep8("nestedExpr", nested_expr) -makeHTMLTags = replaced_by_pep8("makeHTMLTags", make_html_tags) -makeXMLTags = replaced_by_pep8("makeXMLTags", make_xml_tags) -replaceHTMLEntity = replaced_by_pep8("replaceHTMLEntity", replace_html_entity) -infixNotation = replaced_by_pep8("infixNotation", infix_notation) -# fmt: on diff --git a/src/pip/_vendor/pyparsing/py.typed b/src/pip/_vendor/pyparsing/py.typed deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/pip/_vendor/pyparsing/results.py b/src/pip/_vendor/pyparsing/results.py deleted file mode 100644 index 3e5fe2089bd..00000000000 --- a/src/pip/_vendor/pyparsing/results.py +++ /dev/null @@ -1,797 +0,0 @@ -# results.py -from collections.abc import ( - MutableMapping, - Mapping, - MutableSequence, - Iterator, - Sequence, - Container, -) -import pprint -from typing import Tuple, Any, Dict, Set, List - -str_type: Tuple[type, ...] = (str, bytes) -_generator_type = type((_ for _ in ())) - - -class _ParseResultsWithOffset: - tup: Tuple["ParseResults", int] - __slots__ = ["tup"] - - def __init__(self, p1: "ParseResults", p2: int): - self.tup: Tuple[ParseResults, int] = (p1, p2) - - def __getitem__(self, i): - return self.tup[i] - - def __getstate__(self): - return self.tup - - def __setstate__(self, *args): - self.tup = args[0] - - -class ParseResults: - """Structured parse results, to provide multiple means of access to - the parsed data: - - - as a list (``len(results)``) - - by list index (``results[0], results[1]``, etc.) 
- - by attribute (``results.`` - see :class:`ParserElement.set_results_name`) - - Example:: - - integer = Word(nums) - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - # equivalent form: - # date_str = (integer("year") + '/' - # + integer("month") + '/' - # + integer("day")) - - # parse_string returns a ParseResults object - result = date_str.parse_string("1999/12/31") - - def test(s, fn=repr): - print(f"{s} -> {fn(eval(s))}") - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - - prints:: - - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' - """ - - _null_values: Tuple[Any, ...] = (None, [], ()) - - _name: str - _parent: "ParseResults" - _all_names: Set[str] - _modal: bool - _toklist: List[Any] - _tokdict: Dict[str, Any] - - __slots__ = ( - "_name", - "_parent", - "_all_names", - "_modal", - "_toklist", - "_tokdict", - ) - - class List(list): - """ - Simple wrapper class to distinguish parsed list results that should be preserved - as actual Python lists, instead of being converted to :class:`ParseResults`:: - - LBRACK, RBRACK = map(pp.Suppress, "[]") - element = pp.Forward() - item = ppc.integer - element_list = LBRACK + pp.DelimitedList(element) + RBRACK - - # add parse actions to convert from ParseResults to actual Python collection types - def as_python_list(t): - return pp.ParseResults.List(t.as_list()) - element_list.add_parse_action(as_python_list) - - element <<= item | element_list - - element.run_tests(''' - 100 - [2,3,4] - [[2, 1],3,4] - [(2, 1),3,4] - (2,3,4) - ''', post_parse=lambda s, r: (r[0], type(r[0]))) - - prints:: - - 100 - (100, ) - - [2,3,4] - ([2, 3, 4], ) - - [[2, 1],3,4] - ([[2, 1], 3, 4], ) - - (Used internally by :class:`Group` when `aslist=True`.) 
- """ - - def __new__(cls, contained=None): - if contained is None: - contained = [] - - if not isinstance(contained, list): - raise TypeError( - f"{cls.__name__} may only be constructed with a list, not {type(contained).__name__}" - ) - - return list.__new__(cls) - - def __new__(cls, toklist=None, name=None, **kwargs): - if isinstance(toklist, ParseResults): - return toklist - self = object.__new__(cls) - self._name = None - self._parent = None - self._all_names = set() - - if toklist is None: - self._toklist = [] - elif isinstance(toklist, (list, _generator_type)): - self._toklist = ( - [toklist[:]] - if isinstance(toklist, ParseResults.List) - else list(toklist) - ) - else: - self._toklist = [toklist] - self._tokdict = dict() - return self - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance - ): - self._tokdict: Dict[str, _ParseResultsWithOffset] - self._modal = modal - - if name is None or name == "": - return - - if isinstance(name, int): - name = str(name) - - if not modal: - self._all_names = {name} - - self._name = name - - if toklist in self._null_values: - return - - if isinstance(toklist, (str_type, type)): - toklist = [toklist] - - if asList: - if isinstance(toklist, ParseResults): - self[name] = _ParseResultsWithOffset(ParseResults(toklist._toklist), 0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) - self[name]._name = name - return - - try: - self[name] = toklist[0] - except (KeyError, TypeError, IndexError): - if toklist is not self: - self[name] = toklist - else: - self._name = name - - def __getitem__(self, i): - if isinstance(i, (int, slice)): - return self._toklist[i] - - if i not in self._all_names: - return self._tokdict[i][-1][0] - - return ParseResults([v[0] for v in self._tokdict[i]]) - - def __setitem__(self, k, v, isinstance=isinstance): - if isinstance(v, _ParseResultsWithOffset): - self._tokdict[k] = self._tokdict.get(k, list()) + [v] - sub = v[0] - elif isinstance(k, (int, slice)): - self._toklist[k] = v - sub = v - else: - self._tokdict[k] = self._tokdict.get(k, list()) + [ - _ParseResultsWithOffset(v, 0) - ] - sub = v - if isinstance(sub, ParseResults): - sub._parent = self - - def __delitem__(self, i): - if not isinstance(i, (int, slice)): - del self._tokdict[i] - return - - mylen = len(self._toklist) - del self._toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i + 1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for occurrences in self._tokdict.values(): - for j in removed: - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position - (position > j) - ) - - def __contains__(self, k) -> bool: - return k in self._tokdict - - def __len__(self) -> int: - return len(self._toklist) - - def __bool__(self) -> bool: - return not not (self._toklist or self._tokdict) - - def __iter__(self) -> Iterator: - return iter(self._toklist) - - def __reversed__(self) -> Iterator: - return iter(self._toklist[::-1]) - - def keys(self): - return iter(self._tokdict) - - def values(self): - return (self[k] for k in self.keys()) - - def items(self): - return ((k, self[k]) for k in self.keys()) - - def haskeys(self) -> bool: - """ - Since ``keys()`` returns an iterator, this method is helpful in bypassing - 
code that looks for the existence of any defined results names.""" - return not not self._tokdict - - def pop(self, *args, **kwargs): - """ - Removes and returns item at specified index (default= ``last``). - Supports both ``list`` and ``dict`` semantics for ``pop()``. If - passed no argument or an integer argument, it will use ``list`` - semantics and pop tokens from the list of parsed tokens. If passed - a non-integer argument (most likely a string), it will use ``dict`` - semantics and pop the corresponding value from any defined results - names. A second default return value argument is supported, just as in - ``dict.pop()``. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - def remove_first(tokens): - tokens.pop(0) - numlist.add_parse_action(remove_first) - print(numlist.parse_string("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + Word(nums)[1, ...] - print(patt.parse_string("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.add_parse_action(remove_LABEL) - print(patt.parse_string("AAB 123 321").dump()) - - prints:: - - ['AAB', '123', '321'] - - LABEL: 'AAB' - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k, v in kwargs.items(): - if k == "default": - args = (args[0], v) - else: - raise TypeError(f"pop() got an unexpected keyword argument {k!r}") - if isinstance(args[0], int) or len(args) == 1 or args[0] in self: - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, default_value=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given ``default_value`` or ``None`` if no - ``default_value`` is specified. - - Similar to ``dict.get()``. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return default_value - - def insert(self, index, ins_string): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to ``list.insert()``. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - numlist.add_parse_action(insert_locn) - print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321'] - """ - self._toklist.insert(index, ins_string) - # fixup indices in token dictionary - for occurrences in self._tokdict.values(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position + (position > index) - ) - - def append(self, item): - """ - Add single element to end of ``ParseResults`` list of elements. - - Example:: - - numlist = Word(nums)[...] 
- print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - numlist.add_parse_action(append_sum) - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444] - """ - self._toklist.append(item) - - def extend(self, itemseq): - """ - Add sequence of elements to end of ``ParseResults`` list of elements. - - Example:: - - patt = Word(alphas)[1, ...] - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - patt.add_parse_action(make_palindrome) - print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self.__iadd__(itemseq) - else: - self._toklist.extend(itemseq) - - def clear(self): - """ - Clear all elements and results names. - """ - del self._toklist[:] - self._tokdict.clear() - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - if name.startswith("__"): - raise AttributeError(name) - return "" - - def __add__(self, other: "ParseResults") -> "ParseResults": - ret = self.copy() - ret += other - return ret - - def __iadd__(self, other: "ParseResults") -> "ParseResults": - if not other: - return self - - if other._tokdict: - offset = len(self._toklist) - addoffset = lambda a: offset if a < 0 else a + offset - otheritems = other._tokdict.items() - otherdictitems = [ - (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) - for k, vlist in otheritems - for v in vlist - ] - for k, v in otherdictitems: - self[k] = v - if isinstance(v[0], ParseResults): - v[0]._parent = self - - self._toklist += other._toklist - self._all_names |= other._all_names - return self - - def __radd__(self, other) -> "ParseResults": - if isinstance(other, int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__(self) -> str: - return f"{type(self).__name__}({self._toklist!r}, {self.as_dict()})" - - def __str__(self) -> str: - return ( - "[" - + ", ".join( - [ - str(i) if isinstance(i, ParseResults) else repr(i) - for i in self._toklist - ] - ) - + "]" - ) - - def _asStringList(self, sep=""): - out = [] - for item in self._toklist: - if out and sep: - out.append(sep) - if isinstance(item, ParseResults): - out += item._asStringList() - else: - out.append(str(item)) - return out - - def as_list(self) -> list: - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. - - Example:: - - patt = Word(alphas)[1, ...] - result = patt.parse_string("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] - - # Use as_list() to create an actual list - result_list = result.as_list() - print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [ - res.as_list() if isinstance(res, ParseResults) else res - for res in self._toklist - ] - - def as_dict(self) -> dict: - """ - Returns the named parse results as a nested dictionary. 
- - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('12/31/1999') - print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.as_dict() - print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable - print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - - def to_item(obj): - if isinstance(obj, ParseResults): - return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] - else: - return obj - - return dict((k, to_item(v)) for k, v in self.items()) - - def copy(self) -> "ParseResults": - """ - Returns a new shallow copy of a :class:`ParseResults` object. `ParseResults` - items contained within the source are shared with the copy. Use - :class:`ParseResults.deepcopy()` to create a copy with its own separate - content values. - """ - ret = ParseResults(self._toklist) - ret._tokdict = self._tokdict.copy() - ret._parent = self._parent - ret._all_names |= self._all_names - ret._name = self._name - return ret - - def deepcopy(self) -> "ParseResults": - """ - Returns a new deep copy of a :class:`ParseResults` object. - """ - ret = self.copy() - # replace values with copies if they are of known mutable types - for i, obj in enumerate(self._toklist): - if isinstance(obj, ParseResults): - self._toklist[i] = obj.deepcopy() - elif isinstance(obj, (str, bytes)): - pass - elif isinstance(obj, MutableMapping): - self._toklist[i] = dest = type(obj)() - for k, v in obj.items(): - dest[k] = v.deepcopy() if isinstance(v, ParseResults) else v - elif isinstance(obj, Container): - self._toklist[i] = type(obj)( - v.deepcopy() if isinstance(v, ParseResults) else v for v in obj - ) - return ret - - def get_name(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. - - Example:: - - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = user_data[1, ...] - - result = user_info.parse_string("22 111-22-3333 #221B") - for item in result: - print(item.get_name(), ':', item[0]) - - prints:: - - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self._name: - return self._name - elif self._parent: - par: "ParseResults" = self._parent - parent_tokdict_items = par._tokdict.items() - return next( - ( - k - for k, vlist in parent_tokdict_items - for v, loc in vlist - if v is self - ), - None, - ) - elif ( - len(self) == 1 - and len(self._tokdict) == 1 - and next(iter(self._tokdict.values()))[0][1] in (0, -1) - ): - return next(iter(self._tokdict.keys())) - else: - return None - - def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: - """ - Diagnostic method for listing out the contents of - a :class:`ParseResults`. Accepts an optional ``indent`` argument so - that this string can be embedded in a nested display of other data. 
- - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('1999/12/31') - print(result.dump()) - - prints:: - - ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' - """ - out = [] - NL = "\n" - out.append(indent + str(self.as_list()) if include_list else "") - - if not full: - return "".join(out) - - if self.haskeys(): - items = sorted((str(k), v) for k, v in self.items()) - for k, v in items: - if out: - out.append(NL) - out.append(f"{indent}{(' ' * _depth)}- {k}: ") - if not isinstance(v, ParseResults): - out.append(repr(v)) - continue - - if not v: - out.append(str(v)) - continue - - out.append( - v.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ) - ) - if not any(isinstance(vv, ParseResults) for vv in self): - return "".join(out) - - v = self - incr = " " - nl = "\n" - for i, vv in enumerate(v): - if isinstance(vv, ParseResults): - vv_dump = vv.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ) - out.append( - f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv_dump}" - ) - else: - out.append( - f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv}" - ) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the - `pprint `_ module. - Accepts additional positional or keyword args as defined for - `pprint.pprint `_ . - - Example:: - - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(DelimitedList(term))) - result = func.parse_string("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - - prints:: - - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.as_list(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return ( - self._toklist, - ( - self._tokdict.copy(), - None, - self._all_names, - self._name, - ), - ) - - def __setstate__(self, state): - self._toklist, (self._tokdict, par, inAccumNames, self._name) = state - self._all_names = set(inAccumNames) - self._parent = None - - def __getnewargs__(self): - return self._toklist, self._name - - def __dir__(self): - return dir(type(self)) + list(self.keys()) - - @classmethod - def from_dict(cls, other, name=None) -> "ParseResults": - """ - Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the - name-value relations as results names. If an optional ``name`` argument is - given, a nested ``ParseResults`` will be returned. 
- """ - - def is_iterable(obj): - try: - iter(obj) - except Exception: - return False - # str's are iterable, but in pyparsing, we don't want to iterate over them - else: - return not isinstance(obj, str_type) - - ret = cls([]) - for k, v in other.items(): - if isinstance(v, Mapping): - ret += cls.from_dict(v, name=k) - else: - ret += cls([v], name=k, asList=is_iterable(v)) - if name is not None: - ret = cls([ret], name=name) - return ret - - asList = as_list - """Deprecated - use :class:`as_list`""" - asDict = as_dict - """Deprecated - use :class:`as_dict`""" - getName = get_name - """Deprecated - use :class:`get_name`""" - - -MutableMapping.register(ParseResults) -MutableSequence.register(ParseResults) diff --git a/src/pip/_vendor/pyparsing/testing.py b/src/pip/_vendor/pyparsing/testing.py deleted file mode 100644 index 5654d47d62d..00000000000 --- a/src/pip/_vendor/pyparsing/testing.py +++ /dev/null @@ -1,345 +0,0 @@ -# testing.py - -from contextlib import contextmanager -import re -import typing - - -from .core import ( - ParserElement, - ParseException, - Keyword, - __diag__, - __compat__, -) - - -class pyparsing_test: - """ - namespace class for classes useful in writing unit tests - """ - - class reset_pyparsing_context: - """ - Context manager to be used when writing unit tests that modify pyparsing config values: - - packrat parsing - - bounded recursion parsing - - default whitespace characters. - - default keyword characters - - literal string auto-conversion class - - __diag__ settings - - Example:: - - with reset_pyparsing_context(): - # test that literals used to construct a grammar are automatically suppressed - ParserElement.inlineLiteralsUsing(Suppress) - - term = Word(alphas) | Word(nums) - group = Group('(' + term[...] + ')') - - # assert that the '()' characters are not included in the parsed tokens - self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) - - # after exiting context manager, literals are converted to Literal expressions again - """ - - def __init__(self): - self._save_context = {} - - def save(self): - self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS - self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - - self._save_context["literal_string_class"] = ( - ParserElement._literalStringClass - ) - - self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace - - self._save_context["packrat_enabled"] = ParserElement._packratEnabled - if ParserElement._packratEnabled: - self._save_context["packrat_cache_size"] = ( - ParserElement.packrat_cache.size - ) - else: - self._save_context["packrat_cache_size"] = None - self._save_context["packrat_parse"] = ParserElement._parse - self._save_context["recursion_enabled"] = ( - ParserElement._left_recursion_enabled - ) - - self._save_context["__diag__"] = { - name: getattr(__diag__, name) for name in __diag__._all_names - } - - self._save_context["__compat__"] = { - "collect_all_And_tokens": __compat__.collect_all_And_tokens - } - - return self - - def restore(self): - # reset pyparsing global state - if ( - ParserElement.DEFAULT_WHITE_CHARS - != self._save_context["default_whitespace"] - ): - ParserElement.set_default_whitespace_chars( - self._save_context["default_whitespace"] - ) - - ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] - - Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] - ParserElement.inlineLiteralsUsing( - self._save_context["literal_string_class"] - ) - - for 
name, value in self._save_context["__diag__"].items(): - (__diag__.enable if value else __diag__.disable)(name) - - ParserElement._packratEnabled = False - if self._save_context["packrat_enabled"]: - ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) - else: - ParserElement._parse = self._save_context["packrat_parse"] - ParserElement._left_recursion_enabled = self._save_context[ - "recursion_enabled" - ] - - __compat__.collect_all_And_tokens = self._save_context["__compat__"] - - return self - - def copy(self): - ret = type(self)() - ret._save_context.update(self._save_context) - return ret - - def __enter__(self): - return self.save() - - def __exit__(self, *args): - self.restore() - - class TestParseResultsAsserts: - """ - A mixin class to add parse results assertion methods to normal unittest.TestCase classes. - """ - - def assertParseResultsEquals( - self, result, expected_list=None, expected_dict=None, msg=None - ): - """ - Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, - and compare any defined results names with an optional ``expected_dict``. - """ - if expected_list is not None: - self.assertEqual(expected_list, result.as_list(), msg=msg) - if expected_dict is not None: - self.assertEqual(expected_dict, result.as_dict(), msg=msg) - - def assertParseAndCheckList( - self, expr, test_string, expected_list, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. - """ - result = expr.parse_string(test_string, parse_all=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. - """ - result = expr.parse_string(test_string, parseAll=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of - list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped - with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. - Finally, asserts that the overall ``runTests()`` success value is ``True``. 
- - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is None: - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - return - - merged = [ - (*rpt, expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next((exp for exp in expected if isinstance(exp, str)), None) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? - print(f"no validation for {test_string!r}") - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException( - self, exc_type=ParseException, expected_msg=None, msg=None - ): - if expected_msg is not None: - if isinstance(expected_msg, str): - expected_msg = re.escape(expected_msg) - with self.assertRaisesRegex(exc_type, expected_msg, msg=msg) as ctx: - yield ctx - - else: - with self.assertRaises(exc_type, msg=msg) as ctx: - yield ctx - - @staticmethod - def with_line_numbers( - s: str, - start_line: typing.Optional[int] = None, - end_line: typing.Optional[int] = None, - expand_tabs: bool = True, - eol_mark: str = "|", - mark_spaces: typing.Optional[str] = None, - mark_control: typing.Optional[str] = None, - ) -> str: - """ - Helpful method for debugging a parser - prints a string with line and column numbers. - (Line and column numbers are 1-based.) 
- - :param s: tuple(bool, str - string to be printed with line and column numbers - :param start_line: int - (optional) starting line number in s to print (default=1) - :param end_line: int - (optional) ending line number in s to print (default=len(s)) - :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default - :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") - :param mark_spaces: str - (optional) special character to display in place of spaces - :param mark_control: str - (optional) convert non-printing control characters to a placeholding - character; valid values: - - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" - - any single character string - replace control characters with given string - - None (default) - string is displayed as-is - - :return: str - input string with leading line numbers and column number headers - """ - if expand_tabs: - s = s.expandtabs() - if mark_control is not None: - mark_control = typing.cast(str, mark_control) - if mark_control == "unicode": - transtable_map = { - c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433)) - } - transtable_map[127] = 0x2421 - tbl = str.maketrans(transtable_map) - eol_mark = "" - else: - ord_mark_control = ord(mark_control) - tbl = str.maketrans( - {c: ord_mark_control for c in list(range(0, 32)) + [127]} - ) - s = s.translate(tbl) - if mark_spaces is not None and mark_spaces != " ": - if mark_spaces == "unicode": - tbl = str.maketrans({9: 0x2409, 32: 0x2423}) - s = s.translate(tbl) - else: - s = s.replace(" ", mark_spaces) - if start_line is None: - start_line = 1 - if end_line is None: - end_line = len(s) - end_line = min(end_line, len(s)) - start_line = min(max(1, start_line), end_line) - - if mark_control != "unicode": - s_lines = s.splitlines()[start_line - 1 : end_line] - else: - s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] - if not s_lines: - return "" - - lineno_width = len(str(end_line)) - max_line_len = max(len(line) for line in s_lines) - lead = " " * (lineno_width + 1) - if max_line_len >= 99: - header0 = ( - lead - + "".join( - f"{' ' * 99}{(i + 1) % 100}" - for i in range(max(max_line_len // 100, 1)) - ) - + "\n" - ) - else: - header0 = "" - header1 = ( - header0 - + lead - + "".join(f" {(i + 1) % 10}" for i in range(-(-max_line_len // 10))) - + "\n" - ) - header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" - return ( - header1 - + header2 - + "\n".join( - f"{i:{lineno_width}d}:{line}{eol_mark}" - for i, line in enumerate(s_lines, start=start_line) - ) - + "\n" - ) diff --git a/src/pip/_vendor/pyparsing/unicode.py b/src/pip/_vendor/pyparsing/unicode.py deleted file mode 100644 index e0b7b4ccb9a..00000000000 --- a/src/pip/_vendor/pyparsing/unicode.py +++ /dev/null @@ -1,354 +0,0 @@ -# unicode.py - -import sys -from itertools import filterfalse -from typing import List, Tuple, Union - - -class _lazyclassproperty: - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, "_intern") or any( - cls._intern is getattr(superclass, "_intern", []) - for superclass in cls.__mro__[1:] - ): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] - - -class 
unicode_set: - """ - A set of Unicode characters, for language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``. Ranges can be specified using - 2-tuples or a 1-tuple, such as:: - - _ranges = [ - (0x0020, 0x007e), - (0x00a0, 0x00ff), - (0x0100,), - ] - - Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - - _ranges: UnicodeRangeList = [] - - @_lazyclassproperty - def _chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in getattr(cc, "_ranges", ()): - ret.extend(range(rr[0], rr[-1] + 1)) - return [chr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - """all non-whitespace characters in this range""" - return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphas(cls): - """all alphabetic characters in this range""" - return "".join(filter(str.isalpha, cls._chars_for_ranges)) - - @_lazyclassproperty - def nums(cls): - """all numeric digit characters in this range""" - return "".join(filter(str.isdigit, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphanums(cls): - """all alphanumeric characters in this range""" - return cls.alphas + cls.nums - - @_lazyclassproperty - def identchars(cls): - """all characters in this range that are valid identifier characters, plus underscore '_'""" - return "".join( - sorted( - set( - "".join(filter(str.isidentifier, cls._chars_for_ranges)) - + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" - + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" - + "_" - ) - ) - ) - - @_lazyclassproperty - def identbodychars(cls): - """ - all characters in this range that are valid identifier body characters, - plus the digits 0-9, and · (Unicode MIDDLE DOT) - """ - identifier_chars = set( - c for c in cls._chars_for_ranges if ("_" + c).isidentifier() - ) - return "".join(sorted(identifier_chars | set(cls.identchars + "0123456789·"))) - - @_lazyclassproperty - def identifier(cls): - """ - a pyparsing Word expression for an identifier using this range's definitions for - identchars and identbodychars - """ - from pip._vendor.pyparsing import Word - - return Word(cls.identchars, cls.identbodychars) - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
- """ - - # fmt: off - - # define ranges in language character sets - _ranges: UnicodeRangeList = [ - (0x0020, sys.maxunicode), - ] - - class BasicMultilingualPlane(unicode_set): - """Unicode set for the Basic Multilingual Plane""" - _ranges: UnicodeRangeList = [ - (0x0020, 0xFFFF), - ] - - class Latin1(unicode_set): - """Unicode set for Latin-1 Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x0020, 0x007E), - (0x00A0, 0x00FF), - ] - - class LatinA(unicode_set): - """Unicode set for Latin-A Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x0100, 0x017F), - ] - - class LatinB(unicode_set): - """Unicode set for Latin-B Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x0180, 0x024F), - ] - - class Greek(unicode_set): - """Unicode set for Greek Unicode Character Ranges""" - _ranges: UnicodeRangeList = [ - (0x0342, 0x0345), - (0x0370, 0x0377), - (0x037A, 0x037F), - (0x0384, 0x038A), - (0x038C,), - (0x038E, 0x03A1), - (0x03A3, 0x03E1), - (0x03F0, 0x03FF), - (0x1D26, 0x1D2A), - (0x1D5E,), - (0x1D60,), - (0x1D66, 0x1D6A), - (0x1F00, 0x1F15), - (0x1F18, 0x1F1D), - (0x1F20, 0x1F45), - (0x1F48, 0x1F4D), - (0x1F50, 0x1F57), - (0x1F59,), - (0x1F5B,), - (0x1F5D,), - (0x1F5F, 0x1F7D), - (0x1F80, 0x1FB4), - (0x1FB6, 0x1FC4), - (0x1FC6, 0x1FD3), - (0x1FD6, 0x1FDB), - (0x1FDD, 0x1FEF), - (0x1FF2, 0x1FF4), - (0x1FF6, 0x1FFE), - (0x2129,), - (0x2719, 0x271A), - (0xAB65,), - (0x10140, 0x1018D), - (0x101A0,), - (0x1D200, 0x1D245), - (0x1F7A1, 0x1F7A7), - ] - - class Cyrillic(unicode_set): - """Unicode set for Cyrillic Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x0400, 0x052F), - (0x1C80, 0x1C88), - (0x1D2B,), - (0x1D78,), - (0x2DE0, 0x2DFF), - (0xA640, 0xA672), - (0xA674, 0xA69F), - (0xFE2E, 0xFE2F), - ] - - class Chinese(unicode_set): - """Unicode set for Chinese Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x2E80, 0x2E99), - (0x2E9B, 0x2EF3), - (0x31C0, 0x31E3), - (0x3400, 0x4DB5), - (0x4E00, 0x9FEF), - (0xA700, 0xA707), - (0xF900, 0xFA6D), - (0xFA70, 0xFAD9), - (0x16FE2, 0x16FE3), - (0x1F210, 0x1F212), - (0x1F214, 0x1F23B), - (0x1F240, 0x1F248), - (0x20000, 0x2A6D6), - (0x2A700, 0x2B734), - (0x2B740, 0x2B81D), - (0x2B820, 0x2CEA1), - (0x2CEB0, 0x2EBE0), - (0x2F800, 0x2FA1D), - ] - - class Japanese(unicode_set): - """Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges""" - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x4E00, 0x9FBF), - (0x3000, 0x303F), - ] - - class Hiragana(unicode_set): - """Unicode set for Hiragana Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x3041, 0x3096), - (0x3099, 0x30A0), - (0x30FC,), - (0xFF70,), - (0x1B001,), - (0x1B150, 0x1B152), - (0x1F200,), - ] - - class Katakana(unicode_set): - """Unicode set for Katakana Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x3099, 0x309C), - (0x30A0, 0x30FF), - (0x31F0, 0x31FF), - (0x32D0, 0x32FE), - (0xFF65, 0xFF9F), - (0x1B000,), - (0x1B164, 0x1B167), - (0x1F201, 0x1F202), - (0x1F213,), - ] - - 漢字 = Kanji - カタカナ = Katakana - ひらがな = Hiragana - - _ranges = ( - Kanji._ranges - + Hiragana._ranges - + Katakana._ranges - ) - - class Hangul(unicode_set): - """Unicode set for Hangul (Korean) Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x1100, 0x11FF), - (0x302E, 0x302F), - (0x3131, 0x318E), - (0x3200, 0x321C), - (0x3260, 0x327B), - (0x327E,), - (0xA960, 0xA97C), - (0xAC00, 0xD7A3), - (0xD7B0, 0xD7C6), - (0xD7CB, 
0xD7FB), - (0xFFA0, 0xFFBE), - (0xFFC2, 0xFFC7), - (0xFFCA, 0xFFCF), - (0xFFD2, 0xFFD7), - (0xFFDA, 0xFFDC), - ] - - Korean = Hangul - - class CJK(Chinese, Japanese, Hangul): - """Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range""" - - class Thai(unicode_set): - """Unicode set for Thai Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x0E01, 0x0E3A), - (0x0E3F, 0x0E5B) - ] - - class Arabic(unicode_set): - """Unicode set for Arabic Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x0600, 0x061B), - (0x061E, 0x06FF), - (0x0700, 0x077F), - ] - - class Hebrew(unicode_set): - """Unicode set for Hebrew Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x0591, 0x05C7), - (0x05D0, 0x05EA), - (0x05EF, 0x05F4), - (0xFB1D, 0xFB36), - (0xFB38, 0xFB3C), - (0xFB3E,), - (0xFB40, 0xFB41), - (0xFB43, 0xFB44), - (0xFB46, 0xFB4F), - ] - - class Devanagari(unicode_set): - """Unicode set for Devanagari Unicode Character Range""" - _ranges: UnicodeRangeList = [ - (0x0900, 0x097F), - (0xA8E0, 0xA8FF) - ] - - BMP = BasicMultilingualPlane - - # add language identifiers using language Unicode - العربية = Arabic - 中文 = Chinese - кириллица = Cyrillic - Ελληνικά = Greek - עִברִית = Hebrew - 日本語 = Japanese - 한국어 = Korean - ไทย = Thai - देवनागरी = Devanagari - - # fmt: on diff --git a/src/pip/_vendor/pyparsing/util.py b/src/pip/_vendor/pyparsing/util.py deleted file mode 100644 index 4ae018a9636..00000000000 --- a/src/pip/_vendor/pyparsing/util.py +++ /dev/null @@ -1,277 +0,0 @@ -# util.py -import inspect -import warnings -import types -import collections -import itertools -from functools import lru_cache, wraps -from typing import Callable, List, Union, Iterable, TypeVar, cast - -_bslash = chr(92) -C = TypeVar("C", bound=Callable) - - -class __config_flags: - """Internal class for defining compatibility and debugging flags""" - - _all_names: List[str] = [] - _fixed_names: List[str] = [] - _type_desc = "configuration" - - @classmethod - def _set(cls, dname, value): - if dname in cls._fixed_names: - warnings.warn( - f"{cls.__name__}.{dname} {cls._type_desc} is {str(getattr(cls, dname)).upper()}" - f" and cannot be overridden", - stacklevel=3, - ) - return - if dname in cls._all_names: - setattr(cls, dname, value) - else: - raise ValueError(f"no such {cls._type_desc} {dname!r}") - - enable = classmethod(lambda cls, name: cls._set(name, True)) - disable = classmethod(lambda cls, name: cls._set(name, False)) - - -@lru_cache(maxsize=128) -def col(loc: int, strg: str) -> int: - """ - Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See - :class:`ParserElement.parse_string` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - """ - s = strg - return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc) - - -@lru_cache(maxsize=128) -def lineno(loc: int, strg: str) -> int: - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note - the default parsing behavior is to expand tabs in the input string - before starting the parsing process. 
See :class:`ParserElement.parse_string`
-    for more information on parsing strings containing ``<TAB>`` s, and
-    suggested methods to maintain a consistent view of the parsed string, the
-    parse location, and line and column positions within the parsed string.
-    """
-    return strg.count("\n", 0, loc) + 1
-
-
-@lru_cache(maxsize=128)
-def line(loc: int, strg: str) -> str:
-    """
-    Returns the line of text containing loc within a string, counting newlines as line separators.
-    """
-    last_cr = strg.rfind("\n", 0, loc)
-    next_cr = strg.find("\n", loc)
-    return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
-
-
-class _UnboundedCache:
-    def __init__(self):
-        cache = {}
-        cache_get = cache.get
-        self.not_in_cache = not_in_cache = object()
-
-        def get(_, key):
-            return cache_get(key, not_in_cache)
-
-        def set_(_, key, value):
-            cache[key] = value
-
-        def clear(_):
-            cache.clear()
-
-        self.size = None
-        self.get = types.MethodType(get, self)
-        self.set = types.MethodType(set_, self)
-        self.clear = types.MethodType(clear, self)
-
-
-class _FifoCache:
-    def __init__(self, size):
-        self.not_in_cache = not_in_cache = object()
-        cache = {}
-        keyring = [object()] * size
-        cache_get = cache.get
-        cache_pop = cache.pop
-        keyiter = itertools.cycle(range(size))
-
-        def get(_, key):
-            return cache_get(key, not_in_cache)
-
-        def set_(_, key, value):
-            cache[key] = value
-            i = next(keyiter)
-            cache_pop(keyring[i], None)
-            keyring[i] = key
-
-        def clear(_):
-            cache.clear()
-            keyring[:] = [object()] * size
-
-        self.size = size
-        self.get = types.MethodType(get, self)
-        self.set = types.MethodType(set_, self)
-        self.clear = types.MethodType(clear, self)
-
-
-class LRUMemo:
-    """
-    A memoizing mapping that retains `capacity` deleted items
-
-    The memo tracks retained items by their access order; once `capacity` items
-    are retained, the least recently used item is discarded.
- """ - - def __init__(self, capacity): - self._capacity = capacity - self._active = {} - self._memory = collections.OrderedDict() - - def __getitem__(self, key): - try: - return self._active[key] - except KeyError: - self._memory.move_to_end(key) - return self._memory[key] - - def __setitem__(self, key, value): - self._memory.pop(key, None) - self._active[key] = value - - def __delitem__(self, key): - try: - value = self._active.pop(key) - except KeyError: - pass - else: - while len(self._memory) >= self._capacity: - self._memory.popitem(last=False) - self._memory[key] = value - - def clear(self): - self._active.clear() - self._memory.clear() - - -class UnboundedMemo(dict): - """ - A memoizing mapping that retains all deleted items - """ - - def __delitem__(self, key): - pass - - -def _escape_regex_range_chars(s: str) -> str: - # escape these chars: ^-[] - for c in r"\^-[]": - s = s.replace(c, _bslash + c) - s = s.replace("\n", r"\n") - s = s.replace("\t", r"\t") - return str(s) - - -def _collapse_string_to_ranges( - s: Union[str, Iterable[str]], re_escape: bool = True -) -> str: - def is_consecutive(c): - c_int = ord(c) - is_consecutive.prev, prev = c_int, is_consecutive.prev - if c_int - prev > 1: - is_consecutive.value = next(is_consecutive.counter) - return is_consecutive.value - - is_consecutive.prev = 0 # type: ignore [attr-defined] - is_consecutive.counter = itertools.count() # type: ignore [attr-defined] - is_consecutive.value = -1 # type: ignore [attr-defined] - - def escape_re_range_char(c): - return "\\" + c if c in r"\^-][" else c - - def no_escape_re_range_char(c): - return c - - if not re_escape: - escape_re_range_char = no_escape_re_range_char - - ret = [] - s = "".join(sorted(set(s))) - if len(s) > 3: - for _, chars in itertools.groupby(s, key=is_consecutive): - first = last = next(chars) - last = collections.deque( - itertools.chain(iter([last]), chars), maxlen=1 - ).pop() - if first == last: - ret.append(escape_re_range_char(first)) - else: - sep = "" if ord(last) == ord(first) + 1 else "-" - ret.append( - f"{escape_re_range_char(first)}{sep}{escape_re_range_char(last)}" - ) - else: - ret = [escape_re_range_char(c) for c in s] - - return "".join(ret) - - -def _flatten(ll: list) -> list: - ret = [] - for i in ll: - if isinstance(i, list): - ret.extend(_flatten(i)) - else: - ret.append(i) - return ret - - -def replaced_by_pep8(compat_name: str, fn: C) -> C: - # In a future version, uncomment the code in the internal _inner() functions - # to begin emitting DeprecationWarnings. - - # Unwrap staticmethod/classmethod - fn = getattr(fn, "__func__", fn) - - # (Presence of 'self' arg in signature is used by explain_exception() methods, so we take - # some extra steps to add it if present in decorated function.) 
- if "self" == list(inspect.signature(fn).parameters)[0]: - - @wraps(fn) - def _inner(self, *args, **kwargs): - # warnings.warn( - # f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2 - # ) - return fn(self, *args, **kwargs) - - else: - - @wraps(fn) - def _inner(*args, **kwargs): - # warnings.warn( - # f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2 - # ) - return fn(*args, **kwargs) - - _inner.__doc__ = f"""Deprecated - use :class:`{fn.__name__}`""" - _inner.__name__ = compat_name - _inner.__annotations__ = fn.__annotations__ - if isinstance(fn, types.FunctionType): - _inner.__kwdefaults__ = fn.__kwdefaults__ - elif isinstance(fn, type) and hasattr(fn, "__init__"): - _inner.__kwdefaults__ = fn.__init__.__kwdefaults__ - else: - _inner.__kwdefaults__ = None - _inner.__qualname__ = fn.__qualname__ - return cast(C, _inner) diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index c892f8d4ad0..75eeb1e0658 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -5,7 +5,6 @@ distro==1.9.0 msgpack==1.0.8 packaging==23.2 platformdirs==4.2.1 -pyparsing==3.1.2 pyproject-hooks==1.0.0 requests==2.31.0 certifi==2024.2.2 From 372c6163ebfe32a2c000b178f54a9dbcbe46449b Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Fri, 6 Oct 2023 19:40:27 +0100 Subject: [PATCH 076/113] Explicitly keep track of canonical extra names with `pkg_resources` This ensures that the Distribution only exposes canonical forms of the extras, papering over the lack of PEP 685 support in the vendored version of `pkg_resources`. --- src/pip/_internal/metadata/pkg_resources.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/pip/_internal/metadata/pkg_resources.py b/src/pip/_internal/metadata/pkg_resources.py index 57a9fdba1bc..5e6ffb0bf63 100644 --- a/src/pip/_internal/metadata/pkg_resources.py +++ b/src/pip/_internal/metadata/pkg_resources.py @@ -75,6 +75,9 @@ def run_script(self, script_name: str, namespace: str) -> None: class Distribution(BaseDistribution): def __init__(self, dist: pkg_resources.Distribution) -> None: self._dist = dist + self._extra_mapping = { + canonicalize_name(extra): extra for extra in self._dist.extras + } @classmethod def from_directory(cls, directory: str) -> BaseDistribution: @@ -215,16 +218,21 @@ def _metadata_impl(self) -> email.message.Message: return feed_parser.close() def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: - if extras: # pkg_resources raises on invalid extras, so we sanitize. - extras = frozenset(pkg_resources.safe_extra(e) for e in extras) - extras = extras.intersection(self._dist.extras) + if extras: + sanitised_extras = {canonicalize_name(e) for e in extras} & set( + self._extra_mapping + ) + extras = [ + self._extra_mapping[canonicalize_name(extra)] + for extra in sanitised_extras + ] return self._dist.requires(extras) def iter_provided_extras(self) -> Iterable[str]: - return self._dist.extras + return self._extra_mapping.keys() def is_extra_provided(self, extra: str) -> bool: - return pkg_resources.safe_extra(extra) in self._dist.extras + return canonicalize_name(extra) in self._extra_mapping class Environment(BaseEnvironment): From e38c2b95d3a6cac8f51cd666c6b08e87b71233b1 Mon Sep 17 00:00:00 2001 From: Pradyun Gedam Date: Fri, 6 Oct 2023 19:58:09 +0100 Subject: [PATCH 077/113] Compare `Requirement` objects rather than requirement strings This ensures that the comparision does not fail due to mismatching extra normalisation. 
---
 tests/unit/metadata/test_metadata_pkg_resources.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/tests/unit/metadata/test_metadata_pkg_resources.py b/tests/unit/metadata/test_metadata_pkg_resources.py
index 69efa209fdf..ee1b22003ba 100644
--- a/tests/unit/metadata/test_metadata_pkg_resources.py
+++ b/tests/unit/metadata/test_metadata_pkg_resources.py
@@ -4,6 +4,7 @@
 from unittest import mock

 import pytest
+from pip._vendor.packaging.requirements import Requirement
 from pip._vendor.packaging.specifiers import SpecifierSet
 from pip._vendor.packaging.version import parse as parse_version

@@ -82,7 +83,7 @@ def test_wheel_metadata_works() -> None:
     name = "simple"
     version = "0.1.0"
     require_a = "a==1.0"
-    require_b = 'b==1.1; extra == "also-b"'
+    require_b = 'b==1.1; extra == "also_b"'
     requires = [require_a, require_b, 'c==1.2; extra == "also_c"']
     extras = ["also_b", "also_c"]
     requires_python = ">=3"
@@ -108,8 +109,8 @@ def test_wheel_metadata_works() -> None:
     assert parse_version(version) == dist.version
     assert set(extras) == set(dist.iter_provided_extras())
     assert [require_a] == [str(r) for r in dist.iter_dependencies()]
-    assert [require_a, require_b] == [
-        str(r) for r in dist.iter_dependencies(["also_b"])
+    assert [Requirement(require_a), Requirement(require_b)] == [
+        Requirement(str(r)) for r in dist.iter_dependencies(["also_b"])
     ]
     assert metadata.as_string() == dist.metadata.as_string()
     assert SpecifierSet(requires_python) == dist.requires_python

From 908e9130205ef7027dd6a095718ce7bfee58b461 Mon Sep 17 00:00:00 2001
From: Pradyun Gedam
Date: Fri, 6 Oct 2023 19:59:17 +0100
Subject: [PATCH 078/113] Do not normalise `iter_provided_extras` from
 pkg_resources

This is explicitly checked for in the test suite and it isn't strictly
necessary to ensure the correct behaviour.

This does expose that the test mock objects are incomplete, so those get
the missing `extras` attribute added.
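
The underlying mismatch is that `pkg_resources.safe_extra` and PEP 685's
`canonicalize_name` normalise differently. A quick illustration (not part
of the change):

    from pip._vendor import pkg_resources
    from pip._vendor.packaging.utils import canonicalize_name

    # setuptools-style normalisation keeps underscores, while
    # PEP 503/685 normalisation folds them to hyphens.
    pkg_resources.safe_extra("also_b")   # -> 'also_b'
    canonicalize_name("also_b")          # -> 'also-b'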
---
 src/pip/_internal/metadata/pkg_resources.py        |  2 +-
 tests/unit/metadata/test_metadata_pkg_resources.py | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/pip/_internal/metadata/pkg_resources.py b/src/pip/_internal/metadata/pkg_resources.py
index 5e6ffb0bf63..783f901fe9d 100644
--- a/src/pip/_internal/metadata/pkg_resources.py
+++ b/src/pip/_internal/metadata/pkg_resources.py
@@ -229,7 +229,7 @@ def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requiremen
         return self._dist.requires(extras)

     def iter_provided_extras(self) -> Iterable[str]:
-        return self._extra_mapping.keys()
+        return self._dist.extras

     def is_extra_provided(self, extra: str) -> bool:
         return canonicalize_name(extra) in self._extra_mapping
diff --git a/tests/unit/metadata/test_metadata_pkg_resources.py b/tests/unit/metadata/test_metadata_pkg_resources.py
index ee1b22003ba..7795d3e982a 100644
--- a/tests/unit/metadata/test_metadata_pkg_resources.py
+++ b/tests/unit/metadata/test_metadata_pkg_resources.py
@@ -39,17 +39,17 @@ def require(self, name: str) -> None:

 workingset = _MockWorkingSet(
     (
-        mock.Mock(test_name="global", project_name="global"),
-        mock.Mock(test_name="editable", project_name="editable"),
-        mock.Mock(test_name="normal", project_name="normal"),
-        mock.Mock(test_name="user", project_name="user"),
+        mock.Mock(test_name="global", project_name="global", extras=[]),
+        mock.Mock(test_name="editable", project_name="editable", extras=[]),
+        mock.Mock(test_name="normal", project_name="normal", extras=[]),
+        mock.Mock(test_name="user", project_name="user", extras=[]),
     )
 )

 workingset_stdlib = _MockWorkingSet(
     (
-        mock.Mock(test_name="normal", project_name="argparse"),
-        mock.Mock(test_name="normal", project_name="wsgiref"),
+        mock.Mock(test_name="normal", project_name="argparse", extras=[]),
+        mock.Mock(test_name="normal", project_name="wsgiref", extras=[]),
     )
 )

From 5cc540be94dbccfaaa3c6b458dd3bd7c0877ce65 Mon Sep 17 00:00:00 2001
From: Pradyun Gedam
Date: Sat, 7 Oct 2023 00:55:48 +0100
Subject: [PATCH 079/113] Use extras directly from metadata when using
 `pkg_resources`

Instead of relying on `pkg_resources`'s internal data structure, use the
metadata that has to be constructed regardless to look up the information
about relevant extras.
---
 src/pip/_internal/metadata/pkg_resources.py        | 25 ++++++++++++-------
 .../metadata/test_metadata_pkg_resources.py        | 12 ++++-----
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/src/pip/_internal/metadata/pkg_resources.py b/src/pip/_internal/metadata/pkg_resources.py
index 783f901fe9d..094c1737e66 100644
--- a/src/pip/_internal/metadata/pkg_resources.py
+++ b/src/pip/_internal/metadata/pkg_resources.py
@@ -75,9 +75,19 @@ def run_script(self, script_name: str, namespace: str) -> None:
 class Distribution(BaseDistribution):
     def __init__(self, dist: pkg_resources.Distribution) -> None:
         self._dist = dist
+        # This is populated lazily, to avoid loading metadata for all possible
+        # distributions eagerly.
+        self.__extra_mapping: Optional[Mapping[NormalizedName, str]] = None
+
+    @property
+    def _extra_mapping(self) -> Mapping[NormalizedName, str]:
+        if self.__extra_mapping is None:
+            self.__extra_mapping = {
+                canonicalize_name(extra): pkg_resources.safe_extra(extra)
+                for extra in self.metadata.get_all("Provides-Extra", [])
+            }
+
+        return self.__extra_mapping

     @classmethod
     def from_directory(cls, directory: str) -> BaseDistribution:
@@ -219,13 +229,10 @@ def _metadata_impl(self) -> email.message.Message:

     def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requiremen
         if extras:
-            sanitised_extras = {canonicalize_name(e) for e in extras} & set(
-                self._extra_mapping
+            relevant_extras = set(self._extra_mapping) & set(
+                map(canonicalize_name, extras)
             )
-            extras = [
-                self._extra_mapping[canonicalize_name(extra)]
-                for extra in sanitised_extras
-            ]
+            extras = [self._extra_mapping[extra] for extra in relevant_extras]
         return self._dist.requires(extras)

     def iter_provided_extras(self) -> Iterable[str]:
diff --git a/tests/unit/metadata/test_metadata_pkg_resources.py b/tests/unit/metadata/test_metadata_pkg_resources.py
index 7795d3e982a..ee1b22003ba 100644
--- a/tests/unit/metadata/test_metadata_pkg_resources.py
+++ b/tests/unit/metadata/test_metadata_pkg_resources.py
@@ -39,17 +39,17 @@ def require(self, name: str) -> None:

 workingset = _MockWorkingSet(
     (
-        mock.Mock(test_name="global", project_name="global", extras=[]),
-        mock.Mock(test_name="editable", project_name="editable", extras=[]),
-        mock.Mock(test_name="normal", project_name="normal", extras=[]),
-        mock.Mock(test_name="user", project_name="user", extras=[]),
+        mock.Mock(test_name="global", project_name="global"),
+        mock.Mock(test_name="editable", project_name="editable"),
+        mock.Mock(test_name="normal", project_name="normal"),
+        mock.Mock(test_name="user", project_name="user"),
     )
 )

 workingset_stdlib = _MockWorkingSet(
     (
-        mock.Mock(test_name="normal", project_name="argparse", extras=[]),
-        mock.Mock(test_name="normal", project_name="wsgiref", extras=[]),
+        mock.Mock(test_name="normal", project_name="argparse"),
+        mock.Mock(test_name="normal", project_name="wsgiref"),
     )
 )

From 8d22e803bdf483c1ffde1ae7cd163d9ea4154bff Mon Sep 17 00:00:00 2001
From: Pradyun Gedam
Date: Sat, 7 Oct 2023 00:47:26 +0100
Subject: [PATCH 080/113] Implement PEP 685 on distribution objects directly

This uses normalised names across the board for extras, with comparisons
outside this context relying on `packaging`'s support for the
corresponding comparisons.
---
 src/pip/_internal/metadata/base.py            | 17 ++----
 .../_internal/metadata/importlib/_dists.py    | 13 ++---
 src/pip/_internal/metadata/pkg_resources.py   | 20 ++++---
 .../resolution/resolvelib/candidates.py       | 57 ++++---------------
 .../metadata/test_metadata_pkg_resources.py   |  3 +-
 5 files changed, 36 insertions(+), 74 deletions(-)

diff --git a/src/pip/_internal/metadata/base.py b/src/pip/_internal/metadata/base.py
index 843a4e60aeb..ac585c717e6 100644
--- a/src/pip/_internal/metadata/base.py
+++ b/src/pip/_internal/metadata/base.py
@@ -441,24 +441,15 @@ def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requiremen
         """
         raise NotImplementedError()

-    def iter_provided_extras(self) -> Iterable[str]:
+    def iter_provided_extras(self) -> Iterable[NormalizedName]:
         """Extras provided by this distribution.

         For modern .dist-info distributions, this is the collection of
         "Provides-Extra:" entries in distribution metadata.
- The return value of this function is not particularly useful other than - display purposes due to backward compatibility issues and the extra - names being poorly normalized prior to PEP 685. If you want to perform - logic operations on extras, use :func:`is_extra_provided` instead. - """ - raise NotImplementedError() - - def is_extra_provided(self, extra: str) -> bool: - """Check whether an extra is provided by this distribution. - - This is needed mostly for compatibility issues with pkg_resources not - following the extra normalization rules defined in PEP 685. + The return value of this function is expected to be normalised names, + per PEP 685, with the returned value being handled appropriately by + `iter_dependencies`. """ raise NotImplementedError() diff --git a/src/pip/_internal/metadata/importlib/_dists.py b/src/pip/_internal/metadata/importlib/_dists.py index 5e6a0345621..94a13a280d3 100644 --- a/src/pip/_internal/metadata/importlib/_dists.py +++ b/src/pip/_internal/metadata/importlib/_dists.py @@ -204,14 +204,11 @@ def _metadata_impl(self) -> email.message.Message: # until upstream can improve the protocol. (python/cpython#94952) return cast(email.message.Message, self._dist.metadata) - def iter_provided_extras(self) -> Iterable[str]: - return self.metadata.get_all("Provides-Extra", []) - - def is_extra_provided(self, extra: str) -> bool: - return any( - canonicalize_name(provided_extra) == canonicalize_name(extra) - for provided_extra in self.metadata.get_all("Provides-Extra", []) - ) + def iter_provided_extras(self) -> Iterable[NormalizedName]: + return [ + canonicalize_name(extra) + for extra in self.metadata.get_all("Provides-Extra", []) + ] def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: contexts: Sequence[Dict[str, str]] = [{"extra": e} for e in extras] diff --git a/src/pip/_internal/metadata/pkg_resources.py b/src/pip/_internal/metadata/pkg_resources.py index 094c1737e66..3786b7cd394 100644 --- a/src/pip/_internal/metadata/pkg_resources.py +++ b/src/pip/_internal/metadata/pkg_resources.py @@ -3,7 +3,16 @@ import logging import os import zipfile -from typing import Collection, Iterable, Iterator, List, Mapping, NamedTuple, Optional +from typing import ( + Collection, + Iterable, + Iterator, + List, + Mapping, + NamedTuple, + Optional, + cast, +) from pip._vendor import pkg_resources from pip._vendor.packaging.requirements import Requirement @@ -83,7 +92,7 @@ def __init__(self, dist: pkg_resources.Distribution) -> None: def _extra_mapping(self) -> Mapping[NormalizedName, str]: if self.__extra_mapping is None: self.__extra_mapping = { - canonicalize_name(extra): pkg_resources.safe_extra(extra) + canonicalize_name(extra): pkg_resources.safe_extra(cast(str, extra)) for extra in self.metadata.get_all("Provides-Extra", []) } @@ -235,11 +244,8 @@ def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requiremen extras = [self._extra_mapping[extra] for extra in relevant_extras] return self._dist.requires(extras) - def iter_provided_extras(self) -> Iterable[str]: - return self._dist.extras - - def is_extra_provided(self, extra: str) -> bool: - return canonicalize_name(extra) in self._extra_mapping + def iter_provided_extras(self) -> Iterable[NormalizedName]: + return list(self._extra_mapping.keys()) class Environment(BaseEnvironment): diff --git a/src/pip/_internal/resolution/resolvelib/candidates.py b/src/pip/_internal/resolution/resolvelib/candidates.py index bda9147d7f9..a8d7d78da7c 100644 --- 
a/src/pip/_internal/resolution/resolvelib/candidates.py +++ b/src/pip/_internal/resolution/resolvelib/candidates.py @@ -485,50 +485,6 @@ def is_editable(self) -> bool: def source_link(self) -> Optional[Link]: return self.base.source_link - def _warn_invalid_extras( - self, - requested: FrozenSet[str], - valid: FrozenSet[str], - ) -> None: - """Emit warnings for invalid extras being requested. - - This emits a warning for each requested extra that is not in the - candidate's ``Provides-Extra`` list. - """ - invalid_extras_to_warn = frozenset( - extra - for extra in requested - if extra not in valid - # If an extra is requested in an unnormalized form, skip warning - # about the normalized form being missing. - and extra in self.extras - ) - if not invalid_extras_to_warn: - return - for extra in sorted(invalid_extras_to_warn): - logger.warning( - "%s %s does not provide the extra '%s'", - self.base.name, - self.version, - extra, - ) - - def _calculate_valid_requested_extras(self) -> FrozenSet[str]: - """Get a list of valid extras requested by this candidate. - - The user (or upstream dependent) may have specified extras that the - candidate doesn't support. Any unsupported extras are dropped, and each - cause a warning to be logged here. - """ - requested_extras = self.extras - valid_extras = frozenset( - extra - for extra in requested_extras - if self.base.dist.is_extra_provided(extra) - ) - self._warn_invalid_extras(requested_extras, valid_extras) - return valid_extras - def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: factory = self.base._factory @@ -538,7 +494,18 @@ def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requiremen if not with_requires: return - valid_extras = self._calculate_valid_requested_extras() + # The user may have specified extras that the candidate doesn't + # support. We ignore any unsupported extras here. 
+ valid_extras = self.extras.intersection(self.base.dist.iter_provided_extras()) + invalid_extras = self.extras.difference(self.base.dist.iter_provided_extras()) + for extra in sorted(invalid_extras): + logger.warning( + "%s %s does not provide the extra '%s'", + self.base.name, + self.version, + extra, + ) + for r in self.base.dist.iter_dependencies(valid_extras): yield from factory.make_requirements_from_spec( str(r), diff --git a/tests/unit/metadata/test_metadata_pkg_resources.py b/tests/unit/metadata/test_metadata_pkg_resources.py index ee1b22003ba..6044c14e4ca 100644 --- a/tests/unit/metadata/test_metadata_pkg_resources.py +++ b/tests/unit/metadata/test_metadata_pkg_resources.py @@ -6,6 +6,7 @@ import pytest from pip._vendor.packaging.requirements import Requirement from pip._vendor.packaging.specifiers import SpecifierSet +from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.packaging.version import parse as parse_version from pip._internal.exceptions import UnsupportedWheel @@ -107,7 +108,7 @@ def test_wheel_metadata_works() -> None: assert name == dist.canonical_name == dist.raw_name assert parse_version(version) == dist.version - assert set(extras) == set(dist.iter_provided_extras()) + assert {canonicalize_name(e) for e in extras} == set(dist.iter_provided_extras()) assert [require_a] == [str(r) for r in dist.iter_dependencies()] assert [Requirement(require_a), Requirement(require_b)] == [ Requirement(str(r)) for r in dist.iter_dependencies(["also_b"]) From cec49eac759ee5bd2234677f5c47f10d38cf3ce7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 30 Mar 2024 18:45:59 +0100 Subject: [PATCH 081/113] Add failing test for index with legacy versions --- .../invalid-version/invalid-version/index.html | 7 +++++++ tests/data/packages/README.txt | 8 ++++++++ .../invalid_version-1.0-py3-none-any.whl | Bin 0 -> 983 bytes .../invalid_version-2010i-py3-none-any.whl | Bin 0 -> 1001 bytes .../test_invalid_versions_and_specifiers.py | 15 +++++++++++++++ 5 files changed, 30 insertions(+) create mode 100644 tests/data/indexes/invalid-version/invalid-version/index.html create mode 100644 tests/data/packages/invalid_version-1.0-py3-none-any.whl create mode 100644 tests/data/packages/invalid_version-2010i-py3-none-any.whl create mode 100644 tests/functional/test_invalid_versions_and_specifiers.py diff --git a/tests/data/indexes/invalid-version/invalid-version/index.html b/tests/data/indexes/invalid-version/invalid-version/index.html new file mode 100644 index 00000000000..db9c47fed4b --- /dev/null +++ b/tests/data/indexes/invalid-version/invalid-version/index.html @@ -0,0 +1,7 @@ + + + + invalid_version-2010i-py3-none-any.whl + invalid_version-1.0-py3-none-any.whl + + diff --git a/tests/data/packages/README.txt b/tests/data/packages/README.txt index 37860a21b9e..a0cb164c4f1 100644 --- a/tests/data/packages/README.txt +++ b/tests/data/packages/README.txt @@ -104,3 +104,11 @@ require_simple-1.0.tar.gz ------------------------ contains "require_simple" package which requires simple>=2.0 - used for testing if dependencies are handled correctly. + +invalid_version-2010i-py3-none-any.whl +-------------------------------------- +The invalid-version package with a legacy version. + +invalid_version-1.0-py3-none-any.whl +------------------------------------ +A valid variant of the invalid-version package. 
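
A note on the test data: "2010i" is a pre-PEP 440 "legacy" version, which
the vendored `packaging` refuses to parse. A quick illustration (not part
of the patch):

    from pip._vendor.packaging.version import InvalidVersion, Version

    Version("1.0")       # parses fine
    try:
        Version("2010i")
    except InvalidVersion:
        pass             # "i" is not a valid PEP 440 suffix

The test added below checks that pip skips such files instead of crashing.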
diff --git a/tests/data/packages/invalid_version-1.0-py3-none-any.whl b/tests/data/packages/invalid_version-1.0-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..eeee9527792f522dd0303081b416208f6237e5a6 GIT binary patch literal 983 zcmWIWW@Zs#U|`^25T0BY5&d$GkRgy~1;lDVoS9dan3I_jUzS=_oSC1eYp7?Smy%gr zqMMnQmap&Y8sg~U7~=TZSJ%_WQ^)fPueYw&xijZC2N_&4e)7rtjPKT<6KAx&&z$tR zsN3vOcyOx3E6IXFMuq?sOaJgyd&dAR17T%?mWF${y83XR_wYS?5n~3m5fhr@CbL~PVg@>j5s0-3I<6$YAU-FxEHy{3q@v_8x>=hO zN=hyQ&9Da=rb5uHAXjJqAeYv&-bGCY3=9|crgL^&afmEo{IvSdF_DxPZH)rCTtaOu z0e<(V-mYr=o?#y@n|o?z&zwzhw@!B53D8ry6@SC5DCn|Tu<(cZUahy<`2v4%b9+CG zIJ?iJI;dds+Kl(dEax4|aC@b@C_h_XoaexI-oN7KYmalz-&B6Z_2#o_zaw6~l5#lI z<3CL|aot9Dr%MmYCh@%5{rpEAQ`;{mu^bI+qse|^Gd+fHJ KP){Kfhz9`D$wTx2 literal 0 HcmV?d00001 diff --git a/tests/data/packages/invalid_version-2010i-py3-none-any.whl b/tests/data/packages/invalid_version-2010i-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..1048e6ffc382e2a0a42b1aad512dbffa21941830 GIT binary patch literal 1001 zcmWIWW@Zs#U|`^2h@4Otaqf|(wGohK1H>9YoS9dan3I_jUzS=_oSC1eYh++(kg1oF zSzMx`|yVMM6RI*gl?Z#oJzQOVxX*j|p1p{$@rv<- zBLB1Ax?Vab^*051c%9VI>(pmo8F<0qg0bN>;|ou?PMz1k_*Lf`uZFJI*^@q-f;2R> ze4lc8`fLxmx#_78GtJ?mZ6WWd01VQ;#pj*-);Mcfw{ShY-p zR_00AtxRL$RMlLu{QW6Y@rwS=jX9CQJtYA?Tp3c@&AQUdOi~`X@2h!m=I*YU+*h`m z=>GQQ6P>x}qqKZnd7b;ZCKF~Wku7oNE0p(lN@Zj${G2XVoFL6@cKEsCv6Gkil$LPM ztNvHJ>Xdg*;LAOy*K3~4t@O_nE3baB;e!3Uyy^!tHgB6&d-2l5ZQiw)&mUg<%V34R zC#z*(pn{rw@!5;_7uzqNeam{GsZHfs0eNQDu+6LoXHUstjQGPG;LXS+!i+od0fPn% zwlsn$Vv{1Wso0Vh#7qW;EsYgKm UCBU1N4Wy3+2qS>Hs+mAM0FYEd3jhEB literal 0 HcmV?d00001 diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py new file mode 100644 index 00000000000..ea0bc6371e0 --- /dev/null +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -0,0 +1,15 @@ +from tests.lib import PipTestEnvironment, TestData + + +def test_install_from_index_with_invalid_version( + script: PipTestEnvironment, data: TestData +) -> None: + """ + Test that pip does not crash when installing a package from an index with + an invalid version. It ignores invalid versions. 
+ """ + index_url = data.index_url("invalid-version") + result = script.pip( + "install", "--dry-run", "--index-url", index_url, "invalid-version" + ) + assert "Would install invalid-version-1.0" in result.stdout From b63e279aab10432f3c92e213c99d1de452b8b89f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 30 Mar 2024 18:46:27 +0100 Subject: [PATCH 082/113] Ignore legacy versions in the package finder --- src/pip/_internal/index/package_finder.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/pip/_internal/index/package_finder.py b/src/pip/_internal/index/package_finder.py index 98ee6315567..fb270f22f83 100644 --- a/src/pip/_internal/index/package_finder.py +++ b/src/pip/_internal/index/package_finder.py @@ -11,7 +11,7 @@ from pip._vendor.packaging import specifiers from pip._vendor.packaging.tags import Tag from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.packaging.version import _BaseVersion +from pip._vendor.packaging.version import InvalidVersion, _BaseVersion from pip._vendor.packaging.version import parse as parse_version from pip._internal.exceptions import ( @@ -752,11 +752,14 @@ def get_install_candidate( self._log_skipped_link(link, result, detail) return None - return InstallationCandidate( - name=link_evaluator.project_name, - link=link, - version=detail, - ) + try: + return InstallationCandidate( + name=link_evaluator.project_name, + link=link, + version=detail, + ) + except InvalidVersion: + return None def evaluate_links( self, link_evaluator: LinkEvaluator, links: Iterable[Link] From 587854a79926db6187d3e1a6c9eb67e620ad6331 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 30 Mar 2024 19:36:50 +0100 Subject: [PATCH 083/113] Add failing test for package with invalid specifier in dependencies --- .../invalid-version/index.html | 6 ++++++ .../require-invalid-version/index.html | 7 +++++++ tests/data/packages/README.txt | 8 ++++++++ .../require_invalid_version-0.1-py3-none-any.whl | Bin 0 -> 1059 bytes .../require_invalid_version-1.0-py3-none-any.whl | Bin 0 -> 1083 bytes .../test_invalid_versions_and_specifiers.py | 14 ++++++++++++++ 6 files changed, 35 insertions(+) create mode 100644 tests/data/indexes/require-invalid-version/invalid-version/index.html create mode 100644 tests/data/indexes/require-invalid-version/require-invalid-version/index.html create mode 100644 tests/data/packages/require_invalid_version-0.1-py3-none-any.whl create mode 100644 tests/data/packages/require_invalid_version-1.0-py3-none-any.whl diff --git a/tests/data/indexes/require-invalid-version/invalid-version/index.html b/tests/data/indexes/require-invalid-version/invalid-version/index.html new file mode 100644 index 00000000000..b1ff5550ea1 --- /dev/null +++ b/tests/data/indexes/require-invalid-version/invalid-version/index.html @@ -0,0 +1,6 @@ + + + + invalid_version-2010i-py3-none-any.whl + + diff --git a/tests/data/indexes/require-invalid-version/require-invalid-version/index.html b/tests/data/indexes/require-invalid-version/require-invalid-version/index.html new file mode 100644 index 00000000000..66afe3e6daa --- /dev/null +++ b/tests/data/indexes/require-invalid-version/require-invalid-version/index.html @@ -0,0 +1,7 @@ + + + + require_invalid_version-0.1-py3-none-any.whl + require_invalid_version-1.0-py3-none-any.whl + + diff --git a/tests/data/packages/README.txt b/tests/data/packages/README.txt index a0cb164c4f1..bdf1a4fa22c 100644 --- a/tests/data/packages/README.txt +++ 
b/tests/data/packages/README.txt @@ -112,3 +112,11 @@ The invalid-version package with a legacy version. invalid_version-1.0-py3-none-any.whl ------------------------------------ A valid variant of the invalid-version package. + +require_invalid_version-0.1-py3-none-any.whl +-------------------------------------------- +A valid variant of the require-invalid-version package. + +require_invalid_version-1.0-py3-none-any.whl +-------------------------------------------- +This package has an invalid version specifier in its requirements. diff --git a/tests/data/packages/require_invalid_version-0.1-py3-none-any.whl b/tests/data/packages/require_invalid_version-0.1-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..976b80b89a095138508dd05b015e2cbc62c8b174 GIT binary patch literal 1059 zcmWIWW@Zs#U|`^2$e36c!7jg$-vY>U0%AQNE=nyd%`8fd&&(@J%*jlNFH0>d&dkr# zHPAEEOUW!Q(ap?D%h&gH4RLgF3~~JItLy3GspENt*IQTX+?n&6gA6ViKl$W+#&>Iw zhL^6+SskydXU}MRpE>DsQMXy+VUxBggVs|fh5!^d{Nbzijse;Y!rDZ;A>6~&)rb4M zhws^o2-~k1KPd7)>#gghb5eg(kcZbv9lcI{_LYGb3@#WOUNgS%gzMCK{fl39uJLN< zdYwJ#vnfbJQ_J@$m#5G6AdM9!*Up|k?|s$x%<3nP&>cCM?Ya>&&~c1FY)rHxOY#fi zb5hGvbM#6oN*<$|srR7Q@fy&qc%Y#=M4K7p>g*rn(tFmssL6nV;lkc@&x};B39jNb zOjiOVHJeHff9RamD8$2f00cM z+va_~$9sGY?r?=V&)@CspYX-FYi^atmTDbY(_JgQQ-zpcM*2r+&M#ZN_*)O7lk`R{ z&)S~m>kr)6o47N0aDAE!uy4mQ-4q`F`!;LUe=ClM2FMO<+V5=V+9KgKhzO dxppBJOCcTVcq}$ literal 0 HcmV?d00001 diff --git a/tests/data/packages/require_invalid_version-1.0-py3-none-any.whl b/tests/data/packages/require_invalid_version-1.0-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..3dd628a16d79577b4c869ec2c7cf80200d320517 GIT binary patch literal 1083 zcmWIWW@Zs#U|`^2I6a{*f;GWj+aJiw1Y$iPE=nyd%`8fd&&(@J%*jlNFH0>d&dkr# zHPkcEOUW!Q(ap?D%h&gH4RLgF3~~JItLy3GspENt*IQTX+?n&6gA6ViKl$W+#&>Iw zhL^6+SskydXU}MRpE>DsQMXy+VUxD$72^k7O&VI)N>A$OZ47ba(v5ykKy_*zlV1g(qC6&g)s{1jz`$@}Z@TA-3rdFR zAJ{(CCo?;)kv=``YKBU4!k*?;o5HfxR8sD??teS?%B{$itVi-!zMOqNdv2^@&GKZ; z#2>w^t`b*&D0aWGk$>TOnfaJ{w7Y!?|CO!#q{MW0zPGrz`U8{mBDL*zD_40*y=s{q zZ!>#Q&Ee-WmOQ*KIO|NgfBSF28Mh8Oev;doqOw-${U_7N(D&6d+(dJP1S{@7vFgp~ zxxLu_cyx~VF?O9dRUH2F7+xLHDBM=Ki8Fxb)vqDV``$Yx_p zcMy{q7`8OdCfaOpDnvIJJ*^-Nb^yjSagIhwImi}ZOE?G%rVwobBpC&Gv$BD7vjAZM KQ14bI5Dx&s<8sRY literal 0 HcmV?d00001 diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index ea0bc6371e0..08d31ec2ace 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -13,3 +13,17 @@ def test_install_from_index_with_invalid_version( "install", "--dry-run", "--index-url", index_url, "invalid-version" ) assert "Would install invalid-version-1.0" in result.stdout + + +def test_install_from_index_with_invalid_specifier( + script: PipTestEnvironment, data: TestData +) -> None: + """ + Test that pip does not crash when installing a package with an invalid + version specifier in its dependencies. 
+ """ + index_url = data.index_url("require-invalid-version") + result = script.pip( + "install", "--dry-run", "--index-url", index_url, "require-invalid-version" + ) + assert "Would install require-invalid-version-0.1" in result.stdout From bf8b887ebfcb48eab4bd4905db359949a59a57c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 6 Apr 2024 17:20:58 +0200 Subject: [PATCH 084/113] Discard candidates with invalid dependencies --- src/pip/_internal/exceptions.py | 11 +++++++++++ src/pip/_internal/resolution/resolvelib/candidates.py | 9 +++++++++ src/pip/_internal/resolution/resolvelib/factory.py | 5 +++-- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/pip/_internal/exceptions.py b/src/pip/_internal/exceptions.py index 0609e450683..0542d12cc01 100644 --- a/src/pip/_internal/exceptions.py +++ b/src/pip/_internal/exceptions.py @@ -358,6 +358,17 @@ def __str__(self) -> str: ) +class MetadataInvalid(InstallationError): + """Metadata is invalid.""" + + def __init__(self, ireq: "InstallRequirement", error: str) -> None: + self.ireq = ireq + self.error = error + + def __str__(self) -> str: + return f"Requested {self.ireq} has invalid metadata: {self.error}" + + class InstallationSubprocessError(DiagnosticPipError, InstallationError): """A subprocess call failed.""" diff --git a/src/pip/_internal/resolution/resolvelib/candidates.py b/src/pip/_internal/resolution/resolvelib/candidates.py index a8d7d78da7c..d30d477be68 100644 --- a/src/pip/_internal/resolution/resolvelib/candidates.py +++ b/src/pip/_internal/resolution/resolvelib/candidates.py @@ -2,6 +2,7 @@ import sys from typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Optional, Tuple, Union, cast +from pip._vendor.packaging.requirements import InvalidRequirement from pip._vendor.packaging.utils import NormalizedName, canonicalize_name from pip._vendor.packaging.version import Version @@ -9,6 +10,7 @@ HashError, InstallationSubprocessError, MetadataInconsistent, + MetadataInvalid, ) from pip._internal.metadata import BaseDistribution from pip._internal.models.link import Link, links_equivalent @@ -220,6 +222,13 @@ def _check_metadata_consistency(self, dist: BaseDistribution) -> None: str(self._version), str(dist.version), ) + # check dependencies are valid + # TODO performance: this means we iterate the dependencies at least twice, + # we may want to cache parsed Requires-Dist + try: + list(dist.iter_dependencies(list(dist.iter_provided_extras()))) + except InvalidRequirement as e: + raise MetadataInvalid(self._ireq, str(e)) def _prepare(self) -> BaseDistribution: try: diff --git a/src/pip/_internal/resolution/resolvelib/factory.py b/src/pip/_internal/resolution/resolvelib/factory.py index 6de5d698e16..cebdeebad01 100644 --- a/src/pip/_internal/resolution/resolvelib/factory.py +++ b/src/pip/_internal/resolution/resolvelib/factory.py @@ -31,6 +31,7 @@ DistributionNotFound, InstallationError, MetadataInconsistent, + MetadataInvalid, UnsupportedPythonVersion, UnsupportedWheel, ) @@ -213,7 +214,7 @@ def _make_base_candidate_from_link( name=name, version=version, ) - except MetadataInconsistent as e: + except (MetadataInconsistent, MetadataInvalid) as e: logger.info( "Discarding [blue underline]%s[/]: [yellow]%s[reset]", link, @@ -234,7 +235,7 @@ def _make_base_candidate_from_link( name=name, version=version, ) - except MetadataInconsistent as e: + except (MetadataInconsistent, MetadataInvalid) as e: logger.info( "Discarding [blue underline]%s[/]: [yellow]%s[reset]", link, From 
c44c6a4aec68a31d3584450c20c53177ea25445a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 15:42:18 +0200 Subject: [PATCH 085/113] Add failing test for uninstallation of dist with legacy version --- .../test_invalid_versions_and_specifiers.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index 08d31ec2ace..8df90de708d 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -1,3 +1,5 @@ +import zipfile + from tests.lib import PipTestEnvironment, TestData @@ -27,3 +29,15 @@ def test_install_from_index_with_invalid_specifier( "install", "--dry-run", "--index-url", index_url, "require-invalid-version" ) assert "Would install require-invalid-version-0.1" in result.stdout + + +def test_uninstall_invalid_version(script: PipTestEnvironment, data: TestData) -> None: + """ + Test that it is possible to uninstall a package with an invalid version. + """ + # install package with legacy version in site packages + with zipfile.ZipFile( + data.packages.joinpath("invalid_version-2010i-py3-none-any.whl") + ) as zf: + zf.extractall(script.site_packages_path) + script.pip("uninstall", "-y", "invalid-version") From 73f67440a9ac3c68bf317fa62cccec6a0c3ee98c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 15:44:32 +0200 Subject: [PATCH 086/113] Allow uninstallation of dist with legacy version We do this by adding a version_str property to BaseDistribution, which allows us to access the version without parsing it. --- src/pip/_internal/metadata/base.py | 8 ++++++-- src/pip/_internal/metadata/importlib/_dists.py | 4 ++++ src/pip/_internal/metadata/pkg_resources.py | 4 ++++ src/pip/_internal/req/req_uninstall.py | 2 +- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/pip/_internal/metadata/base.py b/src/pip/_internal/metadata/base.py index ac585c717e6..aba6e3a3a9f 100644 --- a/src/pip/_internal/metadata/base.py +++ b/src/pip/_internal/metadata/base.py @@ -138,10 +138,10 @@ def from_wheel(cls, wheel: "Wheel", name: str) -> "BaseDistribution": raise NotImplementedError() def __repr__(self) -> str: - return f"{self.raw_name} {self.version} ({self.location})" + return f"{self.raw_name} {self.version_str} ({self.location})" def __str__(self) -> str: - return f"{self.raw_name} {self.version}" + return f"{self.raw_name} {self.version_str}" @property def location(self) -> Optional[str]: @@ -275,6 +275,10 @@ def canonical_name(self) -> NormalizedName: def version(self) -> Version: raise NotImplementedError() + @property + def version_str(self) -> str: + raise NotImplementedError() + @property def setuptools_filename(self) -> str: """Convert a project name to its setuptools-compatible filename. 
diff --git a/src/pip/_internal/metadata/importlib/_dists.py b/src/pip/_internal/metadata/importlib/_dists.py index 94a13a280d3..2fa32f6877b 100644 --- a/src/pip/_internal/metadata/importlib/_dists.py +++ b/src/pip/_internal/metadata/importlib/_dists.py @@ -174,6 +174,10 @@ def canonical_name(self) -> NormalizedName: def version(self) -> Version: return parse_version(self._dist.version) + @property + def version_str(self) -> str: + return self._dist.version + def is_file(self, path: InfoPath) -> bool: return self._dist.read_text(str(path)) is not None diff --git a/src/pip/_internal/metadata/pkg_resources.py b/src/pip/_internal/metadata/pkg_resources.py index 3786b7cd394..c31c5673143 100644 --- a/src/pip/_internal/metadata/pkg_resources.py +++ b/src/pip/_internal/metadata/pkg_resources.py @@ -193,6 +193,10 @@ def canonical_name(self) -> NormalizedName: def version(self) -> Version: return parse_version(self._dist.version) + @property + def version_str(self) -> str: + return self._dist.version + def is_file(self, path: InfoPath) -> bool: return self._dist.has_metadata(str(path)) diff --git a/src/pip/_internal/req/req_uninstall.py b/src/pip/_internal/req/req_uninstall.py index 3a9ae2b1097..f3e49f98709 100644 --- a/src/pip/_internal/req/req_uninstall.py +++ b/src/pip/_internal/req/req_uninstall.py @@ -367,7 +367,7 @@ def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None: ) return - dist_name_version = f"{self._dist.raw_name}-{self._dist.version}" + dist_name_version = f"{self._dist.raw_name}-{self._dist.version_str}" logger.info("Uninstalling %s:", dist_name_version) with indent_log(): From 056313220538a422808138882024f738000c83f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 15:46:29 +0200 Subject: [PATCH 087/113] Refactor test --- .../test_invalid_versions_and_specifiers.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index 8df90de708d..4cea86bf33a 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -31,13 +31,19 @@ def test_install_from_index_with_invalid_specifier( assert "Would install require-invalid-version-0.1" in result.stdout -def test_uninstall_invalid_version(script: PipTestEnvironment, data: TestData) -> None: +def _install_invalid_version(script: PipTestEnvironment, data: TestData) -> None: """ - Test that it is possible to uninstall a package with an invalid version. + Install a package with an invalid version. """ - # install package with legacy version in site packages with zipfile.ZipFile( data.packages.joinpath("invalid_version-2010i-py3-none-any.whl") ) as zf: zf.extractall(script.site_packages_path) + + +def test_uninstall_invalid_version(script: PipTestEnvironment, data: TestData) -> None: + """ + Test that it is possible to uninstall a package with an invalid version. 
+ """ + _install_invalid_version(script, data) script.pip("uninstall", "-y", "invalid-version") From 27807fb5992ead09cee8c84e061e1c57c24df289 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 15:48:00 +0200 Subject: [PATCH 088/113] Add failing test for pip list in presence of legacy version --- tests/functional/test_invalid_versions_and_specifiers.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index 4cea86bf33a..5cc4a6b0978 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -47,3 +47,11 @@ def test_uninstall_invalid_version(script: PipTestEnvironment, data: TestData) - """ _install_invalid_version(script, data) script.pip("uninstall", "-y", "invalid-version") + + +def test_list_invalid_version(script: PipTestEnvironment, data: TestData) -> None: + """ + Test that pip can list an environment containing a package with a legacy version. + """ + _install_invalid_version(script, data) + script.pip("list") From 0529c044fa0089df0ca1d6bcee315ee6bd0a4909 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 15:51:05 +0200 Subject: [PATCH 089/113] Fix pip list in presence of installed legacy versions --- src/pip/_internal/commands/list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pip/_internal/commands/list.py b/src/pip/_internal/commands/list.py index 171461aebaf..5653907a1fe 100644 --- a/src/pip/_internal/commands/list.py +++ b/src/pip/_internal/commands/list.py @@ -333,7 +333,7 @@ def format_for_columns( for proj in pkgs: # if we're working on the 'outdated' list, separate out the # latest_version and type - row = [proj.raw_name, str(proj.version)] + row = [proj.raw_name, str(proj.version_str)] if running_outdated: row.append(str(proj.latest_version)) From 93e52cfa919ad2097fc3ed024049daa8d4caa564 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 17:11:12 +0200 Subject: [PATCH 090/113] Add failing test for pip freeze in presence of legacy version --- tests/functional/test_invalid_versions_and_specifiers.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index 5cc4a6b0978..45fb2ee1204 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -55,3 +55,12 @@ def test_list_invalid_version(script: PipTestEnvironment, data: TestData) -> Non """ _install_invalid_version(script, data) script.pip("list") + + +def test_freeze_invalid_version(script: PipTestEnvironment, data: TestData) -> None: + """ + Test that pip can freeze an environment containing a package with a legacy version. 
+ """ + _install_invalid_version(script, data) + result = script.pip("freeze") + assert "invalid-version===2010i\n" in result.stdout From a28d13a2281b5f733b3294ce5fc7562114bb1c32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 17:11:24 +0200 Subject: [PATCH 091/113] Fix pip freeze in presence of legacy version --- src/pip/_internal/operations/freeze.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/pip/_internal/operations/freeze.py b/src/pip/_internal/operations/freeze.py index 35445684514..de5563ba7d6 100644 --- a/src/pip/_internal/operations/freeze.py +++ b/src/pip/_internal/operations/freeze.py @@ -4,7 +4,7 @@ from typing import Container, Dict, Generator, Iterable, List, NamedTuple, Optional, Set from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.packaging.version import Version +from pip._vendor.packaging.version import InvalidVersion from pip._internal.exceptions import BadCommand, InstallationError from pip._internal.metadata import BaseDistribution, get_environment @@ -145,10 +145,13 @@ def freeze( def _format_as_name_version(dist: BaseDistribution) -> str: - dist_version = dist.version - if isinstance(dist_version, Version): + try: + dist_version = dist.version + except InvalidVersion: + # legacy version + return f"{dist.raw_name}==={dist.version_str}" + else: return f"{dist.raw_name}=={dist_version}" - return f"{dist.raw_name}==={dist_version}" def _get_editable_info(dist: BaseDistribution) -> _EditableInfo: From be652aa19745f41d3b8874538fd185a707697b9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 17:26:46 +0200 Subject: [PATCH 092/113] Add failing test for pip show of legacy version --- tests/functional/test_invalid_versions_and_specifiers.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index 45fb2ee1204..1044d9e5689 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -64,3 +64,12 @@ def test_freeze_invalid_version(script: PipTestEnvironment, data: TestData) -> N _install_invalid_version(script, data) result = script.pip("freeze") assert "invalid-version===2010i\n" in result.stdout + + +def test_show_invalid_version(script: PipTestEnvironment, data: TestData) -> None: + """ + Test that pip can show an installed distribution with a legacy version. 
+ """ + _install_invalid_version(script, data) + result = script.pip("show", "invalid-version") + assert "Name: invalid-version\nVersion: 2010i\n" in result.stdout From 8f97eb592e2589b921fed5e4aacd44c9c56f778f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 17:28:05 +0200 Subject: [PATCH 093/113] Fix pip show of legacy version --- src/pip/_internal/commands/show.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pip/_internal/commands/show.py b/src/pip/_internal/commands/show.py index b7894ce1f9c..80809a3a997 100644 --- a/src/pip/_internal/commands/show.py +++ b/src/pip/_internal/commands/show.py @@ -139,7 +139,7 @@ def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]: yield _PackageInfo( name=dist.raw_name, - version=str(dist.version), + version=dist.version_str, location=dist.location or "", editable_project_location=dist.editable_project_location, requires=requires, From 08ae751a26d4e957b0c6d6a5836212421095b248 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 17:55:34 +0200 Subject: [PATCH 094/113] Upgrade packaging to 24.0 --- news/packaging.vendor.rst | 2 +- src/pip/_vendor/packaging/__init__.py | 2 +- src/pip/_vendor/packaging/_manylinux.py | 14 ++- src/pip/_vendor/packaging/_parser.py | 5 +- src/pip/_vendor/packaging/metadata.py | 101 +++++++++++----------- src/pip/_vendor/packaging/requirements.py | 2 +- src/pip/_vendor/packaging/specifiers.py | 63 ++++++++------ src/pip/_vendor/packaging/tags.py | 44 +++++++--- src/pip/_vendor/vendor.txt | 2 +- 9 files changed, 135 insertions(+), 100 deletions(-) diff --git a/news/packaging.vendor.rst b/news/packaging.vendor.rst index 1834f6df449..08f099e1cb3 100644 --- a/news/packaging.vendor.rst +++ b/news/packaging.vendor.rst @@ -1 +1 @@ -Upgrade packaging to 23.2 +Upgrade packaging to 24.0 diff --git a/src/pip/_vendor/packaging/__init__.py b/src/pip/_vendor/packaging/__init__.py index 22809cfd5dc..e7c0aa12ca9 100644 --- a/src/pip/_vendor/packaging/__init__.py +++ b/src/pip/_vendor/packaging/__init__.py @@ -6,7 +6,7 @@ __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "23.2" +__version__ = "24.0" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/src/pip/_vendor/packaging/_manylinux.py b/src/pip/_vendor/packaging/_manylinux.py index 3705d50db91..ad62505f3ff 100644 --- a/src/pip/_vendor/packaging/_manylinux.py +++ b/src/pip/_vendor/packaging/_manylinux.py @@ -55,7 +55,15 @@ def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool: return _is_linux_armhf(executable) if "i686" in archs: return _is_linux_i686(executable) - allowed_archs = {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x", "loongarch64"} + allowed_archs = { + "x86_64", + "aarch64", + "ppc64", + "ppc64le", + "s390x", + "loongarch64", + "riscv64", + } return any(arch in allowed_archs for arch in archs) @@ -82,7 +90,7 @@ def _glibc_version_string_confstr() -> Optional[str]: # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 try: # Should be a string like "glibc 2.17". 
- version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION") + version_string: Optional[str] = os.confstr("CS_GNU_LIBC_VERSION") assert version_string is not None _, version = version_string.rsplit() except (AssertionError, AttributeError, OSError, ValueError): @@ -174,7 +182,7 @@ def _is_compatible(arch: str, version: _GLibCVersion) -> bool: return False # Check for presence of _manylinux module. try: - import _manylinux # noqa + import _manylinux except ImportError: return True if hasattr(_manylinux, "manylinux_compatible"): diff --git a/src/pip/_vendor/packaging/_parser.py b/src/pip/_vendor/packaging/_parser.py index 4576981c2dd..684df75457c 100644 --- a/src/pip/_vendor/packaging/_parser.py +++ b/src/pip/_vendor/packaging/_parser.py @@ -324,10 +324,7 @@ def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: def process_env_var(env_var: str) -> Variable: - if ( - env_var == "platform_python_implementation" - or env_var == "python_implementation" - ): + if env_var in ("platform_python_implementation", "python_implementation"): return Variable("platform_python_implementation") else: return Variable(env_var) diff --git a/src/pip/_vendor/packaging/metadata.py b/src/pip/_vendor/packaging/metadata.py index 87763455ab1..5e4c106c549 100644 --- a/src/pip/_vendor/packaging/metadata.py +++ b/src/pip/_vendor/packaging/metadata.py @@ -41,10 +41,10 @@ def __init_subclass__(*_args, **_kwargs): try: - ExceptionGroup = __builtins__.ExceptionGroup # type: ignore[attr-defined] -except AttributeError: + ExceptionGroup +except NameError: # pragma: no cover - class ExceptionGroup(Exception): # type: ignore[no-redef] # noqa: N818 + class ExceptionGroup(Exception): # noqa: N818 """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11. If :external:exc:`ExceptionGroup` is already defined by Python itself, @@ -61,6 +61,9 @@ def __init__(self, message: str, exceptions: List[Exception]) -> None: def __repr__(self) -> str: return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})" +else: # pragma: no cover + ExceptionGroup = ExceptionGroup + class InvalidMetadata(ValueError): """A metadata field contains invalid data.""" @@ -505,24 +508,19 @@ def __get__(self, instance: "Metadata", _owner: Type["Metadata"]) -> T: # No need to check the cache as attribute lookup will resolve into the # instance's __dict__ before __get__ is called. cache = instance.__dict__ - try: - value = instance._raw[self.name] # type: ignore[literal-required] - except KeyError: - if self.name in _STRING_FIELDS: - value = "" - elif self.name in _LIST_FIELDS: - value = [] - elif self.name in _DICT_FIELDS: - value = {} - else: # pragma: no cover - assert False + value = instance._raw.get(self.name) - try: - converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") - except AttributeError: - pass - else: - value = converter(value) + # To make the _process_* methods easier, we'll check if the value is None + # and if this field is NOT a required attribute, and if both of those + # things are true, we'll skip the the converter. This will mean that the + # converters never have to deal with the None union. 
+ if self.name in _REQUIRED_ATTRS or value is not None: + try: + converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") + except AttributeError: + pass + else: + value = converter(value) cache[self.name] = value try: @@ -677,7 +675,7 @@ def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> "Metadata": ins._raw = data.copy() # Mutations occur due to caching enriched values. if validate: - exceptions: List[InvalidMetadata] = [] + exceptions: List[Exception] = [] try: metadata_version = ins.metadata_version metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) @@ -732,10 +730,10 @@ def from_email( If *validate* is true, the metadata will be validated. All exceptions related to validation will be gathered and raised as an :class:`ExceptionGroup`. """ - exceptions: list[InvalidMetadata] = [] raw, unparsed = parse_email(data) if validate: + exceptions: list[Exception] = [] for unparsed_key in unparsed: if unparsed_key in _EMAIL_TO_RAW_MAPPING: message = f"{unparsed_key!r} has invalid data" @@ -749,8 +747,9 @@ def from_email( try: return cls.from_raw(raw, validate=validate) except ExceptionGroup as exc_group: - exceptions.extend(exc_group.exceptions) - raise ExceptionGroup("invalid or unparsed metadata", exceptions) from None + raise ExceptionGroup( + "invalid or unparsed metadata", exc_group.exceptions + ) from None metadata_version: _Validator[_MetadataVersion] = _Validator() """:external:ref:`core-metadata-metadata-version` @@ -761,62 +760,66 @@ def from_email( *validate* parameter)""" version: _Validator[version_module.Version] = _Validator() """:external:ref:`core-metadata-version` (required)""" - dynamic: _Validator[List[str]] = _Validator( + dynamic: _Validator[Optional[List[str]]] = _Validator( added="2.2", ) """:external:ref:`core-metadata-dynamic` (validated against core metadata field names and lowercased)""" - platforms: _Validator[List[str]] = _Validator() + platforms: _Validator[Optional[List[str]]] = _Validator() """:external:ref:`core-metadata-platform`""" - supported_platforms: _Validator[List[str]] = _Validator(added="1.1") + supported_platforms: _Validator[Optional[List[str]]] = _Validator(added="1.1") """:external:ref:`core-metadata-supported-platform`""" - summary: _Validator[str] = _Validator() + summary: _Validator[Optional[str]] = _Validator() """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" - description: _Validator[str] = _Validator() # TODO 2.1: can be in body + description: _Validator[Optional[str]] = _Validator() # TODO 2.1: can be in body """:external:ref:`core-metadata-description`""" - description_content_type: _Validator[str] = _Validator(added="2.1") + description_content_type: _Validator[Optional[str]] = _Validator(added="2.1") """:external:ref:`core-metadata-description-content-type` (validated)""" - keywords: _Validator[List[str]] = _Validator() + keywords: _Validator[Optional[List[str]]] = _Validator() """:external:ref:`core-metadata-keywords`""" - home_page: _Validator[str] = _Validator() + home_page: _Validator[Optional[str]] = _Validator() """:external:ref:`core-metadata-home-page`""" - download_url: _Validator[str] = _Validator(added="1.1") + download_url: _Validator[Optional[str]] = _Validator(added="1.1") """:external:ref:`core-metadata-download-url`""" - author: _Validator[str] = _Validator() + author: _Validator[Optional[str]] = _Validator() """:external:ref:`core-metadata-author`""" - author_email: _Validator[str] = _Validator() + author_email: _Validator[Optional[str]] = 
_Validator() """:external:ref:`core-metadata-author-email`""" - maintainer: _Validator[str] = _Validator(added="1.2") + maintainer: _Validator[Optional[str]] = _Validator(added="1.2") """:external:ref:`core-metadata-maintainer`""" - maintainer_email: _Validator[str] = _Validator(added="1.2") + maintainer_email: _Validator[Optional[str]] = _Validator(added="1.2") """:external:ref:`core-metadata-maintainer-email`""" - license: _Validator[str] = _Validator() + license: _Validator[Optional[str]] = _Validator() """:external:ref:`core-metadata-license`""" - classifiers: _Validator[List[str]] = _Validator(added="1.1") + classifiers: _Validator[Optional[List[str]]] = _Validator(added="1.1") """:external:ref:`core-metadata-classifier`""" - requires_dist: _Validator[List[requirements.Requirement]] = _Validator(added="1.2") + requires_dist: _Validator[Optional[List[requirements.Requirement]]] = _Validator( + added="1.2" + ) """:external:ref:`core-metadata-requires-dist`""" - requires_python: _Validator[specifiers.SpecifierSet] = _Validator(added="1.2") + requires_python: _Validator[Optional[specifiers.SpecifierSet]] = _Validator( + added="1.2" + ) """:external:ref:`core-metadata-requires-python`""" # Because `Requires-External` allows for non-PEP 440 version specifiers, we # don't do any processing on the values. - requires_external: _Validator[List[str]] = _Validator(added="1.2") + requires_external: _Validator[Optional[List[str]]] = _Validator(added="1.2") """:external:ref:`core-metadata-requires-external`""" - project_urls: _Validator[Dict[str, str]] = _Validator(added="1.2") + project_urls: _Validator[Optional[Dict[str, str]]] = _Validator(added="1.2") """:external:ref:`core-metadata-project-url`""" # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation # regardless of metadata version. 
- provides_extra: _Validator[List[utils.NormalizedName]] = _Validator( + provides_extra: _Validator[Optional[List[utils.NormalizedName]]] = _Validator( added="2.1", ) """:external:ref:`core-metadata-provides-extra`""" - provides_dist: _Validator[List[str]] = _Validator(added="1.2") + provides_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") """:external:ref:`core-metadata-provides-dist`""" - obsoletes_dist: _Validator[List[str]] = _Validator(added="1.2") + obsoletes_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") """:external:ref:`core-metadata-obsoletes-dist`""" - requires: _Validator[List[str]] = _Validator(added="1.1") + requires: _Validator[Optional[List[str]]] = _Validator(added="1.1") """``Requires`` (deprecated)""" - provides: _Validator[List[str]] = _Validator(added="1.1") + provides: _Validator[Optional[List[str]]] = _Validator(added="1.1") """``Provides`` (deprecated)""" - obsoletes: _Validator[List[str]] = _Validator(added="1.1") + obsoletes: _Validator[Optional[List[str]]] = _Validator(added="1.1") """``Obsoletes`` (deprecated)""" diff --git a/src/pip/_vendor/packaging/requirements.py b/src/pip/_vendor/packaging/requirements.py index 0c00eba331b..bdc43a7e98d 100644 --- a/src/pip/_vendor/packaging/requirements.py +++ b/src/pip/_vendor/packaging/requirements.py @@ -38,7 +38,7 @@ def __init__(self, requirement_string: str) -> None: self.name: str = parsed.name self.url: Optional[str] = parsed.url or None - self.extras: Set[str] = set(parsed.extras if parsed.extras else []) + self.extras: Set[str] = set(parsed.extras or []) self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) self.marker: Optional[Marker] = None if parsed.marker is not None: diff --git a/src/pip/_vendor/packaging/specifiers.py b/src/pip/_vendor/packaging/specifiers.py index 7617726c385..3a6497e44e6 100644 --- a/src/pip/_vendor/packaging/specifiers.py +++ b/src/pip/_vendor/packaging/specifiers.py @@ -11,17 +11,7 @@ import abc import itertools import re -from typing import ( - Callable, - Iterable, - Iterator, - List, - Optional, - Set, - Tuple, - TypeVar, - Union, -) +from typing import Callable, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union from .utils import canonicalize_version from .version import Version @@ -383,7 +373,7 @@ def _compare_compatible(self, prospective: Version, spec: str) -> bool: # We want everything but the last item in the version, but we want to # ignore suffix segments. - prefix = ".".join( + prefix = _version_join( list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] ) @@ -404,13 +394,13 @@ def _compare_equal(self, prospective: Version, spec: str) -> bool: ) # Get the normalized version string ignoring the trailing .* normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) - # Split the spec out by dots, and pretend that there is an implicit - # dot in between a release segment and a pre-release segment. + # Split the spec out by bangs and dots, and pretend that there is + # an implicit dot in between a release segment and a pre-release segment. split_spec = _version_split(normalized_spec) - # Split the prospective version out by dots, and pretend that there - # is an implicit dot in between a release segment and a pre-release - # segment. + # Split the prospective version out by bangs and dots, and pretend + # that there is an implicit dot in between a release segment and + # a pre-release segment. 
split_prospective = _version_split(normalized_prospective) # 0-pad the prospective version before shortening it to get the correct @@ -644,8 +634,19 @@ def filter( def _version_split(version: str) -> List[str]: + """Split version into components. + + The split components are intended for version comparison. The logic does + not attempt to retain the original version string, so joining the + components back with :func:`_version_join` may not produce the original + version string. + """ result: List[str] = [] - for item in version.split("."): + + epoch, _, rest = version.rpartition("!") + result.append(epoch or "0") + + for item in rest.split("."): match = _prefix_regex.search(item) if match: result.extend(match.groups()) @@ -654,6 +655,17 @@ def _version_split(version: str) -> List[str]: return result +def _version_join(components: List[str]) -> str: + """Join split version components into a version string. + + This function assumes the input came from :func:`_version_split`, where the + first component must be the epoch (either empty or numeric), and all other + components numeric. + """ + epoch, *rest = components + return f"{epoch}!{'.'.join(rest)}" + + def _is_not_suffix(segment: str) -> bool: return not any( segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") @@ -675,7 +687,10 @@ def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) - return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) + return ( + list(itertools.chain.from_iterable(left_split)), + list(itertools.chain.from_iterable(right_split)), + ) class SpecifierSet(BaseSpecifier): @@ -707,14 +722,8 @@ def __init__( # strip each item to remove leading/trailing whitespace. split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] - # Parsed each individual specifier, attempting first to make it a - # Specifier. - parsed: Set[Specifier] = set() - for specifier in split_specifiers: - parsed.add(Specifier(specifier)) - - # Turn our parsed specifiers into a frozen set and save them for later. - self._specs = frozenset(parsed) + # Make each individual specifier a Specifier and save in a frozen set for later. + self._specs = frozenset(map(Specifier, split_specifiers)) # Store our prereleases value so we can use it later to determine if # we accept prereleases or not. diff --git a/src/pip/_vendor/packaging/tags.py b/src/pip/_vendor/packaging/tags.py index 37f33b1ef84..89f1926137d 100644 --- a/src/pip/_vendor/packaging/tags.py +++ b/src/pip/_vendor/packaging/tags.py @@ -4,6 +4,7 @@ import logging import platform +import re import struct import subprocess import sys @@ -124,20 +125,37 @@ def _normalize_string(string: str) -> str: return string.replace(".", "_").replace("-", "_").replace(" ", "_") -def _abi3_applies(python_version: PythonVersion) -> bool: +def _is_threaded_cpython(abis: List[str]) -> bool: + """ + Determine if the ABI corresponds to a threaded (`--disable-gil`) build. + + The threaded builds are indicated by a "t" in the abiflags. + """ + if len(abis) == 0: + return False + # expect e.g., cp313 + m = re.match(r"cp\d+(.*)", abis[0]) + if not m: + return False + abiflags = m.group(1) + return "t" in abiflags + + +def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool: """ Determine if the Python version supports abi3. 
- PEP 384 was first implemented in Python 3.2. + PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`) + builds do not support abi3. """ - return len(python_version) > 1 and tuple(python_version) >= (3, 2) + return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: py_version = tuple(py_version) # To allow for version comparison. abis = [] version = _version_nodot(py_version[:2]) - debug = pymalloc = ucs4 = "" + threading = debug = pymalloc = ucs4 = "" with_debug = _get_config_var("Py_DEBUG", warn) has_refcount = hasattr(sys, "gettotalrefcount") # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled @@ -146,6 +164,8 @@ def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: has_ext = "_d.pyd" in EXTENSION_SUFFIXES if with_debug or (with_debug is None and (has_refcount or has_ext)): debug = "d" + if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn): + threading = "t" if py_version < (3, 8): with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) if with_pymalloc or with_pymalloc is None: @@ -159,13 +179,8 @@ def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: elif debug: # Debug builds can also load "normal" extension modules. # We can also assume no UCS-4 or pymalloc requirement. - abis.append(f"cp{version}") - abis.insert( - 0, - "cp{version}{debug}{pymalloc}{ucs4}".format( - version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 - ), - ) + abis.append(f"cp{version}{threading}") + abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}") return abis @@ -213,11 +228,14 @@ def cpython_tags( for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) - if _abi3_applies(python_version): + + threading = _is_threaded_cpython(abis) + use_abi3 = _abi3_applies(python_version, threading) + if use_abi3: yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) - if _abi3_applies(python_version): + if use_abi3: for minor_version in range(python_version[1] - 1, 1, -1): for platform_ in platforms: interpreter = "cp{version}".format( diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index 75eeb1e0658..e433acf8e04 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -3,7 +3,7 @@ colorama==0.4.6 distlib==0.3.8 distro==1.9.0 msgpack==1.0.8 -packaging==23.2 +packaging==24.0 platformdirs==4.2.1 pyproject-hooks==1.0.0 requests==2.31.0 From 196d536e565d851ad12e85afd134c823290a382c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 7 Apr 2024 18:56:24 +0200 Subject: [PATCH 095/113] Add news --- news/12063.removal.rst | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 news/12063.removal.rst diff --git a/news/12063.removal.rst b/news/12063.removal.rst new file mode 100644 index 00000000000..03d785d69c5 --- /dev/null +++ b/news/12063.removal.rst @@ -0,0 +1,4 @@ +Remove support for legacy versions and dependency specifiers. Packages with non +standard-compliant versions or dependency specifiers are now ignored by the resolver. +Already installed packages with non standard-compliant versions or dependency specifiers +must be uninstalled before upgrading them. 
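As an illustration of the removal described above, here is a minimal sketch (not part of the patch series) using pip's vendored ``packaging`` (bumped to 24.0 above); ``2010i`` is the legacy version string used by this series' test packages:

```python
# Sketch: packaging >= 22 has no LegacyVersion fallback, so non-compliant
# versions and specifiers raise instead of being parsed as "legacy".
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._vendor.packaging.version import InvalidVersion, Version

try:
    Version("2010i")
except InvalidVersion as exc:
    print(f"version rejected: {exc}")

try:
    Requirement("invalid-version ==2010i")
except InvalidRequirement as exc:
    print(f"requirement rejected: {exc}")
```

This is why the resolver now ignores such distributions, and why already-installed ones must be uninstalled before upgrading.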
From 83e77bfc66bf54f036bf6554525f40e63918817c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 13 Apr 2024 18:52:24 +0200 Subject: [PATCH 096/113] Add test sdist that requires invalid version --- .../require-invalid-version/index.html | 1 + tests/data/packages/README.txt | 4 ++++ .../packages/require_invalid_version-1.0.tar.gz | Bin 0 -> 319 bytes 3 files changed, 5 insertions(+) create mode 100644 tests/data/packages/require_invalid_version-1.0.tar.gz diff --git a/tests/data/indexes/require-invalid-version/require-invalid-version/index.html b/tests/data/indexes/require-invalid-version/require-invalid-version/index.html index 66afe3e6daa..4b3300db082 100644 --- a/tests/data/indexes/require-invalid-version/require-invalid-version/index.html +++ b/tests/data/indexes/require-invalid-version/require-invalid-version/index.html @@ -3,5 +3,6 @@ require_invalid_version-0.1-py3-none-any.whl require_invalid_version-1.0-py3-none-any.whl + require_invalid_version-1.0.tar.gz diff --git a/tests/data/packages/README.txt b/tests/data/packages/README.txt index bdf1a4fa22c..067ee98225c 100644 --- a/tests/data/packages/README.txt +++ b/tests/data/packages/README.txt @@ -120,3 +120,7 @@ A valid variant of the require-invalid-version package. require_invalid_version-1.0-py3-none-any.whl -------------------------------------------- This package has an invalid version specifier in its requirements. + +require_invalid_version-1.0.tar.gz +---------------------------------- +This package has an invalid version specifier in its requirements. diff --git a/tests/data/packages/require_invalid_version-1.0.tar.gz b/tests/data/packages/require_invalid_version-1.0.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..9db066988ff29cea6330da97f3a3f1b1dabaf686 GIT binary patch literal 319 zcmb2|=3oE==C@b;y$>5muzi^KS<~tLTF=?b0$%uv2rjSqV04Tln{VRR{#>&Uw`cFP zO5MSi7JB4)x#kxAEe|W_Z~K**8f9yKHA>glF#h#6<5JNN6XsSbC;H7&o2~6H{O#kf zV~?GTW*#Zjx+5b#hqGTTx%R+>(DYF6mEo@*?o2-ae?^(X|J|=oKgcR7{C%}NhiRIV z%$fP>?|*WO@7aAY^-ZAJF~ Date: Sat, 13 Apr 2024 19:09:58 +0200 Subject: [PATCH 097/113] Ignore all candidates of a version when one has invalid metadata We do this to avoid needlessly building a sdist when we have determined that a wheel has invalid metadata. 
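The idea can be reduced to the following sketch (simplified types; the real change to ``found_candidates.py`` follows below):

```python
# Sketch: once one candidate of a version fails with invalid metadata, record
# the version so sibling candidates (typically the sdist) are never built.
from typing import Callable, Iterator, Optional, Set, Tuple

class MetadataInvalid(Exception):
    """Stand-in for pip._internal.exceptions.MetadataInvalid."""

IndexCandidateInfo = Tuple[str, Callable[[], Optional[object]]]

def iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[object]:
    versions_found: Set[str] = set()
    for version, make_candidate in infos:
        if version in versions_found:
            continue  # a sibling candidate with this version was already handled
        try:
            candidate = make_candidate()
        except MetadataInvalid:
            versions_found.add(version)  # skip the sdist for this version too
        else:
            if candidate is not None:
                yield candidate
                versions_found.add(version)
```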
--- .../resolution/resolvelib/factory.py | 2 +- .../resolution/resolvelib/found_candidates.py | 29 +++++++++++++++---- .../test_invalid_versions_and_specifiers.py | 11 ++++++- 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/src/pip/_internal/resolution/resolvelib/factory.py b/src/pip/_internal/resolution/resolvelib/factory.py index cebdeebad01..1f31d834b04 100644 --- a/src/pip/_internal/resolution/resolvelib/factory.py +++ b/src/pip/_internal/resolution/resolvelib/factory.py @@ -235,7 +235,7 @@ def _make_base_candidate_from_link( name=name, version=version, ) - except (MetadataInconsistent, MetadataInvalid) as e: + except MetadataInconsistent as e: logger.info( "Discarding [blue underline]%s[/]: [yellow]%s[reset]", link, diff --git a/src/pip/_internal/resolution/resolvelib/found_candidates.py b/src/pip/_internal/resolution/resolvelib/found_candidates.py index 8663097b447..a1d57e0f4b2 100644 --- a/src/pip/_internal/resolution/resolvelib/found_candidates.py +++ b/src/pip/_internal/resolution/resolvelib/found_candidates.py @@ -9,13 +9,18 @@ """ import functools +import logging from collections.abc import Sequence from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Set, Tuple from pip._vendor.packaging.version import _BaseVersion +from pip._internal.exceptions import MetadataInvalid + from .base import Candidate +logger = logging.getLogger(__name__) + IndexCandidateInfo = Tuple[_BaseVersion, Callable[[], Optional[Candidate]]] if TYPE_CHECKING: @@ -44,11 +49,25 @@ def _iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[Candidate]: for version, func in infos: if version in versions_found: continue - candidate = func() - if candidate is None: - continue - yield candidate - versions_found.add(version) + try: + candidate = func() + except MetadataInvalid as e: + logger.warning( + "Ignoring version %s of %s since it has invalid metadata:\n" + "%s\n" + "Please use pip<24.1 if you need to use this version.", + version, + e.ireq.name, + e, + ) + # Mark version as found to avoid trying other candidates with the same + # version, since they most likely have invalid metadata as well. 
+ versions_found.add(version) + else: + if candidate is None: + continue + yield candidate + versions_found.add(version) def _iter_built_with_prepended( diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index 1044d9e5689..d661b39c0ae 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -26,8 +26,17 @@ def test_install_from_index_with_invalid_specifier( """ index_url = data.index_url("require-invalid-version") result = script.pip( - "install", "--dry-run", "--index-url", index_url, "require-invalid-version" + "install", + "--dry-run", + "--index-url", + index_url, + "require-invalid-version", + allow_stderr_warning=True, ) + assert ( + "WARNING: Ignoring version 1.0 of require-invalid-version " + "since it has invalid metadata" + ) in result.stderr assert "Would install require-invalid-version-0.1" in result.stdout From 350c9803455074defae70c7a3b2384bf173f8d20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 13 Apr 2024 19:16:56 +0200 Subject: [PATCH 098/113] Simplify Co-authored-by: Tzu-ping Chung --- src/pip/_internal/commands/list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pip/_internal/commands/list.py b/src/pip/_internal/commands/list.py index 5653907a1fe..76bd2634934 100644 --- a/src/pip/_internal/commands/list.py +++ b/src/pip/_internal/commands/list.py @@ -333,7 +333,7 @@ def format_for_columns( for proj in pkgs: # if we're working on the 'outdated' list, separate out the # latest_version and type - row = [proj.raw_name, str(proj.version_str)] + row = [proj.raw_name, proj.version_str] if running_outdated: row.append(str(proj.latest_version)) From f4b821c13f95d7ed54a03687de202ab1aa51e86b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sat, 13 Apr 2024 19:19:59 +0200 Subject: [PATCH 099/113] Don't create a list when an iterator is sufficient --- src/pip/_internal/metadata/pkg_resources.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pip/_internal/metadata/pkg_resources.py b/src/pip/_internal/metadata/pkg_resources.py index c31c5673143..2741d3a6309 100644 --- a/src/pip/_internal/metadata/pkg_resources.py +++ b/src/pip/_internal/metadata/pkg_resources.py @@ -249,7 +249,7 @@ def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requiremen return self._dist.requires(extras) def iter_provided_extras(self) -> Iterable[NormalizedName]: - return list(self._extra_mapping.keys()) + return self._extra_mapping.keys() class Environment(BaseEnvironment): From c50290efe36095d4f46319d1e3f423a1b58397a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 14 Apr 2024 10:58:51 +0200 Subject: [PATCH 100/113] Rename version_str to raw_version for consistency with raw_name --- src/pip/_internal/commands/list.py | 2 +- src/pip/_internal/commands/show.py | 2 +- src/pip/_internal/metadata/base.py | 6 +++--- src/pip/_internal/metadata/importlib/_dists.py | 2 +- src/pip/_internal/metadata/pkg_resources.py | 2 +- src/pip/_internal/operations/freeze.py | 2 +- src/pip/_internal/req/req_uninstall.py | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/pip/_internal/commands/list.py b/src/pip/_internal/commands/list.py index 76bd2634934..cb1e313f468 100644 --- a/src/pip/_internal/commands/list.py +++ b/src/pip/_internal/commands/list.py @@ -333,7 +333,7 @@ def format_for_columns( for proj in 
pkgs: # if we're working on the 'outdated' list, separate out the # latest_version and type - row = [proj.raw_name, proj.version_str] + row = [proj.raw_name, proj.raw_version] if running_outdated: row.append(str(proj.latest_version)) diff --git a/src/pip/_internal/commands/show.py b/src/pip/_internal/commands/show.py index 80809a3a997..b478da118e0 100644 --- a/src/pip/_internal/commands/show.py +++ b/src/pip/_internal/commands/show.py @@ -139,7 +139,7 @@ def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]: yield _PackageInfo( name=dist.raw_name, - version=dist.version_str, + version=dist.raw_version, location=dist.location or "", editable_project_location=dist.editable_project_location, requires=requires, diff --git a/src/pip/_internal/metadata/base.py b/src/pip/_internal/metadata/base.py index aba6e3a3a9f..2652333e86b 100644 --- a/src/pip/_internal/metadata/base.py +++ b/src/pip/_internal/metadata/base.py @@ -138,10 +138,10 @@ def from_wheel(cls, wheel: "Wheel", name: str) -> "BaseDistribution": raise NotImplementedError() def __repr__(self) -> str: - return f"{self.raw_name} {self.version_str} ({self.location})" + return f"{self.raw_name} {self.raw_version} ({self.location})" def __str__(self) -> str: - return f"{self.raw_name} {self.version_str}" + return f"{self.raw_name} {self.raw_version}" @property def location(self) -> Optional[str]: @@ -276,7 +276,7 @@ def version(self) -> Version: raise NotImplementedError() @property - def version_str(self) -> str: + def raw_version(self) -> str: raise NotImplementedError() @property diff --git a/src/pip/_internal/metadata/importlib/_dists.py b/src/pip/_internal/metadata/importlib/_dists.py index 2fa32f6877b..f65ccb1e706 100644 --- a/src/pip/_internal/metadata/importlib/_dists.py +++ b/src/pip/_internal/metadata/importlib/_dists.py @@ -175,7 +175,7 @@ def version(self) -> Version: return parse_version(self._dist.version) @property - def version_str(self) -> str: + def raw_version(self) -> str: return self._dist.version def is_file(self, path: InfoPath) -> bool: diff --git a/src/pip/_internal/metadata/pkg_resources.py b/src/pip/_internal/metadata/pkg_resources.py index 2741d3a6309..235556781c6 100644 --- a/src/pip/_internal/metadata/pkg_resources.py +++ b/src/pip/_internal/metadata/pkg_resources.py @@ -194,7 +194,7 @@ def version(self) -> Version: return parse_version(self._dist.version) @property - def version_str(self) -> str: + def raw_version(self) -> str: return self._dist.version def is_file(self, path: InfoPath) -> bool: diff --git a/src/pip/_internal/operations/freeze.py b/src/pip/_internal/operations/freeze.py index de5563ba7d6..bb1039fb776 100644 --- a/src/pip/_internal/operations/freeze.py +++ b/src/pip/_internal/operations/freeze.py @@ -149,7 +149,7 @@ def _format_as_name_version(dist: BaseDistribution) -> str: dist_version = dist.version except InvalidVersion: # legacy version - return f"{dist.raw_name}==={dist.version_str}" + return f"{dist.raw_name}==={dist.raw_version}" else: return f"{dist.raw_name}=={dist_version}" diff --git a/src/pip/_internal/req/req_uninstall.py b/src/pip/_internal/req/req_uninstall.py index f3e49f98709..51ee01171ae 100644 --- a/src/pip/_internal/req/req_uninstall.py +++ b/src/pip/_internal/req/req_uninstall.py @@ -367,7 +367,7 @@ def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None: ) return - dist_name_version = f"{self._dist.raw_name}-{self._dist.version_str}" + dist_name_version = f"{self._dist.raw_name}-{self._dist.raw_version}" logger.info("Uninstalling 
%s:", dist_name_version) with indent_log(): From 00edcf4e87d3aa8ce265f011e917bba60bc963db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 21 Apr 2024 19:04:10 +0200 Subject: [PATCH 101/113] Add failing test for pip show of dist with legacy specifier --- .../test_invalid_versions_and_specifiers.py | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index d661b39c0ae..69a8afddaa2 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -50,6 +50,18 @@ def _install_invalid_version(script: PipTestEnvironment, data: TestData) -> None zf.extractall(script.site_packages_path) +def _install_require_invalid_version( + script: PipTestEnvironment, data: TestData +) -> None: + """ + Install a package with an invalid version. + """ + with zipfile.ZipFile( + data.packages.joinpath("require_invalid_version-1.0-py3-none-any.whl") + ) as zf: + zf.extractall(script.site_packages_path) + + def test_uninstall_invalid_version(script: PipTestEnvironment, data: TestData) -> None: """ Test that it is possible to uninstall a package with an invalid version. @@ -82,3 +94,14 @@ def test_show_invalid_version(script: PipTestEnvironment, data: TestData) -> Non _install_invalid_version(script, data) result = script.pip("show", "invalid-version") assert "Name: invalid-version\nVersion: 2010i\n" in result.stdout + + +def test_show_require_invalid_version( + script: PipTestEnvironment, data: TestData +) -> None: + """ + Test that pip can show an installed distribution with a legacy specifier. + """ + _install_require_invalid_version(script, data) + result = script.pip("show", "require-invalid-version") + assert "Name: require-invalid-version\nVersion: 1.0\n" in result.stdout From f24fa663d2c307fecdb6001351b1a11d58890c42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 21 Apr 2024 19:13:40 +0200 Subject: [PATCH 102/113] Add iterator of raw dependencie (Requires-Dist) to BaseDistribution --- src/pip/_internal/metadata/base.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/pip/_internal/metadata/base.py b/src/pip/_internal/metadata/base.py index 2652333e86b..9eabcdb278b 100644 --- a/src/pip/_internal/metadata/base.py +++ b/src/pip/_internal/metadata/base.py @@ -445,6 +445,10 @@ def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requiremen """ raise NotImplementedError() + def iter_raw_dependencies(self) -> Iterable[str]: + """Raw Requires-Dist metadata.""" + return self.metadata.get_all("Requires-Dist", []) + def iter_provided_extras(self) -> Iterable[NormalizedName]: """Extras provided by this distribution. 
From d85efceb37f960479fcdf6c37f10bbca5f31b2c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 21 Apr 2024 19:14:09 +0200 Subject: [PATCH 103/113] Fix pip show of dist with legacy dependency specifiers --- src/pip/_internal/commands/show.py | 20 +++++++++++++------ .../test_invalid_versions_and_specifiers.py | 2 ++ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/src/pip/_internal/commands/show.py b/src/pip/_internal/commands/show.py index b478da118e0..c54d548f5fb 100644 --- a/src/pip/_internal/commands/show.py +++ b/src/pip/_internal/commands/show.py @@ -2,6 +2,7 @@ from optparse import Values from typing import Generator, Iterable, Iterator, List, NamedTuple, Optional +from pip._vendor.packaging.requirements import InvalidRequirement from pip._vendor.packaging.utils import canonicalize_name from pip._internal.cli.base_command import Command @@ -100,12 +101,19 @@ def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]: except KeyError: continue - requires = sorted( - # Avoid duplicates in requirements (e.g. due to environment markers). - {req.name for req in dist.iter_dependencies()}, - key=str.lower, - ) - required_by = sorted(_get_requiring_packages(dist), key=str.lower) + try: + requires = sorted( + # Avoid duplicates in requirements (e.g. due to environment markers). + {req.name for req in dist.iter_dependencies()}, + key=str.lower, + ) + except InvalidRequirement: + requires = sorted(dist.iter_raw_dependencies(), key=str.lower) + + try: + required_by = sorted(_get_requiring_packages(dist), key=str.lower) + except InvalidRequirement: + required_by = ["#N/A"] try: entry_points_text = dist.read_text("entry_points.txt") diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index 69a8afddaa2..9a4615b9e49 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -105,3 +105,5 @@ def test_show_require_invalid_version( _install_require_invalid_version(script, data) result = script.pip("show", "require-invalid-version") assert "Name: require-invalid-version\nVersion: 1.0\n" in result.stdout + assert "Requires: invalid-version ==2010i\n" in result.stdout + assert "Required-by: #N/A\n" in result.stdout From 4052f6a6700bc20d7bfd657baed044aaf3870292 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Mon, 22 Apr 2024 08:31:32 +0200 Subject: [PATCH 104/113] Clarify warning messages Dependencies should be more precise than requirements; --- src/pip/_internal/operations/check.py | 2 +- tests/functional/test_check.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pip/_internal/operations/check.py b/src/pip/_internal/operations/check.py index ee1a5bcfe06..623db76e229 100644 --- a/src/pip/_internal/operations/check.py +++ b/src/pip/_internal/operations/check.py @@ -43,7 +43,7 @@ def create_package_set_from_installed() -> Tuple[PackageSet, bool]: package_set[name] = PackageDetails(dist.version, dependencies) except (OSError, ValueError) as e: # Don't crash on unreadable or broken metadata. 
- logger.warning("Error parsing requirements for %s: %s", name, e) + logger.warning("Error parsing dependencies of %s: %s", name, e) problems = True return package_set, problems diff --git a/tests/functional/test_check.py b/tests/functional/test_check.py index 79b6df39c19..46ecdcc6457 100644 --- a/tests/functional/test_check.py +++ b/tests/functional/test_check.py @@ -272,7 +272,7 @@ def test_basic_check_broken_metadata(script: PipTestEnvironment) -> None: result = script.pip("check", expect_error=True) - assert "Error parsing requirements" in result.stderr + assert "Error parsing dependencies of" in result.stderr assert result.returncode == 1 From edc98cd174261de66954301e0998343d59fed9ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 28 Apr 2024 13:42:04 +0200 Subject: [PATCH 105/113] Clarify test packages README --- tests/data/packages/README.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/data/packages/README.txt b/tests/data/packages/README.txt index 067ee98225c..b957f158a3b 100644 --- a/tests/data/packages/README.txt +++ b/tests/data/packages/README.txt @@ -111,7 +111,8 @@ The invalid-version package with a legacy version. invalid_version-1.0-py3-none-any.whl ------------------------------------ -A valid variant of the invalid-version package. +A valid variant of the invalid-version package. Used, for instance, to test that the +version with invalid metadata is ignored in favor of this one. require_invalid_version-0.1-py3-none-any.whl -------------------------------------------- From 4bb5085384de20da5769a8204cc45cd28b8cc91e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Sun, 28 Apr 2024 19:11:30 +0200 Subject: [PATCH 106/113] Add a failing test for upgrading a distribution with an invalid version --- .../test_invalid_versions_and_specifiers.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py index 9a4615b9e49..1d924d5d02f 100644 --- a/tests/functional/test_invalid_versions_and_specifiers.py +++ b/tests/functional/test_invalid_versions_and_specifiers.py @@ -1,5 +1,7 @@ import zipfile +import pytest + from tests.lib import PipTestEnvironment, TestData @@ -64,12 +66,22 @@ def _install_require_invalid_version( def test_uninstall_invalid_version(script: PipTestEnvironment, data: TestData) -> None: """ - Test that it is possible to uninstall a package with an invalid version. + Test that it is possible to uninstall a distribution with an invalid version. """ _install_invalid_version(script, data) script.pip("uninstall", "-y", "invalid-version") +@pytest.mark.xfail +def test_upgrade_invalid_version(script: PipTestEnvironment, data: TestData) -> None: + """ + Test that it is possible to upgrade a distribution with an invalid version. + """ + _install_invalid_version(script, data) + index_url = data.index_url("invalid-version") + script.pip("install", "--index-url", index_url, "invalid-version") + + def test_list_invalid_version(script: PipTestEnvironment, data: TestData) -> None: """ Test that pip can list an environment containing a package with a legacy version. 
From c51fb4f9556b52194229502a801f7601c856d9af Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?=
Date: Thu, 2 May 2024 17:24:40 +0200
Subject: [PATCH 107/113] Add a failing test for upgrading a distribution with invalid metadata

---
 .../test_invalid_versions_and_specifiers.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py
index 1d924d5d02f..f48b92be71e 100644
--- a/tests/functional/test_invalid_versions_and_specifiers.py
+++ b/tests/functional/test_invalid_versions_and_specifiers.py
@@ -82,6 +82,18 @@ def test_upgrade_invalid_version(script: PipTestEnvironment, data: TestData) ->
     script.pip("install", "--index-url", index_url, "invalid-version")


+@pytest.mark.xfail
+def test_upgrade_require_invalid_version(
+    script: PipTestEnvironment, data: TestData
+) -> None:
+    """
+    Test that it is possible to upgrade a distribution with invalid metadata.
+    """
+    _install_require_invalid_version(script, data)
+    index_url = data.index_url("require-invalid-version")
+    script.pip("install", "--index-url", index_url, "require-invalid-version")
+
+
 def test_list_invalid_version(script: PipTestEnvironment, data: TestData) -> None:
     """
     Test that pip can list an environment containing a package with a legacy version.

From 84ad55a7807060a3bfe2c9d4857d541439959945 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Bidoul?=
Date: Thu, 2 May 2024 18:48:47 +0200
Subject: [PATCH 108/113] Fix test_show_require_invalid_version with pkg_resources backend

The pkg_resources metadata backend does not suffer from invalid metadata
in this test case.

---
 tests/functional/test_invalid_versions_and_specifiers.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/tests/functional/test_invalid_versions_and_specifiers.py b/tests/functional/test_invalid_versions_and_specifiers.py
index f48b92be71e..21ce02085fc 100644
--- a/tests/functional/test_invalid_versions_and_specifiers.py
+++ b/tests/functional/test_invalid_versions_and_specifiers.py
@@ -2,6 +2,7 @@

 import pytest

+from pip._internal.metadata import select_backend
 from tests.lib import PipTestEnvironment, TestData


@@ -130,4 +131,9 @@ def test_show_require_invalid_version(
     result = script.pip("show", "require-invalid-version")
     assert "Name: require-invalid-version\nVersion: 1.0\n" in result.stdout
     assert "Requires: invalid-version ==2010i\n" in result.stdout
-    assert "Required-by: #N/A\n" in result.stdout
+    if select_backend().NAME == "importlib":
+        assert "Required-by: #N/A\n" in result.stdout
+    elif select_backend().NAME == "pkg_resources":
+        assert "Required-by: \n" in result.stdout
+    else:
+        assert False, "Unknown metadata backend"

From 50ada46ed487c5a3385aac82090c18f1cd0ea2a7 Mon Sep 17 00:00:00 2001
From: Richard Si
Date: Wed, 17 Apr 2024 20:11:42 -0400
Subject: [PATCH 109/113] Move warn_if_run_as_root() to utils

While it's reasonable to put this function in req_install.py, putting it
there unfortunately means importing a bunch of heavy network/index code
as well, which isn't acceptable.
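A rough way to observe the import weight being trimmed here (assuming a development checkout with this series applied; the exact module set may vary):

```python
# Sketch: in a fresh interpreter, importing the uninstall command should no
# longer pull in the network machinery once this series of refactors lands.
import sys

import pip._internal.commands.uninstall  # noqa: F401

print("pip._internal.network.session" in sys.modules)  # expected: False
```

CPython's ``python -X importtime`` gives a more detailed breakdown of the same import chain.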
--- src/pip/_internal/cli/req_command.py | 34 ------------------------- src/pip/_internal/commands/install.py | 2 +- src/pip/_internal/commands/uninstall.py | 3 ++- src/pip/_internal/utils/misc.py | 33 ++++++++++++++++++++++++ 4 files changed, 36 insertions(+), 36 deletions(-) diff --git a/src/pip/_internal/cli/req_command.py b/src/pip/_internal/cli/req_command.py index 7a5580a8457..8acffb241ad 100644 --- a/src/pip/_internal/cli/req_command.py +++ b/src/pip/_internal/cli/req_command.py @@ -39,7 +39,6 @@ TempDirectoryTypeRegistry, tempdir_kinds, ) -from pip._internal.utils.virtualenv import running_under_virtualenv if TYPE_CHECKING: from ssl import SSLContext @@ -194,39 +193,6 @@ def handle_pip_version_check(self, options: Values) -> None: ] -def warn_if_run_as_root() -> None: - """Output a warning for sudo users on Unix. - - In a virtual environment, sudo pip still writes to virtualenv. - On Windows, users may run pip as Administrator without issues. - This warning only applies to Unix root users outside of virtualenv. - """ - if running_under_virtualenv(): - return - if not hasattr(os, "getuid"): - return - # On Windows, there are no "system managed" Python packages. Installing as - # Administrator via pip is the correct way of updating system environments. - # - # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform - # checks: https://mypy.readthedocs.io/en/stable/common_issues.html - if sys.platform == "win32" or sys.platform == "cygwin": - return - - if os.getuid() != 0: - return - - logger.warning( - "Running pip as the 'root' user can result in broken permissions and " - "conflicting behaviour with the system package manager, possibly " - "rendering your system unusable." - "It is recommended to use a virtual environment instead: " - "https://pip.pypa.io/warnings/venv. " - "Use the --root-user-action option if you know what you are doing and " - "want to suppress this warning." - ) - - def with_cleanup(func: Any) -> Any: """Decorator for common logic related to managing temporary directories. 
diff --git a/src/pip/_internal/commands/install.py b/src/pip/_internal/commands/install.py index a9e83f9d8cd..29276c243cc 100644 --- a/src/pip/_internal/commands/install.py +++ b/src/pip/_internal/commands/install.py @@ -14,7 +14,6 @@ from pip._internal.cli.cmdoptions import make_target_python from pip._internal.cli.req_command import ( RequirementCommand, - warn_if_run_as_root, with_cleanup, ) from pip._internal.cli.status_codes import ERROR, SUCCESS @@ -37,6 +36,7 @@ ensure_dir, get_pip_version, protect_pip_from_modification_on_windows, + warn_if_run_as_root, write_output, ) from pip._internal.utils.temp_dir import TempDirectory diff --git a/src/pip/_internal/commands/uninstall.py b/src/pip/_internal/commands/uninstall.py index f198fc313ff..32a0aa8a044 100644 --- a/src/pip/_internal/commands/uninstall.py +++ b/src/pip/_internal/commands/uninstall.py @@ -6,7 +6,7 @@ from pip._internal.cli import cmdoptions from pip._internal.cli.base_command import Command -from pip._internal.cli.req_command import SessionCommandMixin, warn_if_run_as_root +from pip._internal.cli.req_command import SessionCommandMixin from pip._internal.cli.status_codes import SUCCESS from pip._internal.exceptions import InstallationError from pip._internal.req import parse_requirements @@ -17,6 +17,7 @@ from pip._internal.utils.misc import ( check_externally_managed, protect_pip_from_modification_on_windows, + warn_if_run_as_root, ) logger = logging.getLogger(__name__) diff --git a/src/pip/_internal/utils/misc.py b/src/pip/_internal/utils/misc.py index 82b6261322d..48771c09919 100644 --- a/src/pip/_internal/utils/misc.py +++ b/src/pip/_internal/utils/misc.py @@ -745,3 +745,36 @@ def prepare_metadata_for_build_editable( config_settings=cs, _allow_fallback=_allow_fallback, ) + + +def warn_if_run_as_root() -> None: + """Output a warning for sudo users on Unix. + + In a virtual environment, sudo pip still writes to virtualenv. + On Windows, users may run pip as Administrator without issues. + This warning only applies to Unix root users outside of virtualenv. + """ + if running_under_virtualenv(): + return + if not hasattr(os, "getuid"): + return + # On Windows, there are no "system managed" Python packages. Installing as + # Administrator via pip is the correct way of updating system environments. + # + # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform + # checks: https://mypy.readthedocs.io/en/stable/common_issues.html + if sys.platform == "win32" or sys.platform == "cygwin": + return + + if os.getuid() != 0: + return + + logger.warning( + "Running pip as the 'root' user can result in broken permissions and " + "conflicting behaviour with the system package manager, possibly " + "rendering your system unusable." + "It is recommended to use a virtual environment instead: " + "https://pip.pypa.io/warnings/venv. " + "Use the --root-user-action option if you know what you are doing and " + "want to suppress this warning." + ) From 55cfa546c8193bc419335c424ee2dcddd767f605 Mon Sep 17 00:00:00 2001 From: Richard Si Date: Wed, 17 Apr 2024 20:59:57 -0400 Subject: [PATCH 110/113] Move non requirement command classes out of req_command.py By moving SessionCommandMixin and IndexGroupCommand into their own module, lazy imports can be easily used. 
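The enabling idiom, sketched in isolation (this mirrors what the new ``index_command.py`` below does): ``TYPE_CHECKING`` keeps the annotations accurate while the runtime import is deferred until a code path actually needs it.

```python
# Sketch of the lazy-import idiom used by the new module.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by type checkers, never at runtime.
    from pip._internal.network.session import PipSession

def build_session() -> "PipSession":
    # The costly import is paid only when a command really needs a session.
    from pip._internal.network.session import PipSession

    return PipSession()
```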
---
 src/pip/_internal/cli/index_command.py | 172 +++++++++++++++++++++++++
 src/pip/_internal/cli/req_command.py | 158 +----------------------
 src/pip/_internal/commands/list.py | 2 +-
 tests/unit/test_base_command.py | 2 +-
 tests/unit/test_commands.py | 2 +-
 5 files changed, 181 insertions(+), 155 deletions(-)
 create mode 100644 src/pip/_internal/cli/index_command.py

diff --git a/src/pip/_internal/cli/index_command.py b/src/pip/_internal/cli/index_command.py
new file mode 100644
index 00000000000..4ff7b2c3a5d
--- /dev/null
+++ b/src/pip/_internal/cli/index_command.py
@@ -0,0 +1,172 @@
+"""
+Contains command classes which may interact with an index / the network.
+
+Unlike its sister module, req_command, this module still uses lazy imports
+so commands which don't always hit the network (e.g. list w/o --outdated or
+--uptodate) don't need to waste time importing PipSession and friends.
+"""
+
+import logging
+import os
+import sys
+from optparse import Values
+from typing import TYPE_CHECKING, List, Optional
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.command_context import CommandContextMixIn
+from pip._internal.exceptions import CommandError
+
+if TYPE_CHECKING:
+    from ssl import SSLContext
+
+    from pip._internal.network.session import PipSession
+
+logger = logging.getLogger(__name__)
+
+
+def _create_truststore_ssl_context() -> Optional["SSLContext"]:
+    if sys.version_info < (3, 10):
+        raise CommandError("The truststore feature is only available for Python 3.10+")
+
+    try:
+        import ssl
+    except ImportError:
+        logger.warning("Disabling truststore since ssl support is missing")
+        return None
+
+    try:
+        from pip._vendor import truststore
+    except ImportError as e:
+        raise CommandError(f"The truststore feature is unavailable: {e}")
+
+    return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+
+
+class SessionCommandMixin(CommandContextMixIn):
+    """
+    A class mixin for command classes needing _build_session().
+ """ + + def __init__(self) -> None: + super().__init__() + self._session: Optional["PipSession"] = None + + @classmethod + def _get_index_urls(cls, options: Values) -> Optional[List[str]]: + """Return a list of index urls from user-provided options.""" + index_urls = [] + if not getattr(options, "no_index", False): + url = getattr(options, "index_url", None) + if url: + index_urls.append(url) + urls = getattr(options, "extra_index_urls", None) + if urls: + index_urls.extend(urls) + # Return None rather than an empty list + return index_urls or None + + def get_default_session(self, options: Values) -> "PipSession": + """Get a default-managed session.""" + if self._session is None: + self._session = self.enter_context(self._build_session(options)) + # there's no type annotation on requests.Session, so it's + # automatically ContextManager[Any] and self._session becomes Any, + # then https://github.com/python/mypy/issues/7696 kicks in + assert self._session is not None + return self._session + + def _build_session( + self, + options: Values, + retries: Optional[int] = None, + timeout: Optional[int] = None, + fallback_to_certifi: bool = False, + ) -> "PipSession": + from pip._internal.network.session import PipSession + + cache_dir = options.cache_dir + assert not cache_dir or os.path.isabs(cache_dir) + + if "truststore" in options.features_enabled: + try: + ssl_context = _create_truststore_ssl_context() + except Exception: + if not fallback_to_certifi: + raise + ssl_context = None + else: + ssl_context = None + + session = PipSession( + cache=os.path.join(cache_dir, "http-v2") if cache_dir else None, + retries=retries if retries is not None else options.retries, + trusted_hosts=options.trusted_hosts, + index_urls=self._get_index_urls(options), + ssl_context=ssl_context, + ) + + # Handle custom ca-bundles from the user + if options.cert: + session.verify = options.cert + + # Handle SSL client certificate + if options.client_cert: + session.cert = options.client_cert + + # Handle timeouts + if options.timeout or timeout: + session.timeout = timeout if timeout is not None else options.timeout + + # Handle configured proxies + if options.proxy: + session.proxies = { + "http": options.proxy, + "https": options.proxy, + } + session.trust_env = False + + # Determine if we can prompt the user for authentication or not + session.auth.prompting = not options.no_input + session.auth.keyring_provider = options.keyring_provider + + return session + + +def _pip_self_version_check(session: "PipSession", options: Values) -> None: + from pip._internal.self_outdated_check import pip_self_version_check as check + + check(session, options) + + +class IndexGroupCommand(Command, SessionCommandMixin): + """ + Abstract base class for commands with the index_group options. + + This also corresponds to the commands that permit the pip version check. + """ + + def handle_pip_version_check(self, options: Values) -> None: + """ + Do the pip version check if not disabled. + + This overrides the default behavior of not doing the check. + """ + # Make sure the index_group options are present. + assert hasattr(options, "no_index") + + if options.disable_pip_version_check or options.no_index: + return + + # Otherwise, check if we're using the latest version of pip available. + session = self._build_session( + options, + retries=0, + timeout=min(5, options.timeout), + # This is set to ensure the function does not fail when truststore is + # specified in use-feature but cannot be loaded. 
This usually raises a + # CommandError and shows a nice user-facing error, but this function is not + # called in that try-except block. + fallback_to_certifi=True, + ) + with session: + _pip_self_version_check(session, options) diff --git a/src/pip/_internal/cli/req_command.py b/src/pip/_internal/cli/req_command.py index 8acffb241ad..92900f94ff4 100644 --- a/src/pip/_internal/cli/req_command.py +++ b/src/pip/_internal/cli/req_command.py @@ -1,21 +1,19 @@ -"""Contains the Command base classes that depend on PipSession. +"""Contains the RequirementCommand base class. -The classes in this module are in a separate module so the commands not -needing download / PackageFinder capability don't unnecessarily import the +This class is in a separate module so the commands that do not always +need PackageFinder capability don't unnecessarily import the PackageFinder machinery and all its vendored dependencies, etc. """ import logging -import os -import sys from functools import partial from optparse import Values -from typing import TYPE_CHECKING, Any, List, Optional, Tuple +from typing import Any, List, Optional, Tuple from pip._internal.cache import WheelCache from pip._internal.cli import cmdoptions -from pip._internal.cli.base_command import Command -from pip._internal.cli.command_context import CommandContextMixIn +from pip._internal.cli.index_command import IndexGroupCommand +from pip._internal.cli.index_command import SessionCommandMixin as SessionCommandMixin from pip._internal.exceptions import CommandError, PreviousBuildDirError from pip._internal.index.collector import LinkCollector from pip._internal.index.package_finder import PackageFinder @@ -33,159 +31,15 @@ from pip._internal.req.req_file import parse_requirements from pip._internal.req.req_install import InstallRequirement from pip._internal.resolution.base import BaseResolver -from pip._internal.self_outdated_check import pip_self_version_check from pip._internal.utils.temp_dir import ( TempDirectory, TempDirectoryTypeRegistry, tempdir_kinds, ) -if TYPE_CHECKING: - from ssl import SSLContext - logger = logging.getLogger(__name__) -def _create_truststore_ssl_context() -> Optional["SSLContext"]: - if sys.version_info < (3, 10): - raise CommandError("The truststore feature is only available for Python 3.10+") - - try: - import ssl - except ImportError: - logger.warning("Disabling truststore since ssl support is missing") - return None - - try: - from pip._vendor import truststore - except ImportError as e: - raise CommandError(f"The truststore feature is unavailable: {e}") - - return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT) - - -class SessionCommandMixin(CommandContextMixIn): - """ - A class mixin for command classes needing _build_session(). 
- """ - - def __init__(self) -> None: - super().__init__() - self._session: Optional[PipSession] = None - - @classmethod - def _get_index_urls(cls, options: Values) -> Optional[List[str]]: - """Return a list of index urls from user-provided options.""" - index_urls = [] - if not getattr(options, "no_index", False): - url = getattr(options, "index_url", None) - if url: - index_urls.append(url) - urls = getattr(options, "extra_index_urls", None) - if urls: - index_urls.extend(urls) - # Return None rather than an empty list - return index_urls or None - - def get_default_session(self, options: Values) -> PipSession: - """Get a default-managed session.""" - if self._session is None: - self._session = self.enter_context(self._build_session(options)) - # there's no type annotation on requests.Session, so it's - # automatically ContextManager[Any] and self._session becomes Any, - # then https://github.com/python/mypy/issues/7696 kicks in - assert self._session is not None - return self._session - - def _build_session( - self, - options: Values, - retries: Optional[int] = None, - timeout: Optional[int] = None, - fallback_to_certifi: bool = False, - ) -> PipSession: - cache_dir = options.cache_dir - assert not cache_dir or os.path.isabs(cache_dir) - - if "truststore" in options.features_enabled: - try: - ssl_context = _create_truststore_ssl_context() - except Exception: - if not fallback_to_certifi: - raise - ssl_context = None - else: - ssl_context = None - - session = PipSession( - cache=os.path.join(cache_dir, "http-v2") if cache_dir else None, - retries=retries if retries is not None else options.retries, - trusted_hosts=options.trusted_hosts, - index_urls=self._get_index_urls(options), - ssl_context=ssl_context, - ) - - # Handle custom ca-bundles from the user - if options.cert: - session.verify = options.cert - - # Handle SSL client certificate - if options.client_cert: - session.cert = options.client_cert - - # Handle timeouts - if options.timeout or timeout: - session.timeout = timeout if timeout is not None else options.timeout - - # Handle configured proxies - if options.proxy: - session.proxies = { - "http": options.proxy, - "https": options.proxy, - } - session.trust_env = False - - # Determine if we can prompt the user for authentication or not - session.auth.prompting = not options.no_input - session.auth.keyring_provider = options.keyring_provider - - return session - - -class IndexGroupCommand(Command, SessionCommandMixin): - """ - Abstract base class for commands with the index_group options. - - This also corresponds to the commands that permit the pip version check. - """ - - def handle_pip_version_check(self, options: Values) -> None: - """ - Do the pip version check if not disabled. - - This overrides the default behavior of not doing the check. - """ - # Make sure the index_group options are present. - assert hasattr(options, "no_index") - - if options.disable_pip_version_check or options.no_index: - return - - # Otherwise, check if we're using the latest version of pip available. - session = self._build_session( - options, - retries=0, - timeout=min(5, options.timeout), - # This is set to ensure the function does not fail when truststore is - # specified in use-feature but cannot be loaded. This usually raises a - # CommandError and shows a nice user-facing error, but this function is not - # called in that try-except block. 
- fallback_to_certifi=True, - ) - with session: - pip_self_version_check(session, options) - - KEEPABLE_TEMPDIR_TYPES = [ tempdir_kinds.BUILD_ENV, tempdir_kinds.EPHEM_WHEEL_CACHE, diff --git a/src/pip/_internal/commands/list.py b/src/pip/_internal/commands/list.py index cb1e313f468..2da5e2d5204 100644 --- a/src/pip/_internal/commands/list.py +++ b/src/pip/_internal/commands/list.py @@ -7,7 +7,7 @@ from pip._vendor.packaging.version import Version from pip._internal.cli import cmdoptions -from pip._internal.cli.req_command import IndexGroupCommand +from pip._internal.cli.index_command import IndexGroupCommand from pip._internal.cli.status_codes import SUCCESS from pip._internal.exceptions import CommandError from pip._internal.index.collector import LinkCollector diff --git a/tests/unit/test_base_command.py b/tests/unit/test_base_command.py index 44dae384a75..80c93799d94 100644 --- a/tests/unit/test_base_command.py +++ b/tests/unit/test_base_command.py @@ -93,7 +93,7 @@ def test_raise_broken_stdout__debug_logging( assert "Traceback (most recent call last):" in stderr -@patch("pip._internal.cli.req_command.Command.handle_pip_version_check") +@patch("pip._internal.cli.index_command.Command.handle_pip_version_check") def test_handle_pip_version_check_called(mock_handle_version_check: Mock) -> None: """ Check that Command.handle_pip_version_check() is called. diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index 4f6366513d7..9dfb71a4d1d 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -87,7 +87,7 @@ def has_option_no_index(command: Command) -> bool: (True, True, False), ], ) -@mock.patch("pip._internal.cli.req_command.pip_self_version_check") +@mock.patch("pip._internal.cli.index_command._pip_self_version_check") def test_index_group_handle_pip_version_check( mock_version_check: mock.Mock, command_name: str, From 1071614e5c579266fdc0d031824595e87d1469a0 Mon Sep 17 00:00:00 2001 From: Richard Si Date: Wed, 17 Apr 2024 21:10:51 -0400 Subject: [PATCH 111/113] Avoid heavy index imports for pip uninstall & list pip uninstall and list currently depend on req_install.py which always imports the expensive network and index machinery. However, it's only in rare situations that these commands actually hit the network: - `pip list --outdated` - `pip list --uptodate` - `pip uninstall --requirement ` This commit builds on the previous two refactoring commits and modifies these commands to avoid the expensive imports unless truly necessary. --- ...24ca5f01-e27e-41b1-9a9c-7e3a828e22d9.trivial.rst | 10 ++++++++++ src/pip/_internal/commands/list.py | 13 ++++++++----- src/pip/_internal/commands/uninstall.py | 2 +- tests/functional/test_cli.py | 3 +-- tests/unit/test_commands.py | 2 +- 5 files changed, 21 insertions(+), 9 deletions(-) create mode 100644 news/24ca5f01-e27e-41b1-9a9c-7e3a828e22d9.trivial.rst diff --git a/news/24ca5f01-e27e-41b1-9a9c-7e3a828e22d9.trivial.rst b/news/24ca5f01-e27e-41b1-9a9c-7e3a828e22d9.trivial.rst new file mode 100644 index 00000000000..1be25395635 --- /dev/null +++ b/news/24ca5f01-e27e-41b1-9a9c-7e3a828e22d9.trivial.rst @@ -0,0 +1,10 @@ +pip uninstall and list currently depend on req_install.py which always imports +the expensive network and index machinery. 
However, it's only in rare situations +that these commands actually hit the network: + +- ``pip list --outdated`` +- ``pip list --uptodate`` +- ``pip uninstall --requirement `` + +This patch refactors req_install.py so these commands can avoid the expensive +imports unless truly necessary. diff --git a/src/pip/_internal/commands/list.py b/src/pip/_internal/commands/list.py index 2da5e2d5204..82fc46a118f 100644 --- a/src/pip/_internal/commands/list.py +++ b/src/pip/_internal/commands/list.py @@ -10,15 +10,14 @@ from pip._internal.cli.index_command import IndexGroupCommand from pip._internal.cli.status_codes import SUCCESS from pip._internal.exceptions import CommandError -from pip._internal.index.collector import LinkCollector -from pip._internal.index.package_finder import PackageFinder from pip._internal.metadata import BaseDistribution, get_environment from pip._internal.models.selection_prefs import SelectionPreferences -from pip._internal.network.session import PipSession from pip._internal.utils.compat import stdlib_pkgs from pip._internal.utils.misc import tabulate, write_output if TYPE_CHECKING: + from pip._internal.index.package_finder import PackageFinder + from pip._internal.network.session import PipSession class _DistWithLatestInfo(BaseDistribution): """Give the distribution object a couple of extra fields. @@ -140,11 +139,15 @@ def handle_pip_version_check(self, options: Values) -> None: super().handle_pip_version_check(options) def _build_package_finder( - self, options: Values, session: PipSession - ) -> PackageFinder: + self, options: Values, session: "PipSession" + ) -> "PackageFinder": """ Create a package finder appropriate to this list command. """ + # Lazy import the heavy index modules as most list invocations won't need 'em. + from pip._internal.index.collector import LinkCollector + from pip._internal.index.package_finder import PackageFinder + link_collector = LinkCollector.create(session, options=options) # Pass allow_yanked=False to ignore yanked versions. diff --git a/src/pip/_internal/commands/uninstall.py b/src/pip/_internal/commands/uninstall.py index 32a0aa8a044..bc0edeac9fb 100644 --- a/src/pip/_internal/commands/uninstall.py +++ b/src/pip/_internal/commands/uninstall.py @@ -6,7 +6,7 @@ from pip._internal.cli import cmdoptions from pip._internal.cli.base_command import Command -from pip._internal.cli.req_command import SessionCommandMixin +from pip._internal.cli.index_command import SessionCommandMixin from pip._internal.cli.status_codes import SUCCESS from pip._internal.exceptions import InstallationError from pip._internal.req import parse_requirements diff --git a/tests/functional/test_cli.py b/tests/functional/test_cli.py index 3c166152111..e1ccf04ea1c 100644 --- a/tests/functional/test_cli.py +++ b/tests/functional/test_cli.py @@ -57,8 +57,7 @@ def test_entrypoints_work(entrypoint: str, script: PipTestEnvironment) -> None: sorted( set(commands_dict).symmetric_difference( # Exclude commands that are expected to use the network. 
-            # TODO: uninstall and list should only import network modules as needed
-            {"install", "uninstall", "download", "search", "index", "wheel", "list"}
+            {"install", "download", "search", "index", "wheel"}
         )
     ),
 )
diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py
index 9dfb71a4d1d..9d5aefec298 100644
--- a/tests/unit/test_commands.py
+++ b/tests/unit/test_commands.py
@@ -128,7 +128,7 @@ def is_requirement_command(command: Command) -> bool:


 @pytest.mark.parametrize("flag", ["", "--outdated", "--uptodate"])
-@mock.patch("pip._internal.cli.req_command.pip_self_version_check")
+@mock.patch("pip._internal.cli.index_command._pip_self_version_check")
 @mock.patch.dict(os.environ, {"PIP_DISABLE_PIP_VERSION_CHECK": "no"})
 def test_list_pip_version_check(version_check_mock: mock.Mock, flag: str) -> None:
     """

From 22142d6fcc9885587bcf102d7652529ec1d9503e Mon Sep 17 00:00:00 2001
From: Agus
Date: Mon, 6 May 2024 13:16:10 +0200
Subject: [PATCH 112/113] Add documentation for CLI architecture (#12093)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Pradyun Gedam
Co-authored-by: chrysle <96722107+chrysle@users.noreply.github.com>
---
 .../architecture/command-line-interface.rst | 166 +++++++++++++++++-
 news/6831.doc.rst                           |   1 +
 2 files changed, 158 insertions(+), 9 deletions(-)
 create mode 100644 news/6831.doc.rst

diff --git a/docs/html/development/architecture/command-line-interface.rst b/docs/html/development/architecture/command-line-interface.rst
index 9bfa9119258..725199bf580 100644
--- a/docs/html/development/architecture/command-line-interface.rst
+++ b/docs/html/development/architecture/command-line-interface.rst
@@ -28,19 +28,167 @@ for parsing top level args.
 ``Command`` then uses another ``ConfigOptionParser`` instance, to parse
 command-specific args.

-* TODO: How & where options are defined
-  (cmdoptions, command-specific files).
+Command structure
+-----------------

-* TODO: How & where arguments are processed.
-  (main_parser, command-specific parser)
+This section shows the class hierarchy from which every command's class
+inherits.

-* TODO: How processed arguments are accessed.
-  (attributes on argument to ``Command.run()``)
+`base_command.py `_
+defines the base ``Command`` class, from which every other command will inherit directly or
+indirectly (see the *command tree* at the end of this section).

-* TODO: How configuration and CLI "blend".
-  (implemented in ``ConfigOptionParser``)
+Using the ``ConfigOptionParser`` (see `Configuration and CLI "blend" `_),
+this class adds the general options and instantiates the *cmd_opts* group, where every other specific
+option will be added if needed by each command's class. For those commands that define specific
+options, like ``--dry-run`` on the ``pip install`` command, the options must be added to *cmd_opts*
+(this is the job of the *add_options* method), which will be automatically called on ``Command``'s initialization.

-* TODO: progress bars and spinners
+The base ``Command`` has the following methods:
+
+.. py:class:: Command
+
+   .. py:method:: main()
+
+      Main method of the class; it's always called (as can be seen in main.py's
+      `main `_).
+      It's in charge of calling the specific ``run`` method of the class and handling any errors.
+
+   .. py:method:: run()
+
+      Abstract method where the actual action of a command is defined.
+
+   .. py:method:: add_options()
+
+      Optional method to insert additional options on a class, called on ``Command`` initialization.
+Some commands have more specialized behavior (see, for example, ``pip index``).
+These commands will instead inherit from ``IndexGroupCommand``, which inherits from ``Command``
+and ``SessionCommandMixin`` to build the pip session for the corresponding requests.
+
+Lastly, ``RequirementCommand``, which inherits from ``IndexGroupCommand``, is the base class
+for those commands which make use of requirements in any form, like ``pip install``.
+
+In addition to the previous classes, a last mixin class must be mentioned, from which
+``Command`` as well as ``SessionCommandMixin`` inherit: ``CommandContextMixIn``, in
+charge of the command's context.
+
+In the following command tree we can see the hierarchy defined for the different pip
+commands, where each command is defined under the base class it inherits from:
+
+| ``Command``
+| ├─ ``cache``, ``check``, ``completion``, ``configuration``, ``debug``, ``freeze``, ``hash``, ``help``, ``inspect``, ``show``, ``search``, ``uninstall``
+| └─ ``IndexGroupCommand``
+|    ├─ ``index``, ``list``
+|    └─ ``RequirementCommand``
+|       └─ ``wheel``, ``download``, ``install``
+
+
+Option definition
+-----------------
+
+The set of shared options is defined in the `cmdoptions.py `_
+module, as well as the *general options* and *package index options* groups of options
+we see when we call a command's help, or ``pip index``'s help message, respectively.
+All options are defined in terms of functions that return `optparse.Option `_
+instances once called, while specific groups of options, like *Config Options* for
+``pip config``, are defined in each specific command file (see for example the
+`configuration.py `_).
+
+Argument parsing
+----------------
+
+The main entrypoint for the application is defined in the ``main`` function in the
+`main.py `_ module.
+This function is in charge of the `autocompletion `_,
+calling the ``parse_command`` function and creating and running the subprograms
+via ``create_command``, on which the ``main`` method is called.
+
+The ``parse_command`` function is defined in the `main_parser.py `_
+module, which defines the following two functions:
+
+.. py:function:: parse_command()
+
+   Function in charge of the initial parsing of the ``pip`` program. Creates the main parser (see
+   the next function ``create_main_parser``) to extract the general options
+   and the remaining arguments. For example, running ``pip --timeout=5 install --user INITools``
+   will split ``['--timeout=5']`` as a general option and ``['install', '--user', 'INITools']``
+   as the remainder.
+
+   At this step the program deals with the options ``--python``, ``--version``, ``pip``
+   or ``pip help``. If none of the previous options is found, it tries to extract the command
+   name and arguments.
+
+.. py:function:: create_main_parser()
+
+   Creates the main parser (type ``pip`` in the console to see the description of the
+   program). The internal parser (`ConfigOptionParser `_)
+   adds the general option group and the list of commands coming from ``cmdoptions.py``
+   at this point.
+
+After the initial parsing is done, ``create_command`` is in charge of creating the appropriate
+command using the information stored in the `commands_dict `_
+variable, and calling its ``main`` method (see `Command structure `_).
+
+A second round of argument parsing is done by each specific command (defined in the base ``Command`` class),
+again using the ``ConfigOptionParser``.
+
+Argument access
+---------------
+
+To access all the options and arguments, ``Command.run()`` takes
+the options as `optparse.Values `_
+and a list of strings for the arguments (parsed in ``Command.main()``). The internal methods of
+the base ``Command`` class are in charge of passing these variables after ``parse_args`` is
+called for a specific command.
+
+Configuration and CLI "blend"
+-----------------------------
+
+The base ``Command`` instantiates the class `ConfigOptionParser `_,
+which is in charge of the parsing process (via its parent class
+`optparse.OptionParser `_).
+Its main addition is the following method:
+
+.. py:class:: ConfigOptionParser(OptionParser)
+
+    .. py:method:: get_default_values()
+
+        Overrides the original method to allow updating the defaults after the
+        instantiation of the option parser.
+
+It allows overriding the default options and arguments using the ``Configuration`` class
+(more information can be found in :ref:`Configuration`) to include environment variables and
+settings from configuration files.
+
+Progress bars and spinners
+--------------------------
+
+There are two more modules in the ``cli`` subpackage in charge of showing the state of the
+program.
+
+* `progress_bars.py `_
+
+  This module contains the following function:
+
+  .. py:function:: get_download_progress_renderer()
+
+     It uses `rich `_
+     functionality to render the download progress.
+
+  This function (used in `download.py `_,
+  inside the ``Downloader`` class) allows watching the download progress when running
+  ``pip install`` on *big* packages.
+
+* `spinner.py `_
+
+  The main function of this module is:
+
+  .. py:function:: open_spinner()
+
+     It yields the appropriate type of spinner, which is used in the ``call_subprocess``
+     function, inside the `subprocess.py `_
+     module, so the user can see that a program is running.
 
 * TODO: quirks / standard practices / broad ideas.
   (avoiding lists in option def'n, special cased option value types,
diff --git a/news/6831.doc.rst b/news/6831.doc.rst
new file mode 100644
index 00000000000..994fc763d6e
--- /dev/null
+++ b/news/6831.doc.rst
@@ -0,0 +1 @@
+Update architecture documentation for command line interface.

From 12c171f193b7b38329a75bbaecabca814831d5e5 Mon Sep 17 00:00:00 2001
From: Petr Viktorin
Date: Mon, 6 May 2024 13:20:47 +0200
Subject: [PATCH 113/113] Use the ``data_filter`` when extracting tarballs, if
 it's available. (#12214)

Previous behaviour is used on Python without PEP-706 tarfile filters.
(Note that the feature is now in security releases of all supported
versions.)

A custom filter (which wraps `data_filter`) is used to retain
pip-specific behaviour:

- Removing a common leading directory
- Setting the mode (Unix permissions)

Compared to the previous behaviour, if a file can't be unpacked, the
unpacking operation will fail with `InstallationError`, rather than
skipping the individual file with a `logger.warning`. This means that
"some corrupt tar files" now can't be unpacked.

Note that PEP 721 limits itself to sdists; this change affects
unpacking of any other tar file as well.
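
As a rough illustration of what the stdlib filter rejects (not part of
the patch itself; assumes a Python with `tarfile.data_filter`, i.e.
3.12+ or one of the security releases mentioned above):

    import tarfile

    # A member whose name would escape the extraction root.
    member = tarfile.TarInfo("../outside.txt")
    try:
        tarfile.data_filter(member, "/tmp/dest")
    except tarfile.OutsideDestinationError as exc:
        # data_filter refuses members resolving outside the destination.
        print(exc)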
--- news/12111.feature.rst | 1 + src/pip/_internal/utils/unpacking.py | 170 +++++++++++++++++++-------- tests/unit/test_utils_unpacking.py | 28 ++++- 3 files changed, 148 insertions(+), 51 deletions(-) create mode 100644 news/12111.feature.rst diff --git a/news/12111.feature.rst b/news/12111.feature.rst new file mode 100644 index 00000000000..eb03b76d826 --- /dev/null +++ b/news/12111.feature.rst @@ -0,0 +1 @@ +PEP 721: Use the ``data_filter`` when extracting tarballs, if it's available. diff --git a/src/pip/_internal/utils/unpacking.py b/src/pip/_internal/utils/unpacking.py index 78b5c13ced3..341269550ce 100644 --- a/src/pip/_internal/utils/unpacking.py +++ b/src/pip/_internal/utils/unpacking.py @@ -5,6 +5,7 @@ import os import shutil import stat +import sys import tarfile import zipfile from typing import Iterable, List, Optional @@ -85,12 +86,16 @@ def is_within_directory(directory: str, target: str) -> bool: return prefix == abs_directory +def _get_default_mode_plus_executable() -> int: + return 0o777 & ~current_umask() | 0o111 + + def set_extracted_file_to_default_mode_plus_executable(path: str) -> None: """ Make file present at path have execute for user/group/world (chmod +x) is no-op on windows per python docs """ - os.chmod(path, (0o777 & ~current_umask() | 0o111)) + os.chmod(path, _get_default_mode_plus_executable()) def zip_item_is_executable(info: ZipInfo) -> bool: @@ -151,8 +156,8 @@ def untar_file(filename: str, location: str) -> None: Untar the file (with path `filename`) to the destination `location`. All files are written based on system defaults and umask (i.e. permissions are not preserved), except that regular file members with any execute - permissions (user, group, or world) have "chmod +x" applied after being - written. Note that for windows, any execute changes using os.chmod are + permissions (user, group, or world) have "chmod +x" applied on top of the + default. Note that for windows, any execute changes using os.chmod are no-ops per the python docs. """ ensure_dir(location) @@ -170,62 +175,127 @@ def untar_file(filename: str, location: str) -> None: filename, ) mode = "r:*" + tar = tarfile.open(filename, mode, encoding="utf-8") try: leading = has_leading_dir([member.name for member in tar.getmembers()]) - for member in tar.getmembers(): - fn = member.name - if leading: - fn = split_leading_dir(fn)[1] - path = os.path.join(location, fn) - if not is_within_directory(location, path): - message = ( - "The tar file ({}) has a file ({}) trying to install " - "outside target directory ({})" - ) - raise InstallationError(message.format(filename, path, location)) - if member.isdir(): - ensure_dir(path) - elif member.issym(): - try: - tar._extract_member(member, path) - except Exception as exc: - # Some corrupt tar files seem to produce this - # (specifically bad symlinks) - logger.warning( - "In the tar file %s the member %s is invalid: %s", - filename, - member.name, - exc, - ) - continue - else: + + # PEP 706 added `tarfile.data_filter`, and made some other changes to + # Python's tarfile module (see below). The features were backported to + # security releases. 
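+        # Note: the attribute lookup below doubles as a feature test; on
+        # interpreters without the filters, the AttributeError sends
+        # extraction through the legacy _untar_without_filter() path.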
+ try: + data_filter = tarfile.data_filter + except AttributeError: + _untar_without_filter(filename, location, tar, leading) + else: + default_mode_plus_executable = _get_default_mode_plus_executable() + + def pip_filter(member: tarfile.TarInfo, path: str) -> tarfile.TarInfo: + if leading: + member.name = split_leading_dir(member.name)[1] + orig_mode = member.mode try: - fp = tar.extractfile(member) - except (KeyError, AttributeError) as exc: - # Some corrupt tar files seem to produce this - # (specifically bad symlinks) - logger.warning( - "In the tar file %s the member %s is invalid: %s", - filename, - member.name, - exc, + try: + member = data_filter(member, location) + except tarfile.LinkOutsideDestinationError: + if sys.version_info[:3] in { + (3, 8, 17), + (3, 9, 17), + (3, 10, 12), + (3, 11, 4), + }: + # The tarfile filter in specific Python versions + # raises LinkOutsideDestinationError on valid input + # (https://github.com/python/cpython/issues/107845) + # Ignore the error there, but do use the + # more lax `tar_filter` + member = tarfile.tar_filter(member, location) + else: + raise + except tarfile.TarError as exc: + message = "Invalid member in the tar file {}: {}" + # Filter error messages mention the member name. + # No need to add it here. + raise InstallationError( + message.format( + filename, + exc, + ) ) - continue - ensure_dir(os.path.dirname(path)) - assert fp is not None - with open(path, "wb") as destfp: - shutil.copyfileobj(fp, destfp) - fp.close() - # Update the timestamp (useful for cython compiled files) - tar.utime(member, path) - # member have any execute permissions for user/group/world? - if member.mode & 0o111: - set_extracted_file_to_default_mode_plus_executable(path) + if member.isfile() and orig_mode & 0o111: + member.mode = default_mode_plus_executable + else: + # See PEP 706 note above. + # The PEP changed this from `int` to `Optional[int]`, + # where None means "use the default". Mypy doesn't + # know this yet. 
+ member.mode = None # type: ignore [assignment] + return member + + tar.extractall(location, filter=pip_filter) + finally: tar.close() +def _untar_without_filter( + filename: str, + location: str, + tar: tarfile.TarFile, + leading: bool, +) -> None: + """Fallback for Python without tarfile.data_filter""" + for member in tar.getmembers(): + fn = member.name + if leading: + fn = split_leading_dir(fn)[1] + path = os.path.join(location, fn) + if not is_within_directory(location, path): + message = ( + "The tar file ({}) has a file ({}) trying to install " + "outside target directory ({})" + ) + raise InstallationError(message.format(filename, path, location)) + if member.isdir(): + ensure_dir(path) + elif member.issym(): + try: + tar._extract_member(member, path) + except Exception as exc: + # Some corrupt tar files seem to produce this + # (specifically bad symlinks) + logger.warning( + "In the tar file %s the member %s is invalid: %s", + filename, + member.name, + exc, + ) + continue + else: + try: + fp = tar.extractfile(member) + except (KeyError, AttributeError) as exc: + # Some corrupt tar files seem to produce this + # (specifically bad symlinks) + logger.warning( + "In the tar file %s the member %s is invalid: %s", + filename, + member.name, + exc, + ) + continue + ensure_dir(os.path.dirname(path)) + assert fp is not None + with open(path, "wb") as destfp: + shutil.copyfileobj(fp, destfp) + fp.close() + # Update the timestamp (useful for cython compiled files) + tar.utime(member, path) + # member have any execute permissions for user/group/world? + if member.mode & 0o111: + set_extracted_file_to_default_mode_plus_executable(path) + + def unpack_file( filename: str, location: str, diff --git a/tests/unit/test_utils_unpacking.py b/tests/unit/test_utils_unpacking.py index 1f0b59dbd6b..3fdd822e739 100644 --- a/tests/unit/test_utils_unpacking.py +++ b/tests/unit/test_utils_unpacking.py @@ -155,7 +155,13 @@ def test_unpack_tar_failure(self) -> None: test_tar = self.make_tar_file("test_tar.tar", files) with pytest.raises(InstallationError) as e: untar_file(test_tar, self.tempdir) - assert "trying to install outside target directory" in str(e.value) + + # The error message comes from tarfile.data_filter when it is available, + # otherwise from pip's own check. + if hasattr(tarfile, "data_filter"): + assert "is outside the destination" in str(e.value) + else: + assert "trying to install outside target directory" in str(e.value) def test_unpack_tar_success(self) -> None: """ @@ -171,6 +177,26 @@ def test_unpack_tar_success(self) -> None: test_tar = self.make_tar_file("test_tar.tar", files) untar_file(test_tar, self.tempdir) + @pytest.mark.skipif( + not hasattr(tarfile, "data_filter"), + reason="tarfile filters (PEP-721) not available", + ) + def test_unpack_tar_filter(self) -> None: + """ + Test that the tarfile.data_filter is used to disallow dangerous + behaviour (PEP-721) + """ + test_tar = os.path.join(self.tempdir, "test_tar_filter.tar") + with tarfile.open(test_tar, "w") as mytar: + file_tarinfo = tarfile.TarInfo("bad-link") + file_tarinfo.type = tarfile.SYMTYPE + file_tarinfo.linkname = "../../../../pwn" + mytar.addfile(file_tarinfo, io.BytesIO(b"")) + with pytest.raises(InstallationError) as e: + untar_file(test_tar, self.tempdir) + + assert "is outside the destination" in str(e.value) + def test_unpack_tar_unicode(tmpdir: Path) -> None: test_tar = tmpdir / "test.tar"